1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 #include <linux/crash_dump.h> 43 #include <linux/cpu.h> 44 #include <linux/cpuhotplug.h> 45 46 #include <scsi/scsi.h> 47 #include <scsi/scsi_device.h> 48 #include <scsi/scsi_host.h> 49 #include <scsi/scsi_transport_fc.h> 50 #include <scsi/scsi_tcq.h> 51 #include <scsi/fc/fc_fs.h> 52 53 #include "lpfc_hw4.h" 54 #include "lpfc_hw.h" 55 #include "lpfc_sli.h" 56 #include "lpfc_sli4.h" 57 #include "lpfc_nl.h" 58 #include "lpfc_disc.h" 59 #include "lpfc.h" 60 #include "lpfc_scsi.h" 61 #include "lpfc_nvme.h" 62 #include "lpfc_logmsg.h" 63 #include "lpfc_crtn.h" 64 #include "lpfc_vport.h" 65 #include "lpfc_version.h" 66 #include "lpfc_ids.h" 67 68 static enum cpuhp_state lpfc_cpuhp_state; 69 /* Used when mapping IRQ vectors in a driver centric manner */ 70 static uint32_t lpfc_present_cpu; 71 static bool lpfc_pldv_detect; 72 73 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 74 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); 75 static void lpfc_cpuhp_add(struct lpfc_hba *phba); 76 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 77 static int lpfc_post_rcv_buf(struct lpfc_hba *); 78 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 79 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 80 static int lpfc_setup_endian_order(struct lpfc_hba *); 81 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 82 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 83 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 84 static void lpfc_init_sgl_list(struct lpfc_hba *); 85 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 86 static void lpfc_free_active_sgl(struct lpfc_hba *); 87 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 88 static int 
lpfc_hba_down_post_s4(struct lpfc_hba *phba); 89 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 90 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 91 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 92 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 93 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 94 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 95 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 96 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 97 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); 98 99 static struct scsi_transport_template *lpfc_transport_template = NULL; 100 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 101 static DEFINE_IDR(lpfc_hba_index); 102 #define LPFC_NVMET_BUF_POST 254 103 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); 104 105 /** 106 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 107 * @phba: pointer to lpfc hba data structure. 108 * 109 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 110 * mailbox command. It retrieves the revision information from the HBA and 111 * collects the Vital Product Data (VPD) about the HBA for preparing the 112 * configuration of the HBA. 113 * 114 * Return codes: 115 * 0 - success. 116 * -ERESTART - requests the SLI layer to reset the HBA and try again. 117 * Any other value - indicates an error. 118 **/ 119 int 120 lpfc_config_port_prep(struct lpfc_hba *phba) 121 { 122 lpfc_vpd_t *vp = &phba->vpd; 123 int i = 0, rc; 124 LPFC_MBOXQ_t *pmb; 125 MAILBOX_t *mb; 126 char *lpfc_vpd_data = NULL; 127 uint16_t offset = 0; 128 static char licensed[56] = 129 "key unlock for use with gnu public licensed code only\0"; 130 static int init_key = 1; 131 132 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 133 if (!pmb) { 134 phba->link_state = LPFC_HBA_ERROR; 135 return -ENOMEM; 136 } 137 138 mb = &pmb->u.mb; 139 phba->link_state = LPFC_INIT_MBX_CMDS; 140 141 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 142 if (init_key) { 143 uint32_t *ptext = (uint32_t *) licensed; 144 145 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 146 *ptext = cpu_to_be32(*ptext); 147 init_key = 0; 148 } 149 150 lpfc_read_nv(phba, pmb); 151 memset((char*)mb->un.varRDnvp.rsvd3, 0, 152 sizeof (mb->un.varRDnvp.rsvd3)); 153 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 154 sizeof (licensed)); 155 156 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 157 158 if (rc != MBX_SUCCESS) { 159 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 160 "0324 Config Port initialization " 161 "error, mbxCmd x%x READ_NVPARM, " 162 "mbxStatus x%x\n", 163 mb->mbxCommand, mb->mbxStatus); 164 mempool_free(pmb, phba->mbox_mem_pool); 165 return -ERESTART; 166 } 167 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 168 sizeof(phba->wwnn)); 169 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 170 sizeof(phba->wwpn)); 171 } 172 173 /* 174 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 175 * which was already set in lpfc_get_cfgparam() 176 */ 177 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 178 179 /* Setup and issue mailbox READ REV command */ 180 lpfc_read_rev(phba, pmb); 181 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 182 if (rc != MBX_SUCCESS) { 183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 184 "0439 Adapter failed to init, mbxCmd x%x " 185 "READ_REV, mbxStatus x%x\n", 186 mb->mbxCommand, mb->mbxStatus); 187 
mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command to the device. If the mailbox command
 * returns successfully, it sets the internal temperature sensor support
 * flag (phba->temp_sensor_support) to 1; otherwise, it clears the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command that retrieves
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *	None.
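 *
 * Summary of the logic below (taken from the function body): any configured
 * soft WWNN/WWPN (cfg_soft_wwnn/cfg_soft_wwpn) overrides the names read from
 * the service parameters, and a changed port name is flagged with
 * FAWWPN_PARAM_CHG so that the existing login can be unregistered.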
349 **/ 350 void 351 lpfc_update_vport_wwn(struct lpfc_vport *vport) 352 { 353 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 354 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 355 356 /* If the soft name exists then update it using the service params */ 357 if (vport->phba->cfg_soft_wwnn) 358 u64_to_wwn(vport->phba->cfg_soft_wwnn, 359 vport->fc_sparam.nodeName.u.wwn); 360 if (vport->phba->cfg_soft_wwpn) 361 u64_to_wwn(vport->phba->cfg_soft_wwpn, 362 vport->fc_sparam.portName.u.wwn); 363 364 /* 365 * If the name is empty or there exists a soft name 366 * then copy the service params name, otherwise use the fc name 367 */ 368 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 369 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 370 sizeof(struct lpfc_name)); 371 else 372 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 373 sizeof(struct lpfc_name)); 374 375 /* 376 * If the port name has changed, then set the Param changes flag 377 * to unreg the login 378 */ 379 if (vport->fc_portname.u.wwn[0] != 0 && 380 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 381 sizeof(struct lpfc_name))) 382 vport->vport_flag |= FAWWPN_PARAM_CHG; 383 384 if (vport->fc_portname.u.wwn[0] == 0 || 385 vport->phba->cfg_soft_wwpn || 386 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 387 vport->vport_flag & FAWWPN_SET) { 388 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 389 sizeof(struct lpfc_name)); 390 vport->vport_flag &= ~FAWWPN_SET; 391 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 392 vport->vport_flag |= FAWWPN_SET; 393 } 394 else 395 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 396 sizeof(struct lpfc_name)); 397 } 398 399 /** 400 * lpfc_config_port_post - Perform lpfc initialization after config port 401 * @phba: pointer to lpfc hba data structure. 402 * 403 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 404 * command call. It performs all internal resource and state setups on the 405 * port: post IOCB buffers, enable appropriate host interrupt attentions, 406 * ELS ring timers, etc. 407 * 408 * Return codes 409 * 0 - success. 410 * Any other value - error. 411 **/ 412 int 413 lpfc_config_port_post(struct lpfc_hba *phba) 414 { 415 struct lpfc_vport *vport = phba->pport; 416 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 417 LPFC_MBOXQ_t *pmb; 418 MAILBOX_t *mb; 419 struct lpfc_dmabuf *mp; 420 struct lpfc_sli *psli = &phba->sli; 421 uint32_t status, timeout; 422 int i, j; 423 int rc; 424 425 spin_lock_irq(&phba->hbalock); 426 /* 427 * If the Config port completed correctly the HBA is not 428 * over heated any more. 429 */ 430 if (phba->over_temp_state == HBA_OVER_TEMP) 431 phba->over_temp_state = HBA_NORMAL_TEMP; 432 spin_unlock_irq(&phba->hbalock); 433 434 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 435 if (!pmb) { 436 phba->link_state = LPFC_HBA_ERROR; 437 return -ENOMEM; 438 } 439 mb = &pmb->u.mb; 440 441 /* Get login parameters for NID. 
*/ 442 rc = lpfc_read_sparam(phba, pmb, 0); 443 if (rc) { 444 mempool_free(pmb, phba->mbox_mem_pool); 445 return -ENOMEM; 446 } 447 448 pmb->vport = vport; 449 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 450 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 451 "0448 Adapter failed init, mbxCmd x%x " 452 "READ_SPARM mbxStatus x%x\n", 453 mb->mbxCommand, mb->mbxStatus); 454 phba->link_state = LPFC_HBA_ERROR; 455 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 456 mempool_free(pmb, phba->mbox_mem_pool); 457 lpfc_mbuf_free(phba, mp->virt, mp->phys); 458 kfree(mp); 459 return -EIO; 460 } 461 462 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 463 464 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 465 lpfc_mbuf_free(phba, mp->virt, mp->phys); 466 kfree(mp); 467 pmb->ctx_buf = NULL; 468 lpfc_update_vport_wwn(vport); 469 470 /* Update the fc_host data structures with new wwn. */ 471 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 472 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 473 fc_host_max_npiv_vports(shost) = phba->max_vpi; 474 475 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 476 /* This should be consolidated into parse_vpd ? - mr */ 477 if (phba->SerialNumber[0] == 0) { 478 uint8_t *outptr; 479 480 outptr = &vport->fc_nodename.u.s.IEEE[0]; 481 for (i = 0; i < 12; i++) { 482 status = *outptr++; 483 j = ((status & 0xf0) >> 4); 484 if (j <= 9) 485 phba->SerialNumber[i] = 486 (char)((uint8_t) 0x30 + (uint8_t) j); 487 else 488 phba->SerialNumber[i] = 489 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 490 i++; 491 j = (status & 0xf); 492 if (j <= 9) 493 phba->SerialNumber[i] = 494 (char)((uint8_t) 0x30 + (uint8_t) j); 495 else 496 phba->SerialNumber[i] = 497 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 498 } 499 } 500 501 lpfc_read_config(phba, pmb); 502 pmb->vport = vport; 503 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 504 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 505 "0453 Adapter failed to init, mbxCmd x%x " 506 "READ_CONFIG, mbxStatus x%x\n", 507 mb->mbxCommand, mb->mbxStatus); 508 phba->link_state = LPFC_HBA_ERROR; 509 mempool_free( pmb, phba->mbox_mem_pool); 510 return -EIO; 511 } 512 513 /* Check if the port is disabled */ 514 lpfc_sli_read_link_ste(phba); 515 516 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 517 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { 518 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 519 "3359 HBA queue depth changed from %d to %d\n", 520 phba->cfg_hba_queue_depth, 521 mb->un.varRdConfig.max_xri); 522 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; 523 } 524 525 phba->lmt = mb->un.varRdConfig.lmt; 526 527 /* Get the default values for Model Name and Description */ 528 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 529 530 phba->link_state = LPFC_LINK_DOWN; 531 532 /* Only process IOCBs on ELS ring till hba_state is READY */ 533 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 534 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 535 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 536 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 537 538 /* Post receive buffers for desired rings */ 539 if (phba->sli_rev != 3) 540 lpfc_post_rcv_buf(phba); 541 542 /* 543 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 544 */ 545 if (phba->intr_type == MSIX) { 546 rc = lpfc_config_msi(phba, pmb); 547 if (rc) { 548 mempool_free(pmb, phba->mbox_mem_pool); 549 return -EIO; 550 } 
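		/* Issue the CONFIG_MSI mailbox just built above in polled
		 * mode; a failure here aborts port initialization with -EIO.
		 */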
551 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 552 if (rc != MBX_SUCCESS) { 553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 554 "0352 Config MSI mailbox command " 555 "failed, mbxCmd x%x, mbxStatus x%x\n", 556 pmb->u.mb.mbxCommand, 557 pmb->u.mb.mbxStatus); 558 mempool_free(pmb, phba->mbox_mem_pool); 559 return -EIO; 560 } 561 } 562 563 spin_lock_irq(&phba->hbalock); 564 /* Initialize ERATT handling flag */ 565 phba->hba_flag &= ~HBA_ERATT_HANDLED; 566 567 /* Enable appropriate host interrupts */ 568 if (lpfc_readl(phba->HCregaddr, &status)) { 569 spin_unlock_irq(&phba->hbalock); 570 return -EIO; 571 } 572 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 573 if (psli->num_rings > 0) 574 status |= HC_R0INT_ENA; 575 if (psli->num_rings > 1) 576 status |= HC_R1INT_ENA; 577 if (psli->num_rings > 2) 578 status |= HC_R2INT_ENA; 579 if (psli->num_rings > 3) 580 status |= HC_R3INT_ENA; 581 582 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 583 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 584 status &= ~(HC_R0INT_ENA); 585 586 writel(status, phba->HCregaddr); 587 readl(phba->HCregaddr); /* flush */ 588 spin_unlock_irq(&phba->hbalock); 589 590 /* Set up ring-0 (ELS) timer */ 591 timeout = phba->fc_ratov * 2; 592 mod_timer(&vport->els_tmofunc, 593 jiffies + msecs_to_jiffies(1000 * timeout)); 594 /* Set up heart beat (HB) timer */ 595 mod_timer(&phba->hb_tmofunc, 596 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 597 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 598 phba->last_completion_time = jiffies; 599 /* Set up error attention (ERATT) polling timer */ 600 mod_timer(&phba->eratt_poll, 601 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 602 603 if (phba->hba_flag & LINK_DISABLED) { 604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 605 "2598 Adapter Link is disabled.\n"); 606 lpfc_down_link(phba, pmb); 607 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 608 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 609 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 610 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 611 "2599 Adapter failed to issue DOWN_LINK" 612 " mbox command rc 0x%x\n", rc); 613 614 mempool_free(pmb, phba->mbox_mem_pool); 615 return -EIO; 616 } 617 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 618 mempool_free(pmb, phba->mbox_mem_pool); 619 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 620 if (rc) 621 return rc; 622 } 623 /* MBOX buffer will be freed in mbox compl */ 624 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 625 if (!pmb) { 626 phba->link_state = LPFC_HBA_ERROR; 627 return -ENOMEM; 628 } 629 630 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 631 pmb->mbox_cmpl = lpfc_config_async_cmpl; 632 pmb->vport = phba->pport; 633 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 634 635 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 636 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 637 "0456 Adapter failed to issue " 638 "ASYNCEVT_ENABLE mbox status x%x\n", 639 rc); 640 mempool_free(pmb, phba->mbox_mem_pool); 641 } 642 643 /* Get Option rom version */ 644 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 645 if (!pmb) { 646 phba->link_state = LPFC_HBA_ERROR; 647 return -ENOMEM; 648 } 649 650 lpfc_dump_wakeup_param(phba, pmb); 651 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 652 pmb->vport = phba->pport; 653 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 654 655 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 656 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 657 "0435 Adapter failed " 658 "to get Option ROM 
version status x%x\n", rc); 659 mempool_free(pmb, phba->mbox_mem_pool); 660 } 661 662 return 0; 663 } 664 665 /** 666 * lpfc_sli4_refresh_params - update driver copy of params. 667 * @phba: Pointer to HBA context object. 668 * 669 * This is called to refresh driver copy of dynamic fields from the 670 * common_get_sli4_parameters descriptor. 671 **/ 672 int 673 lpfc_sli4_refresh_params(struct lpfc_hba *phba) 674 { 675 LPFC_MBOXQ_t *mboxq; 676 struct lpfc_mqe *mqe; 677 struct lpfc_sli4_parameters *mbx_sli4_parameters; 678 int length, rc; 679 680 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 681 if (!mboxq) 682 return -ENOMEM; 683 684 mqe = &mboxq->u.mqe; 685 /* Read the port's SLI4 Config Parameters */ 686 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 687 sizeof(struct lpfc_sli4_cfg_mhdr)); 688 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 689 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 690 length, LPFC_SLI4_MBX_EMBED); 691 692 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 693 if (unlikely(rc)) { 694 mempool_free(mboxq, phba->mbox_mem_pool); 695 return rc; 696 } 697 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 698 phba->sli4_hba.pc_sli4_params.mi_ver = 699 bf_get(cfg_mi_ver, mbx_sli4_parameters); 700 phba->sli4_hba.pc_sli4_params.cmf = 701 bf_get(cfg_cmf, mbx_sli4_parameters); 702 phba->sli4_hba.pc_sli4_params.pls = 703 bf_get(cfg_pvl, mbx_sli4_parameters); 704 705 mempool_free(mboxq, phba->mbox_mem_pool); 706 return rc; 707 } 708 709 /** 710 * lpfc_hba_init_link - Initialize the FC link 711 * @phba: pointer to lpfc hba data structure. 712 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 713 * 714 * This routine will issue the INIT_LINK mailbox command call. 715 * It is available to other drivers through the lpfc_hba data 716 * structure for use as a delayed link up mechanism with the 717 * module parameter lpfc_suppress_link_up. 718 * 719 * Return code 720 * 0 - success 721 * Any other value - error 722 **/ 723 static int 724 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 725 { 726 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 727 } 728 729 /** 730 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 731 * @phba: pointer to lpfc hba data structure. 732 * @fc_topology: desired fc topology. 733 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 734 * 735 * This routine will issue the INIT_LINK mailbox command call. 736 * It is available to other drivers through the lpfc_hba data 737 * structure for use as a delayed link up mechanism with the 738 * module parameter lpfc_suppress_link_up. 
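 *
 * If the configured link speed (cfg_link_speed) is not supported by the
 * adapter's link-speed mask (phba->lmt), the routine below logs message
 * 1302 and falls back to LPFC_USER_LINK_SPEED_AUTO before issuing INIT_LINK.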
739 * 740 * Return code 741 * 0 - success 742 * Any other value - error 743 **/ 744 int 745 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 746 uint32_t flag) 747 { 748 struct lpfc_vport *vport = phba->pport; 749 LPFC_MBOXQ_t *pmb; 750 MAILBOX_t *mb; 751 int rc; 752 753 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 754 if (!pmb) { 755 phba->link_state = LPFC_HBA_ERROR; 756 return -ENOMEM; 757 } 758 mb = &pmb->u.mb; 759 pmb->vport = vport; 760 761 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 762 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 763 !(phba->lmt & LMT_1Gb)) || 764 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 765 !(phba->lmt & LMT_2Gb)) || 766 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 767 !(phba->lmt & LMT_4Gb)) || 768 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 769 !(phba->lmt & LMT_8Gb)) || 770 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 771 !(phba->lmt & LMT_10Gb)) || 772 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 773 !(phba->lmt & LMT_16Gb)) || 774 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 775 !(phba->lmt & LMT_32Gb)) || 776 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 777 !(phba->lmt & LMT_64Gb))) { 778 /* Reset link speed to auto */ 779 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 780 "1302 Invalid speed for this board:%d " 781 "Reset link speed to auto.\n", 782 phba->cfg_link_speed); 783 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 784 } 785 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 786 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 787 if (phba->sli_rev < LPFC_SLI_REV4) 788 lpfc_set_loopback_flag(phba); 789 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 790 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 791 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 792 "0498 Adapter failed to init, mbxCmd x%x " 793 "INIT_LINK, mbxStatus x%x\n", 794 mb->mbxCommand, mb->mbxStatus); 795 if (phba->sli_rev <= LPFC_SLI_REV3) { 796 /* Clear all interrupt enable conditions */ 797 writel(0, phba->HCregaddr); 798 readl(phba->HCregaddr); /* flush */ 799 /* Clear all pending interrupts */ 800 writel(0xffffffff, phba->HAregaddr); 801 readl(phba->HAregaddr); /* flush */ 802 } 803 phba->link_state = LPFC_HBA_ERROR; 804 if (rc != MBX_BUSY || flag == MBX_POLL) 805 mempool_free(pmb, phba->mbox_mem_pool); 806 return -EIO; 807 } 808 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 809 if (flag == MBX_POLL) 810 mempool_free(pmb, phba->mbox_mem_pool); 811 812 return 0; 813 } 814 815 /** 816 * lpfc_hba_down_link - this routine downs the FC link 817 * @phba: pointer to lpfc hba data structure. 818 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 819 * 820 * This routine will issue the DOWN_LINK mailbox command call. 821 * It is available to other drivers through the lpfc_hba data 822 * structure for use to stop the link. 
823 * 824 * Return code 825 * 0 - success 826 * Any other value - error 827 **/ 828 static int 829 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 830 { 831 LPFC_MBOXQ_t *pmb; 832 int rc; 833 834 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 835 if (!pmb) { 836 phba->link_state = LPFC_HBA_ERROR; 837 return -ENOMEM; 838 } 839 840 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 841 "0491 Adapter Link is disabled.\n"); 842 lpfc_down_link(phba, pmb); 843 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 844 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 845 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 846 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 847 "2522 Adapter failed to issue DOWN_LINK" 848 " mbox command rc 0x%x\n", rc); 849 850 mempool_free(pmb, phba->mbox_mem_pool); 851 return -EIO; 852 } 853 if (flag == MBX_POLL) 854 mempool_free(pmb, phba->mbox_mem_pool); 855 856 return 0; 857 } 858 859 /** 860 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 861 * @phba: pointer to lpfc HBA data structure. 862 * 863 * This routine will do LPFC uninitialization before the HBA is reset when 864 * bringing down the SLI Layer. 865 * 866 * Return codes 867 * 0 - success. 868 * Any other value - error. 869 **/ 870 int 871 lpfc_hba_down_prep(struct lpfc_hba *phba) 872 { 873 struct lpfc_vport **vports; 874 int i; 875 876 if (phba->sli_rev <= LPFC_SLI_REV3) { 877 /* Disable interrupts */ 878 writel(0, phba->HCregaddr); 879 readl(phba->HCregaddr); /* flush */ 880 } 881 882 if (phba->pport->load_flag & FC_UNLOADING) 883 lpfc_cleanup_discovery_resources(phba->pport); 884 else { 885 vports = lpfc_create_vport_work_array(phba); 886 if (vports != NULL) 887 for (i = 0; i <= phba->max_vports && 888 vports[i] != NULL; i++) 889 lpfc_cleanup_discovery_resources(vports[i]); 890 lpfc_destroy_vport_work_array(phba, vports); 891 } 892 return 0; 893 } 894 895 /** 896 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 897 * rspiocb which got deferred 898 * 899 * @phba: pointer to lpfc HBA data structure. 900 * 901 * This routine will cleanup completed slow path events after HBA is reset 902 * when bringing down the SLI Layer. 903 * 904 * 905 * Return codes 906 * void. 907 **/ 908 static void 909 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 910 { 911 struct lpfc_iocbq *rspiocbq; 912 struct hbq_dmabuf *dmabuf; 913 struct lpfc_cq_event *cq_event; 914 915 spin_lock_irq(&phba->hbalock); 916 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 917 spin_unlock_irq(&phba->hbalock); 918 919 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 920 /* Get the response iocb from the head of work queue */ 921 spin_lock_irq(&phba->hbalock); 922 list_remove_head(&phba->sli4_hba.sp_queue_event, 923 cq_event, struct lpfc_cq_event, list); 924 spin_unlock_irq(&phba->hbalock); 925 926 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 927 case CQE_CODE_COMPL_WQE: 928 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 929 cq_event); 930 lpfc_sli_release_iocbq(phba, rspiocbq); 931 break; 932 case CQE_CODE_RECEIVE: 933 case CQE_CODE_RECEIVE_V1: 934 dmabuf = container_of(cq_event, struct hbq_dmabuf, 935 cq_event); 936 lpfc_in_buf_free(phba, &dmabuf->dbuf); 937 } 938 } 939 } 940 941 /** 942 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 943 * @phba: pointer to lpfc HBA data structure. 944 * 945 * This routine will cleanup posted ELS buffers after the HBA is reset 946 * when bringing down the SLI Layer. 947 * 948 * 949 * Return codes 950 * void. 
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *	void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *	0 - success.
 *	Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *	0 - success.
 *	Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *	0 - success.
 *	Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified.
This timeout 1175 * event will be used by the worker thread to invoke the actual timeout 1176 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1177 * be performed in the timeout handler and the HBA timeout event bit shall 1178 * be cleared by the worker thread after it has taken the event bitmap out. 1179 **/ 1180 static void 1181 lpfc_hb_timeout(struct timer_list *t) 1182 { 1183 struct lpfc_hba *phba; 1184 uint32_t tmo_posted; 1185 unsigned long iflag; 1186 1187 phba = from_timer(phba, t, hb_tmofunc); 1188 1189 /* Check for heart beat timeout conditions */ 1190 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1191 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1192 if (!tmo_posted) 1193 phba->pport->work_port_events |= WORKER_HB_TMO; 1194 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1195 1196 /* Tell the worker thread there is work to do */ 1197 if (!tmo_posted) 1198 lpfc_worker_wake_up(phba); 1199 return; 1200 } 1201 1202 /** 1203 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1204 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1205 * 1206 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1207 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1208 * work-port-events bitmap and the worker thread is notified. This timeout 1209 * event will be used by the worker thread to invoke the actual timeout 1210 * handler routine, lpfc_rrq_handler. Any periodical operations will 1211 * be performed in the timeout handler and the RRQ timeout event bit shall 1212 * be cleared by the worker thread after it has taken the event bitmap out. 1213 **/ 1214 static void 1215 lpfc_rrq_timeout(struct timer_list *t) 1216 { 1217 struct lpfc_hba *phba; 1218 unsigned long iflag; 1219 1220 phba = from_timer(phba, t, rrq_tmr); 1221 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1222 if (!(phba->pport->load_flag & FC_UNLOADING)) 1223 phba->hba_flag |= HBA_RRQ_ACTIVE; 1224 else 1225 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1226 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1227 1228 if (!(phba->pport->load_flag & FC_UNLOADING)) 1229 lpfc_worker_wake_up(phba); 1230 } 1231 1232 /** 1233 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1234 * @phba: pointer to lpfc hba data structure. 1235 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1236 * 1237 * This is the callback function to the lpfc heart-beat mailbox command. 1238 * If configured, the lpfc driver issues the heart-beat mailbox command to 1239 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1240 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1241 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1242 * heart-beat outstanding state. Once the mailbox command comes back and 1243 * no error conditions detected, the heart-beat mailbox command timer is 1244 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1245 * state is cleared for the next heart-beat. If the timer expired with the 1246 * heart-beat outstanding state set, the driver will put the HBA offline. 
1247 **/ 1248 static void 1249 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1250 { 1251 unsigned long drvr_flag; 1252 1253 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1254 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 1255 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1256 1257 /* Check and reset heart-beat timer if necessary */ 1258 mempool_free(pmboxq, phba->mbox_mem_pool); 1259 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1260 !(phba->link_state == LPFC_HBA_ERROR) && 1261 !(phba->pport->load_flag & FC_UNLOADING)) 1262 mod_timer(&phba->hb_tmofunc, 1263 jiffies + 1264 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1265 return; 1266 } 1267 1268 /* 1269 * lpfc_idle_stat_delay_work - idle_stat tracking 1270 * 1271 * This routine tracks per-cq idle_stat and determines polling decisions. 1272 * 1273 * Return codes: 1274 * None 1275 **/ 1276 static void 1277 lpfc_idle_stat_delay_work(struct work_struct *work) 1278 { 1279 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1280 struct lpfc_hba, 1281 idle_stat_delay_work); 1282 struct lpfc_queue *cq; 1283 struct lpfc_sli4_hdw_queue *hdwq; 1284 struct lpfc_idle_stat *idle_stat; 1285 u32 i, idle_percent; 1286 u64 wall, wall_idle, diff_wall, diff_idle, busy_time; 1287 1288 if (phba->pport->load_flag & FC_UNLOADING) 1289 return; 1290 1291 if (phba->link_state == LPFC_HBA_ERROR || 1292 phba->pport->fc_flag & FC_OFFLINE_MODE || 1293 phba->cmf_active_mode != LPFC_CFG_OFF) 1294 goto requeue; 1295 1296 for_each_present_cpu(i) { 1297 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; 1298 cq = hdwq->io_cq; 1299 1300 /* Skip if we've already handled this cq's primary CPU */ 1301 if (cq->chann != i) 1302 continue; 1303 1304 idle_stat = &phba->sli4_hba.idle_stat[i]; 1305 1306 /* get_cpu_idle_time returns values as running counters. Thus, 1307 * to know the amount for this period, the prior counter values 1308 * need to be subtracted from the current counter values. 1309 * From there, the idle time stat can be calculated as a 1310 * percentage of 100 - the sum of the other consumption times. 
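		 *
		 * Illustrative numbers (not taken from hardware): if diff_wall
		 * is 1000 and diff_idle is 850, then busy_time is 150, the
		 * busy percentage is 100 * 150 / 1000 = 15, and idle_percent
		 * becomes 100 - 15 = 85, so the CQ stays in LPFC_IRQ_POLL
		 * mode (the LPFC_QUEUE_WORK cutoff below is idle_percent
		 * below 15).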
1311 */ 1312 wall_idle = get_cpu_idle_time(i, &wall, 1); 1313 diff_idle = wall_idle - idle_stat->prev_idle; 1314 diff_wall = wall - idle_stat->prev_wall; 1315 1316 if (diff_wall <= diff_idle) 1317 busy_time = 0; 1318 else 1319 busy_time = diff_wall - diff_idle; 1320 1321 idle_percent = div64_u64(100 * busy_time, diff_wall); 1322 idle_percent = 100 - idle_percent; 1323 1324 if (idle_percent < 15) 1325 cq->poll_mode = LPFC_QUEUE_WORK; 1326 else 1327 cq->poll_mode = LPFC_IRQ_POLL; 1328 1329 idle_stat->prev_idle = wall_idle; 1330 idle_stat->prev_wall = wall; 1331 } 1332 1333 requeue: 1334 schedule_delayed_work(&phba->idle_stat_delay_work, 1335 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); 1336 } 1337 1338 static void 1339 lpfc_hb_eq_delay_work(struct work_struct *work) 1340 { 1341 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1342 struct lpfc_hba, eq_delay_work); 1343 struct lpfc_eq_intr_info *eqi, *eqi_new; 1344 struct lpfc_queue *eq, *eq_next; 1345 unsigned char *ena_delay = NULL; 1346 uint32_t usdelay; 1347 int i; 1348 1349 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1350 return; 1351 1352 if (phba->link_state == LPFC_HBA_ERROR || 1353 phba->pport->fc_flag & FC_OFFLINE_MODE) 1354 goto requeue; 1355 1356 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1357 GFP_KERNEL); 1358 if (!ena_delay) 1359 goto requeue; 1360 1361 for (i = 0; i < phba->cfg_irq_chann; i++) { 1362 /* Get the EQ corresponding to the IRQ vector */ 1363 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1364 if (!eq) 1365 continue; 1366 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1367 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1368 ena_delay[eq->last_cpu] = 1; 1369 } 1370 } 1371 1372 for_each_present_cpu(i) { 1373 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1374 if (ena_delay[i]) { 1375 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1376 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1377 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1378 } else { 1379 usdelay = 0; 1380 } 1381 1382 eqi->icnt = 0; 1383 1384 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1385 if (unlikely(eq->last_cpu != i)) { 1386 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1387 eq->last_cpu); 1388 list_move_tail(&eq->cpu_list, &eqi_new->list); 1389 continue; 1390 } 1391 if (usdelay != eq->q_mode) 1392 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1393 usdelay); 1394 } 1395 } 1396 1397 kfree(ena_delay); 1398 1399 requeue: 1400 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1401 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1402 } 1403 1404 /** 1405 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1406 * @phba: pointer to lpfc hba data structure. 1407 * 1408 * For each heartbeat, this routine does some heuristic methods to adjust 1409 * XRI distribution. The goal is to fully utilize free XRIs. 1410 **/ 1411 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1412 { 1413 u32 i; 1414 u32 hwq_count; 1415 1416 hwq_count = phba->cfg_hdw_queue; 1417 for (i = 0; i < hwq_count; i++) { 1418 /* Adjust XRIs in private pool */ 1419 lpfc_adjust_pvt_pool_count(phba, i); 1420 1421 /* Adjust high watermark */ 1422 lpfc_adjust_high_watermark(phba, i); 1423 1424 #ifdef LPFC_MXP_STAT 1425 /* Snapshot pbl, pvt and busy count */ 1426 lpfc_snapshot_mxp(phba, i); 1427 #endif 1428 } 1429 } 1430 1431 /** 1432 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command 1433 * @phba: pointer to lpfc hba data structure. 
 *
 * If an HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with an MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force an MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue an MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
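 *
 * When lpfc_enable_hba_heartbeat is not set, the handler below only issues
 * an MBX_HEARTBEAT if the HBA_HBEAT_TMO flag was set (see
 * lpfc_issue_hb_tmo()); otherwise it just rearms the timer for the next
 * LPFC_HB_MBOX_INTERVAL.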
1500 **/ 1501 void 1502 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1503 { 1504 struct lpfc_vport **vports; 1505 struct lpfc_dmabuf *buf_ptr; 1506 int retval = 0; 1507 int i, tmo; 1508 struct lpfc_sli *psli = &phba->sli; 1509 LIST_HEAD(completions); 1510 1511 if (phba->cfg_xri_rebalancing) { 1512 /* Multi-XRI pools handler */ 1513 lpfc_hb_mxp_handler(phba); 1514 } 1515 1516 vports = lpfc_create_vport_work_array(phba); 1517 if (vports != NULL) 1518 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1519 lpfc_rcv_seq_check_edtov(vports[i]); 1520 lpfc_fdmi_change_check(vports[i]); 1521 } 1522 lpfc_destroy_vport_work_array(phba, vports); 1523 1524 if ((phba->link_state == LPFC_HBA_ERROR) || 1525 (phba->pport->load_flag & FC_UNLOADING) || 1526 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1527 return; 1528 1529 if (phba->elsbuf_cnt && 1530 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1531 spin_lock_irq(&phba->hbalock); 1532 list_splice_init(&phba->elsbuf, &completions); 1533 phba->elsbuf_cnt = 0; 1534 phba->elsbuf_prev_cnt = 0; 1535 spin_unlock_irq(&phba->hbalock); 1536 1537 while (!list_empty(&completions)) { 1538 list_remove_head(&completions, buf_ptr, 1539 struct lpfc_dmabuf, list); 1540 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1541 kfree(buf_ptr); 1542 } 1543 } 1544 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1545 1546 /* If there is no heart beat outstanding, issue a heartbeat command */ 1547 if (phba->cfg_enable_hba_heartbeat) { 1548 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ 1549 spin_lock_irq(&phba->pport->work_port_lock); 1550 if (time_after(phba->last_completion_time + 1551 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1552 jiffies)) { 1553 spin_unlock_irq(&phba->pport->work_port_lock); 1554 if (phba->hba_flag & HBA_HBEAT_INP) 1555 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1556 else 1557 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1558 goto out; 1559 } 1560 spin_unlock_irq(&phba->pport->work_port_lock); 1561 1562 /* Check if a MBX_HEARTBEAT is already in progress */ 1563 if (phba->hba_flag & HBA_HBEAT_INP) { 1564 /* 1565 * If heart beat timeout called with HBA_HBEAT_INP set 1566 * we need to give the hb mailbox cmd a chance to 1567 * complete or TMO. 
1568 */ 1569 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1570 "0459 Adapter heartbeat still outstanding: " 1571 "last compl time was %d ms.\n", 1572 jiffies_to_msecs(jiffies 1573 - phba->last_completion_time)); 1574 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1575 } else { 1576 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1577 (list_empty(&psli->mboxq))) { 1578 1579 retval = lpfc_issue_hb_mbox(phba); 1580 if (retval) { 1581 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1582 goto out; 1583 } 1584 phba->skipped_hb = 0; 1585 } else if (time_before_eq(phba->last_completion_time, 1586 phba->skipped_hb)) { 1587 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1588 "2857 Last completion time not " 1589 " updated in %d ms\n", 1590 jiffies_to_msecs(jiffies 1591 - phba->last_completion_time)); 1592 } else 1593 phba->skipped_hb = jiffies; 1594 1595 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1596 goto out; 1597 } 1598 } else { 1599 /* Check to see if we want to force a MBX_HEARTBEAT */ 1600 if (phba->hba_flag & HBA_HBEAT_TMO) { 1601 retval = lpfc_issue_hb_mbox(phba); 1602 if (retval) 1603 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1604 else 1605 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1606 goto out; 1607 } 1608 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1609 } 1610 out: 1611 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); 1612 } 1613 1614 /** 1615 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1616 * @phba: pointer to lpfc hba data structure. 1617 * 1618 * This routine is called to bring the HBA offline when HBA hardware error 1619 * other than Port Error 6 has been detected. 1620 **/ 1621 static void 1622 lpfc_offline_eratt(struct lpfc_hba *phba) 1623 { 1624 struct lpfc_sli *psli = &phba->sli; 1625 1626 spin_lock_irq(&phba->hbalock); 1627 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1628 spin_unlock_irq(&phba->hbalock); 1629 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1630 1631 lpfc_offline(phba); 1632 lpfc_reset_barrier(phba); 1633 spin_lock_irq(&phba->hbalock); 1634 lpfc_sli_brdreset(phba); 1635 spin_unlock_irq(&phba->hbalock); 1636 lpfc_hba_down_post(phba); 1637 lpfc_sli_brdready(phba, HS_MBRDY); 1638 lpfc_unblock_mgmt_io(phba); 1639 phba->link_state = LPFC_HBA_ERROR; 1640 return; 1641 } 1642 1643 /** 1644 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1645 * @phba: pointer to lpfc hba data structure. 1646 * 1647 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1648 * other than Port Error 6 has been detected. 1649 **/ 1650 void 1651 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1652 { 1653 spin_lock_irq(&phba->hbalock); 1654 if (phba->link_state == LPFC_HBA_ERROR && 1655 phba->hba_flag & HBA_PCI_ERR) { 1656 spin_unlock_irq(&phba->hbalock); 1657 return; 1658 } 1659 phba->link_state = LPFC_HBA_ERROR; 1660 spin_unlock_irq(&phba->hbalock); 1661 1662 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1663 lpfc_sli_flush_io_rings(phba); 1664 lpfc_offline(phba); 1665 lpfc_hba_down_post(phba); 1666 lpfc_unblock_mgmt_io(phba); 1667 } 1668 1669 /** 1670 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1671 * @phba: pointer to lpfc hba data structure. 1672 * 1673 * This routine is invoked to handle the deferred HBA hardware error 1674 * conditions. This type of error is indicated by HBA by setting ER1 1675 * and another ER bit in the host status register. The driver will 1676 * wait until the ER1 bit clears before handling the error condition. 
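 *
 * The wait loop in the handler below polls the host status register roughly
 * every 100 ms and gives up early if the register read fails (the board was
 * unplugged) or the driver is unloading.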
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it has triggered an erratt. That could cause
	 * I/Os to be dropped by the firmware. Error out the iocbs (I/Os) on
	 * the txcmplq and let the SCSI layer retry them after the link is
	 * re-established.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
1785 */ 1786 if (pci_channel_offline(phba->pcidev)) { 1787 spin_lock_irq(&phba->hbalock); 1788 phba->hba_flag &= ~DEFER_ERATT; 1789 spin_unlock_irq(&phba->hbalock); 1790 return; 1791 } 1792 1793 /* If resets are disabled then leave the HBA alone and return */ 1794 if (!phba->cfg_enable_hba_reset) 1795 return; 1796 1797 /* Send an internal error event to mgmt application */ 1798 lpfc_board_errevt_to_mgmt(phba); 1799 1800 if (phba->hba_flag & DEFER_ERATT) 1801 lpfc_handle_deferred_eratt(phba); 1802 1803 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1804 if (phba->work_hs & HS_FFER6) 1805 /* Re-establishing Link */ 1806 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1807 "1301 Re-establishing Link " 1808 "Data: x%x x%x x%x\n", 1809 phba->work_hs, phba->work_status[0], 1810 phba->work_status[1]); 1811 if (phba->work_hs & HS_FFER8) 1812 /* Device Zeroization */ 1813 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1814 "2861 Host Authentication device " 1815 "zeroization Data:x%x x%x x%x\n", 1816 phba->work_hs, phba->work_status[0], 1817 phba->work_status[1]); 1818 1819 spin_lock_irq(&phba->hbalock); 1820 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1821 spin_unlock_irq(&phba->hbalock); 1822 1823 /* 1824 * Firmware stops when it triggered the erratt with HS_FFER6. 1825 * That could cause I/Os to be dropped by the firmware. 1826 * Error out the I/Os on the txcmplq and let the SCSI layer 1827 * retry them after re-establishing the link. 1828 */ 1829 lpfc_sli_abort_fcp_rings(phba); 1830 1831 /* 1832 * There was a firmware error. Take the hba offline and then 1833 * attempt to restart it. 1834 */ 1835 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1836 lpfc_offline(phba); 1837 lpfc_sli_brdrestart(phba); 1838 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1839 lpfc_unblock_mgmt_io(phba); 1840 return; 1841 } 1842 lpfc_unblock_mgmt_io(phba); 1843 } else if (phba->work_hs & HS_CRIT_TEMP) { 1844 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1845 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1846 temp_event_data.event_code = LPFC_CRIT_TEMP; 1847 temp_event_data.data = (uint32_t)temperature; 1848 1849 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1850 "0406 Adapter maximum temperature exceeded " 1851 "(%ld), taking this port offline " 1852 "Data: x%x x%x x%x\n", 1853 temperature, phba->work_hs, 1854 phba->work_status[0], phba->work_status[1]); 1855 1856 shost = lpfc_shost_from_vport(phba->pport); 1857 fc_host_post_vendor_event(shost, fc_get_event_number(), 1858 sizeof(temp_event_data), 1859 (char *) &temp_event_data, 1860 SCSI_NL_VID_TYPE_PCI 1861 | PCI_VENDOR_ID_EMULEX); 1862 1863 spin_lock_irq(&phba->hbalock); 1864 phba->over_temp_state = HBA_OVER_TEMP; 1865 spin_unlock_irq(&phba->hbalock); 1866 lpfc_offline_eratt(phba); 1867 1868 } else { 1869 /* The if clause above forces this code path when the status 1870 * failure is a value other than FFER6. Do not call the offline 1871 * routine twice. This is the adapter hardware error path.
1872 */ 1873 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1874 "0457 Adapter Hardware Error " 1875 "Data: x%x x%x x%x\n", 1876 phba->work_hs, 1877 phba->work_status[0], phba->work_status[1]); 1878 1879 event_data = FC_REG_DUMP_EVENT; 1880 shost = lpfc_shost_from_vport(vport); 1881 fc_host_post_vendor_event(shost, fc_get_event_number(), 1882 sizeof(event_data), (char *) &event_data, 1883 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1884 1885 lpfc_offline_eratt(phba); 1886 } 1887 return; 1888 } 1889 1890 /** 1891 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1892 * @phba: pointer to lpfc hba data structure. 1893 * @mbx_action: flag for mailbox shutdown action. 1894 * @en_rn_msg: send reset/port recovery message. 1895 * This routine is invoked to perform an SLI4 port PCI function reset in 1896 * response to port status register polling attention. It waits for port 1897 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1898 * During this process, interrupt vectors are freed and later requested 1899 * for handling possible port resource change. 1900 **/ 1901 static int 1902 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1903 bool en_rn_msg) 1904 { 1905 int rc; 1906 uint32_t intr_mode; 1907 LPFC_MBOXQ_t *mboxq; 1908 1909 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1910 LPFC_SLI_INTF_IF_TYPE_2) { 1911 /* 1912 * On error status condition, driver need to wait for port 1913 * ready before performing reset. 1914 */ 1915 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1916 if (rc) 1917 return rc; 1918 } 1919 1920 /* need reset: attempt for port recovery */ 1921 if (en_rn_msg) 1922 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1923 "2887 Reset Needed: Attempting Port " 1924 "Recovery...\n"); 1925 1926 /* If we are no wait, the HBA has been reset and is not 1927 * functional, thus we should clear 1928 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. 1929 */ 1930 if (mbx_action == LPFC_MBX_NO_WAIT) { 1931 spin_lock_irq(&phba->hbalock); 1932 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 1933 if (phba->sli.mbox_active) { 1934 mboxq = phba->sli.mbox_active; 1935 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 1936 __lpfc_mbox_cmpl_put(phba, mboxq); 1937 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1938 phba->sli.mbox_active = NULL; 1939 } 1940 spin_unlock_irq(&phba->hbalock); 1941 } 1942 1943 lpfc_offline_prep(phba, mbx_action); 1944 lpfc_sli_flush_io_rings(phba); 1945 lpfc_offline(phba); 1946 /* release interrupt for possible resource change */ 1947 lpfc_sli4_disable_intr(phba); 1948 rc = lpfc_sli_brdrestart(phba); 1949 if (rc) { 1950 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1951 "6309 Failed to restart board\n"); 1952 return rc; 1953 } 1954 /* request and enable interrupt */ 1955 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1956 if (intr_mode == LPFC_INTR_ERROR) { 1957 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1958 "3175 Failed to enable interrupt\n"); 1959 return -EIO; 1960 } 1961 phba->intr_mode = intr_mode; 1962 rc = lpfc_online(phba); 1963 if (rc == 0) 1964 lpfc_unblock_mgmt_io(phba); 1965 1966 return rc; 1967 } 1968 1969 /** 1970 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1971 * @phba: pointer to lpfc hba data structure. 1972 * 1973 * This routine is invoked to handle the SLI4 HBA hardware error attention 1974 * conditions. 
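 * Depending on the SLI interface type, the handler either waits for an
 * unrecoverable error (UE) to report as recoverable (if_type 0) or decodes
 * the port status and error registers (if_type 2/6), and then attempts a
 * port function reset; if recovery is not possible, the port is marked in
 * error and a dump event is reported to the management application.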
1975 **/ 1976 static void 1977 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1978 { 1979 struct lpfc_vport *vport = phba->pport; 1980 uint32_t event_data; 1981 struct Scsi_Host *shost; 1982 uint32_t if_type; 1983 struct lpfc_register portstat_reg = {0}; 1984 uint32_t reg_err1, reg_err2; 1985 uint32_t uerrlo_reg, uemasklo_reg; 1986 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1987 bool en_rn_msg = true; 1988 struct temp_event temp_event_data; 1989 struct lpfc_register portsmphr_reg; 1990 int rc, i; 1991 1992 /* If the pci channel is offline, ignore possible errors, since 1993 * we cannot communicate with the pci card anyway. 1994 */ 1995 if (pci_channel_offline(phba->pcidev)) { 1996 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1997 "3166 pci channel is offline\n"); 1998 return; 1999 } 2000 2001 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 2002 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 2003 switch (if_type) { 2004 case LPFC_SLI_INTF_IF_TYPE_0: 2005 pci_rd_rc1 = lpfc_readl( 2006 phba->sli4_hba.u.if_type0.UERRLOregaddr, 2007 &uerrlo_reg); 2008 pci_rd_rc2 = lpfc_readl( 2009 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 2010 &uemasklo_reg); 2011 /* consider PCI bus read error as pci_channel_offline */ 2012 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 2013 return; 2014 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 2015 lpfc_sli4_offline_eratt(phba); 2016 return; 2017 } 2018 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2019 "7623 Checking UE recoverable"); 2020 2021 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 2022 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2023 &portsmphr_reg.word0)) 2024 continue; 2025 2026 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 2027 &portsmphr_reg); 2028 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2029 LPFC_PORT_SEM_UE_RECOVERABLE) 2030 break; 2031 /*Sleep for 1Sec, before checking SEMAPHORE */ 2032 msleep(1000); 2033 } 2034 2035 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2036 "4827 smphr_port_status x%x : Waited %dSec", 2037 smphr_port_status, i); 2038 2039 /* Recoverable UE, reset the HBA device */ 2040 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2041 LPFC_PORT_SEM_UE_RECOVERABLE) { 2042 for (i = 0; i < 20; i++) { 2043 msleep(1000); 2044 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2045 &portsmphr_reg.word0) && 2046 (LPFC_POST_STAGE_PORT_READY == 2047 bf_get(lpfc_port_smphr_port_status, 2048 &portsmphr_reg))) { 2049 rc = lpfc_sli4_port_sta_fn_reset(phba, 2050 LPFC_MBX_NO_WAIT, en_rn_msg); 2051 if (rc == 0) 2052 return; 2053 lpfc_printf_log(phba, KERN_ERR, 2054 LOG_TRACE_EVENT, 2055 "4215 Failed to recover UE"); 2056 break; 2057 } 2058 } 2059 } 2060 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2061 "7624 Firmware not ready: Failing UE recovery," 2062 " waited %dSec", i); 2063 phba->link_state = LPFC_HBA_ERROR; 2064 break; 2065 2066 case LPFC_SLI_INTF_IF_TYPE_2: 2067 case LPFC_SLI_INTF_IF_TYPE_6: 2068 pci_rd_rc1 = lpfc_readl( 2069 phba->sli4_hba.u.if_type2.STATUSregaddr, 2070 &portstat_reg.word0); 2071 /* consider PCI bus read error as pci_channel_offline */ 2072 if (pci_rd_rc1 == -EIO) { 2073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2074 "3151 PCI bus read access failure: x%x\n", 2075 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 2076 lpfc_sli4_offline_eratt(phba); 2077 return; 2078 } 2079 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 2080 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 2081 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 2082 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2083 "2889 Port Overtemperature event, " 2084 "taking port offline Data: x%x x%x\n", 2085 reg_err1, reg_err2); 2086 2087 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 2088 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 2089 temp_event_data.event_code = LPFC_CRIT_TEMP; 2090 temp_event_data.data = 0xFFFFFFFF; 2091 2092 shost = lpfc_shost_from_vport(phba->pport); 2093 fc_host_post_vendor_event(shost, fc_get_event_number(), 2094 sizeof(temp_event_data), 2095 (char *)&temp_event_data, 2096 SCSI_NL_VID_TYPE_PCI 2097 | PCI_VENDOR_ID_EMULEX); 2098 2099 spin_lock_irq(&phba->hbalock); 2100 phba->over_temp_state = HBA_OVER_TEMP; 2101 spin_unlock_irq(&phba->hbalock); 2102 lpfc_sli4_offline_eratt(phba); 2103 return; 2104 } 2105 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2106 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 2107 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2108 "3143 Port Down: Firmware Update " 2109 "Detected\n"); 2110 en_rn_msg = false; 2111 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2112 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2113 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2114 "3144 Port Down: Debug Dump\n"); 2115 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2116 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 2117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2118 "3145 Port Down: Provisioning\n"); 2119 2120 /* If resets are disabled then leave the HBA alone and return */ 2121 if (!phba->cfg_enable_hba_reset) 2122 return; 2123 2124 /* Check port status register for function reset */ 2125 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 2126 en_rn_msg); 2127 if (rc == 0) { 2128 /* don't report event on forced debug dump */ 2129 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2130 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2131 return; 2132 else 2133 break; 2134 } 2135 /* fall through for not able to recover */ 2136 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2137 "3152 Unrecoverable error\n"); 2138 phba->link_state = LPFC_HBA_ERROR; 2139 break; 2140 case LPFC_SLI_INTF_IF_TYPE_1: 2141 default: 2142 break; 2143 } 2144 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2145 "3123 Report dump event to upper layer\n"); 2146 /* Send an internal error event to mgmt application */ 2147 lpfc_board_errevt_to_mgmt(phba); 2148 2149 event_data = FC_REG_DUMP_EVENT; 2150 shost = lpfc_shost_from_vport(vport); 2151 fc_host_post_vendor_event(shost, fc_get_event_number(), 2152 sizeof(event_data), (char *) &event_data, 2153 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2154 } 2155 2156 /** 2157 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2158 * @phba: pointer to lpfc HBA data structure. 2159 * 2160 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2161 * routine from the API jump table function pointer from the lpfc_hba struct. 2162 * 2163 * Return codes 2164 * 0 - success. 2165 * Any other value - error. 2166 **/ 2167 void 2168 lpfc_handle_eratt(struct lpfc_hba *phba) 2169 { 2170 (*phba->lpfc_handle_eratt)(phba); 2171 } 2172 2173 /** 2174 * lpfc_handle_latt - The HBA link event handler 2175 * @phba: pointer to lpfc hba data structure. 2176 * 2177 * This routine is invoked from the worker thread to handle a HBA host 2178 * attention link event. SLI3 only. 
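 * A READ_TOPOLOGY mailbox command is issued to retrieve the new link
 * state and the link attention bit is cleared in the host attention
 * register; if the mailbox cannot be issued, link attention interrupts
 * are re-enabled, the link is brought down and the HBA is marked in error.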
2179 **/ 2180 void 2181 lpfc_handle_latt(struct lpfc_hba *phba) 2182 { 2183 struct lpfc_vport *vport = phba->pport; 2184 struct lpfc_sli *psli = &phba->sli; 2185 LPFC_MBOXQ_t *pmb; 2186 volatile uint32_t control; 2187 struct lpfc_dmabuf *mp; 2188 int rc = 0; 2189 2190 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2191 if (!pmb) { 2192 rc = 1; 2193 goto lpfc_handle_latt_err_exit; 2194 } 2195 2196 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2197 if (!mp) { 2198 rc = 2; 2199 goto lpfc_handle_latt_free_pmb; 2200 } 2201 2202 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2203 if (!mp->virt) { 2204 rc = 3; 2205 goto lpfc_handle_latt_free_mp; 2206 } 2207 2208 /* Cleanup any outstanding ELS commands */ 2209 lpfc_els_flush_all_cmd(phba); 2210 2211 psli->slistat.link_event++; 2212 lpfc_read_topology(phba, pmb, mp); 2213 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2214 pmb->vport = vport; 2215 /* Block ELS IOCBs until we have processed this mbox command */ 2216 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2217 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2218 if (rc == MBX_NOT_FINISHED) { 2219 rc = 4; 2220 goto lpfc_handle_latt_free_mbuf; 2221 } 2222 2223 /* Clear Link Attention in HA REG */ 2224 spin_lock_irq(&phba->hbalock); 2225 writel(HA_LATT, phba->HAregaddr); 2226 readl(phba->HAregaddr); /* flush */ 2227 spin_unlock_irq(&phba->hbalock); 2228 2229 return; 2230 2231 lpfc_handle_latt_free_mbuf: 2232 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2233 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2234 lpfc_handle_latt_free_mp: 2235 kfree(mp); 2236 lpfc_handle_latt_free_pmb: 2237 mempool_free(pmb, phba->mbox_mem_pool); 2238 lpfc_handle_latt_err_exit: 2239 /* Enable Link attention interrupts */ 2240 spin_lock_irq(&phba->hbalock); 2241 psli->sli_flag |= LPFC_PROCESS_LA; 2242 control = readl(phba->HCregaddr); 2243 control |= HC_LAINT_ENA; 2244 writel(control, phba->HCregaddr); 2245 readl(phba->HCregaddr); /* flush */ 2246 2247 /* Clear Link Attention in HA REG */ 2248 writel(HA_LATT, phba->HAregaddr); 2249 readl(phba->HAregaddr); /* flush */ 2250 spin_unlock_irq(&phba->hbalock); 2251 lpfc_linkdown(phba); 2252 phba->link_state = LPFC_HBA_ERROR; 2253 2254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2255 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2256 2257 return; 2258 } 2259 2260 /** 2261 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2262 * @phba: pointer to lpfc hba data structure. 2263 * @vpd: pointer to the vital product data. 2264 * @len: length of the vital product data in bytes. 2265 * 2266 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2267 * an array of characters. In this routine, the ModelName, ProgramType, and 2268 * ModelDesc, etc. fields of the phba data structure will be populated. 
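 * The buffer is walked as standard PCI VPD resource tags: the identifier
 * string (0x82) and read/write area (0x91) are skipped, the read-only
 * area (0x90) is searched for the SN and V1-V4 keywords, and the end
 * tag (0x78) terminates parsing.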
2269 * 2270 * Return codes 2271 * 0 - pointer to the VPD passed in is NULL 2272 * 1 - success 2273 **/ 2274 int 2275 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2276 { 2277 uint8_t lenlo, lenhi; 2278 int Length; 2279 int i, j; 2280 int finished = 0; 2281 int index = 0; 2282 2283 if (!vpd) 2284 return 0; 2285 2286 /* Vital Product */ 2287 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2288 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2289 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2290 (uint32_t) vpd[3]); 2291 while (!finished && (index < (len - 4))) { 2292 switch (vpd[index]) { 2293 case 0x82: 2294 case 0x91: 2295 index += 1; 2296 lenlo = vpd[index]; 2297 index += 1; 2298 lenhi = vpd[index]; 2299 index += 1; 2300 i = ((((unsigned short)lenhi) << 8) + lenlo); 2301 index += i; 2302 break; 2303 case 0x90: 2304 index += 1; 2305 lenlo = vpd[index]; 2306 index += 1; 2307 lenhi = vpd[index]; 2308 index += 1; 2309 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2310 if (Length > len - index) 2311 Length = len - index; 2312 while (Length > 0) { 2313 /* Look for Serial Number */ 2314 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2315 index += 2; 2316 i = vpd[index]; 2317 index += 1; 2318 j = 0; 2319 Length -= (3+i); 2320 while(i--) { 2321 phba->SerialNumber[j++] = vpd[index++]; 2322 if (j == 31) 2323 break; 2324 } 2325 phba->SerialNumber[j] = 0; 2326 continue; 2327 } 2328 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2329 phba->vpd_flag |= VPD_MODEL_DESC; 2330 index += 2; 2331 i = vpd[index]; 2332 index += 1; 2333 j = 0; 2334 Length -= (3+i); 2335 while(i--) { 2336 phba->ModelDesc[j++] = vpd[index++]; 2337 if (j == 255) 2338 break; 2339 } 2340 phba->ModelDesc[j] = 0; 2341 continue; 2342 } 2343 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2344 phba->vpd_flag |= VPD_MODEL_NAME; 2345 index += 2; 2346 i = vpd[index]; 2347 index += 1; 2348 j = 0; 2349 Length -= (3+i); 2350 while(i--) { 2351 phba->ModelName[j++] = vpd[index++]; 2352 if (j == 79) 2353 break; 2354 } 2355 phba->ModelName[j] = 0; 2356 continue; 2357 } 2358 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2359 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2360 index += 2; 2361 i = vpd[index]; 2362 index += 1; 2363 j = 0; 2364 Length -= (3+i); 2365 while(i--) { 2366 phba->ProgramType[j++] = vpd[index++]; 2367 if (j == 255) 2368 break; 2369 } 2370 phba->ProgramType[j] = 0; 2371 continue; 2372 } 2373 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2374 phba->vpd_flag |= VPD_PORT; 2375 index += 2; 2376 i = vpd[index]; 2377 index += 1; 2378 j = 0; 2379 Length -= (3+i); 2380 while(i--) { 2381 if ((phba->sli_rev == LPFC_SLI_REV4) && 2382 (phba->sli4_hba.pport_name_sta == 2383 LPFC_SLI4_PPNAME_GET)) { 2384 j++; 2385 index++; 2386 } else 2387 phba->Port[j++] = vpd[index++]; 2388 if (j == 19) 2389 break; 2390 } 2391 if ((phba->sli_rev != LPFC_SLI_REV4) || 2392 (phba->sli4_hba.pport_name_sta == 2393 LPFC_SLI4_PPNAME_NON)) 2394 phba->Port[j] = 0; 2395 continue; 2396 } 2397 else { 2398 index += 2; 2399 i = vpd[index]; 2400 index += 1; 2401 index += i; 2402 Length -= (3 + i); 2403 } 2404 } 2405 finished = 0; 2406 break; 2407 case 0x78: 2408 finished = 1; 2409 break; 2410 default: 2411 index ++; 2412 break; 2413 } 2414 } 2415 2416 return(1); 2417 } 2418 2419 /** 2420 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2421 * @phba: pointer to lpfc hba data structure. 2422 * @mdp: pointer to the data structure to hold the derived model name. 
2423 * @descp: pointer to the data structure to hold the derived description. 2424 * 2425 * This routine retrieves HBA's description based on its registered PCI device 2426 * ID. The @descp passed into this function points to an array of 256 chars. It 2427 * shall be returned with the model name, maximum speed, and the host bus type. 2428 * The @mdp passed into this function points to an array of 80 chars. When the 2429 * function returns, the @mdp will be filled with the model name. 2430 **/ 2431 static void 2432 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2433 { 2434 lpfc_vpd_t *vp; 2435 uint16_t dev_id = phba->pcidev->device; 2436 int max_speed; 2437 int GE = 0; 2438 int oneConnect = 0; /* default is not a oneConnect */ 2439 struct { 2440 char *name; 2441 char *bus; 2442 char *function; 2443 } m = {"<Unknown>", "", ""}; 2444 2445 if (mdp && mdp[0] != '\0' 2446 && descp && descp[0] != '\0') 2447 return; 2448 2449 if (phba->lmt & LMT_64Gb) 2450 max_speed = 64; 2451 else if (phba->lmt & LMT_32Gb) 2452 max_speed = 32; 2453 else if (phba->lmt & LMT_16Gb) 2454 max_speed = 16; 2455 else if (phba->lmt & LMT_10Gb) 2456 max_speed = 10; 2457 else if (phba->lmt & LMT_8Gb) 2458 max_speed = 8; 2459 else if (phba->lmt & LMT_4Gb) 2460 max_speed = 4; 2461 else if (phba->lmt & LMT_2Gb) 2462 max_speed = 2; 2463 else if (phba->lmt & LMT_1Gb) 2464 max_speed = 1; 2465 else 2466 max_speed = 0; 2467 2468 vp = &phba->vpd; 2469 2470 switch (dev_id) { 2471 case PCI_DEVICE_ID_FIREFLY: 2472 m = (typeof(m)){"LP6000", "PCI", 2473 "Obsolete, Unsupported Fibre Channel Adapter"}; 2474 break; 2475 case PCI_DEVICE_ID_SUPERFLY: 2476 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2477 m = (typeof(m)){"LP7000", "PCI", ""}; 2478 else 2479 m = (typeof(m)){"LP7000E", "PCI", ""}; 2480 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2481 break; 2482 case PCI_DEVICE_ID_DRAGONFLY: 2483 m = (typeof(m)){"LP8000", "PCI", 2484 "Obsolete, Unsupported Fibre Channel Adapter"}; 2485 break; 2486 case PCI_DEVICE_ID_CENTAUR: 2487 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2488 m = (typeof(m)){"LP9002", "PCI", ""}; 2489 else 2490 m = (typeof(m)){"LP9000", "PCI", ""}; 2491 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2492 break; 2493 case PCI_DEVICE_ID_RFLY: 2494 m = (typeof(m)){"LP952", "PCI", 2495 "Obsolete, Unsupported Fibre Channel Adapter"}; 2496 break; 2497 case PCI_DEVICE_ID_PEGASUS: 2498 m = (typeof(m)){"LP9802", "PCI-X", 2499 "Obsolete, Unsupported Fibre Channel Adapter"}; 2500 break; 2501 case PCI_DEVICE_ID_THOR: 2502 m = (typeof(m)){"LP10000", "PCI-X", 2503 "Obsolete, Unsupported Fibre Channel Adapter"}; 2504 break; 2505 case PCI_DEVICE_ID_VIPER: 2506 m = (typeof(m)){"LPX1000", "PCI-X", 2507 "Obsolete, Unsupported Fibre Channel Adapter"}; 2508 break; 2509 case PCI_DEVICE_ID_PFLY: 2510 m = (typeof(m)){"LP982", "PCI-X", 2511 "Obsolete, Unsupported Fibre Channel Adapter"}; 2512 break; 2513 case PCI_DEVICE_ID_TFLY: 2514 m = (typeof(m)){"LP1050", "PCI-X", 2515 "Obsolete, Unsupported Fibre Channel Adapter"}; 2516 break; 2517 case PCI_DEVICE_ID_HELIOS: 2518 m = (typeof(m)){"LP11000", "PCI-X2", 2519 "Obsolete, Unsupported Fibre Channel Adapter"}; 2520 break; 2521 case PCI_DEVICE_ID_HELIOS_SCSP: 2522 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2523 "Obsolete, Unsupported Fibre Channel Adapter"}; 2524 break; 2525 case PCI_DEVICE_ID_HELIOS_DCSP: 2526 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2527 "Obsolete, Unsupported Fibre Channel Adapter"}; 2528 break; 2529 case 
PCI_DEVICE_ID_NEPTUNE: 2530 m = (typeof(m)){"LPe1000", "PCIe", 2531 "Obsolete, Unsupported Fibre Channel Adapter"}; 2532 break; 2533 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2534 m = (typeof(m)){"LPe1000-SP", "PCIe", 2535 "Obsolete, Unsupported Fibre Channel Adapter"}; 2536 break; 2537 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2538 m = (typeof(m)){"LPe1002-SP", "PCIe", 2539 "Obsolete, Unsupported Fibre Channel Adapter"}; 2540 break; 2541 case PCI_DEVICE_ID_BMID: 2542 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2543 break; 2544 case PCI_DEVICE_ID_BSMB: 2545 m = (typeof(m)){"LP111", "PCI-X2", 2546 "Obsolete, Unsupported Fibre Channel Adapter"}; 2547 break; 2548 case PCI_DEVICE_ID_ZEPHYR: 2549 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2550 break; 2551 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2552 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2553 break; 2554 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2555 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2556 GE = 1; 2557 break; 2558 case PCI_DEVICE_ID_ZMID: 2559 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2560 break; 2561 case PCI_DEVICE_ID_ZSMB: 2562 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2563 break; 2564 case PCI_DEVICE_ID_LP101: 2565 m = (typeof(m)){"LP101", "PCI-X", 2566 "Obsolete, Unsupported Fibre Channel Adapter"}; 2567 break; 2568 case PCI_DEVICE_ID_LP10000S: 2569 m = (typeof(m)){"LP10000-S", "PCI", 2570 "Obsolete, Unsupported Fibre Channel Adapter"}; 2571 break; 2572 case PCI_DEVICE_ID_LP11000S: 2573 m = (typeof(m)){"LP11000-S", "PCI-X2", 2574 "Obsolete, Unsupported Fibre Channel Adapter"}; 2575 break; 2576 case PCI_DEVICE_ID_LPE11000S: 2577 m = (typeof(m)){"LPe11000-S", "PCIe", 2578 "Obsolete, Unsupported Fibre Channel Adapter"}; 2579 break; 2580 case PCI_DEVICE_ID_SAT: 2581 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2582 break; 2583 case PCI_DEVICE_ID_SAT_MID: 2584 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2585 break; 2586 case PCI_DEVICE_ID_SAT_SMB: 2587 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2588 break; 2589 case PCI_DEVICE_ID_SAT_DCSP: 2590 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2591 break; 2592 case PCI_DEVICE_ID_SAT_SCSP: 2593 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2594 break; 2595 case PCI_DEVICE_ID_SAT_S: 2596 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2597 break; 2598 case PCI_DEVICE_ID_HORNET: 2599 m = (typeof(m)){"LP21000", "PCIe", 2600 "Obsolete, Unsupported FCoE Adapter"}; 2601 GE = 1; 2602 break; 2603 case PCI_DEVICE_ID_PROTEUS_VF: 2604 m = (typeof(m)){"LPev12000", "PCIe IOV", 2605 "Obsolete, Unsupported Fibre Channel Adapter"}; 2606 break; 2607 case PCI_DEVICE_ID_PROTEUS_PF: 2608 m = (typeof(m)){"LPev12000", "PCIe IOV", 2609 "Obsolete, Unsupported Fibre Channel Adapter"}; 2610 break; 2611 case PCI_DEVICE_ID_PROTEUS_S: 2612 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2613 "Obsolete, Unsupported Fibre Channel Adapter"}; 2614 break; 2615 case PCI_DEVICE_ID_TIGERSHARK: 2616 oneConnect = 1; 2617 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2618 break; 2619 case PCI_DEVICE_ID_TOMCAT: 2620 oneConnect = 1; 2621 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2622 break; 2623 case PCI_DEVICE_ID_FALCON: 2624 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2625 "EmulexSecure Fibre"}; 2626 break; 2627 case PCI_DEVICE_ID_BALIUS: 2628 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2629 "Obsolete, Unsupported Fibre Channel Adapter"}; 2630 break; 2631 
case PCI_DEVICE_ID_LANCER_FC: 2632 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2633 break; 2634 case PCI_DEVICE_ID_LANCER_FC_VF: 2635 m = (typeof(m)){"LPe16000", "PCIe", 2636 "Obsolete, Unsupported Fibre Channel Adapter"}; 2637 break; 2638 case PCI_DEVICE_ID_LANCER_FCOE: 2639 oneConnect = 1; 2640 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2641 break; 2642 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2643 oneConnect = 1; 2644 m = (typeof(m)){"OCe15100", "PCIe", 2645 "Obsolete, Unsupported FCoE"}; 2646 break; 2647 case PCI_DEVICE_ID_LANCER_G6_FC: 2648 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2649 break; 2650 case PCI_DEVICE_ID_LANCER_G7_FC: 2651 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2652 break; 2653 case PCI_DEVICE_ID_LANCER_G7P_FC: 2654 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; 2655 break; 2656 case PCI_DEVICE_ID_SKYHAWK: 2657 case PCI_DEVICE_ID_SKYHAWK_VF: 2658 oneConnect = 1; 2659 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2660 break; 2661 default: 2662 m = (typeof(m)){"Unknown", "", ""}; 2663 break; 2664 } 2665 2666 if (mdp && mdp[0] == '\0') 2667 snprintf(mdp, 79,"%s", m.name); 2668 /* 2669 * oneConnect hba requires special processing, they are all initiators 2670 * and we put the port number on the end 2671 */ 2672 if (descp && descp[0] == '\0') { 2673 if (oneConnect) 2674 snprintf(descp, 255, 2675 "Emulex OneConnect %s, %s Initiator %s", 2676 m.name, m.function, 2677 phba->Port); 2678 else if (max_speed == 0) 2679 snprintf(descp, 255, 2680 "Emulex %s %s %s", 2681 m.name, m.bus, m.function); 2682 else 2683 snprintf(descp, 255, 2684 "Emulex %s %d%s %s %s", 2685 m.name, max_speed, (GE) ? "GE" : "Gb", 2686 m.bus, m.function); 2687 } 2688 } 2689 2690 /** 2691 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2692 * @phba: pointer to lpfc hba data structure. 2693 * @pring: pointer to a IOCB ring. 2694 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2695 * 2696 * This routine posts a given number of IOCBs with the associated DMA buffer 2697 * descriptors specified by the cnt argument to the given IOCB ring. 2698 * 2699 * Return codes 2700 * The number of IOCBs NOT able to be posted to the IOCB ring. 
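 *
 * Note: each IOCB carries up to two receive buffers posted with a
 * CMD_QUE_RING_BUF64_CN command; buffers that cannot be posted are
 * counted in pring->missbufcnt and retried on the next call.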
2701 **/ 2702 int 2703 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2704 { 2705 IOCB_t *icmd; 2706 struct lpfc_iocbq *iocb; 2707 struct lpfc_dmabuf *mp1, *mp2; 2708 2709 cnt += pring->missbufcnt; 2710 2711 /* While there are buffers to post */ 2712 while (cnt > 0) { 2713 /* Allocate buffer for command iocb */ 2714 iocb = lpfc_sli_get_iocbq(phba); 2715 if (iocb == NULL) { 2716 pring->missbufcnt = cnt; 2717 return cnt; 2718 } 2719 icmd = &iocb->iocb; 2720 2721 /* 2 buffers can be posted per command */ 2722 /* Allocate buffer to post */ 2723 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2724 if (mp1) 2725 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2726 if (!mp1 || !mp1->virt) { 2727 kfree(mp1); 2728 lpfc_sli_release_iocbq(phba, iocb); 2729 pring->missbufcnt = cnt; 2730 return cnt; 2731 } 2732 2733 INIT_LIST_HEAD(&mp1->list); 2734 /* Allocate buffer to post */ 2735 if (cnt > 1) { 2736 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2737 if (mp2) 2738 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2739 &mp2->phys); 2740 if (!mp2 || !mp2->virt) { 2741 kfree(mp2); 2742 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2743 kfree(mp1); 2744 lpfc_sli_release_iocbq(phba, iocb); 2745 pring->missbufcnt = cnt; 2746 return cnt; 2747 } 2748 2749 INIT_LIST_HEAD(&mp2->list); 2750 } else { 2751 mp2 = NULL; 2752 } 2753 2754 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2755 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2756 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2757 icmd->ulpBdeCount = 1; 2758 cnt--; 2759 if (mp2) { 2760 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2761 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2762 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2763 cnt--; 2764 icmd->ulpBdeCount = 2; 2765 } 2766 2767 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2768 icmd->ulpLe = 1; 2769 2770 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2771 IOCB_ERROR) { 2772 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2773 kfree(mp1); 2774 cnt++; 2775 if (mp2) { 2776 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2777 kfree(mp2); 2778 cnt++; 2779 } 2780 lpfc_sli_release_iocbq(phba, iocb); 2781 pring->missbufcnt = cnt; 2782 return cnt; 2783 } 2784 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2785 if (mp2) 2786 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2787 } 2788 pring->missbufcnt = 0; 2789 return 0; 2790 } 2791 2792 /** 2793 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2794 * @phba: pointer to lpfc hba data structure. 2795 * 2796 * This routine posts initial receive IOCB buffers to the ELS ring. The 2797 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2798 * set to 64 IOCBs. SLI3 only. 2799 * 2800 * Return codes 2801 * 0 - success (currently always success) 2802 **/ 2803 static int 2804 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2805 { 2806 struct lpfc_sli *psli = &phba->sli; 2807 2808 /* Ring 0, ELS / CT buffers */ 2809 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2810 /* Ring 2 - FCP no buffers needed */ 2811 2812 return 0; 2813 } 2814 2815 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2816 2817 /** 2818 * lpfc_sha_init - Set up initial array of hash table entries 2819 * @HashResultPointer: pointer to an array as hash table. 2820 * 2821 * This routine sets up the initial values to the array of hash table entries 2822 * for the LC HBAs. 
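 * The five values written below are the standard SHA-1 initial hash
 * constants (H0-H4); lpfc_sha_iterate() then applies the usual 80-round
 * compression over the working array.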
2823 **/ 2824 static void 2825 lpfc_sha_init(uint32_t * HashResultPointer) 2826 { 2827 HashResultPointer[0] = 0x67452301; 2828 HashResultPointer[1] = 0xEFCDAB89; 2829 HashResultPointer[2] = 0x98BADCFE; 2830 HashResultPointer[3] = 0x10325476; 2831 HashResultPointer[4] = 0xC3D2E1F0; 2832 } 2833 2834 /** 2835 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2836 * @HashResultPointer: pointer to an initial/result hash table. 2837 * @HashWorkingPointer: pointer to a working hash table. 2838 * 2839 * This routine iterates an initial hash table pointed to by @HashResultPointer 2840 * with the values from the working hash table pointed to by @HashWorkingPointer. 2841 * The results are put back into the initial hash table and returned through 2842 * @HashResultPointer as the result hash table. 2843 **/ 2844 static void 2845 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2846 { 2847 int t; 2848 uint32_t TEMP; 2849 uint32_t A, B, C, D, E; 2850 t = 16; 2851 do { 2852 HashWorkingPointer[t] = 2853 S(1, 2854 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2855 8] ^ 2856 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2857 } while (++t <= 79); 2858 t = 0; 2859 A = HashResultPointer[0]; 2860 B = HashResultPointer[1]; 2861 C = HashResultPointer[2]; 2862 D = HashResultPointer[3]; 2863 E = HashResultPointer[4]; 2864 2865 do { 2866 if (t < 20) { 2867 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2868 } else if (t < 40) { 2869 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2870 } else if (t < 60) { 2871 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2872 } else { 2873 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2874 } 2875 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2876 E = D; 2877 D = C; 2878 C = S(30, B); 2879 B = A; 2880 A = TEMP; 2881 } while (++t <= 79); 2882 2883 HashResultPointer[0] += A; 2884 HashResultPointer[1] += B; 2885 HashResultPointer[2] += C; 2886 HashResultPointer[3] += D; 2887 HashResultPointer[4] += E; 2888 2889 } 2890 2891 /** 2892 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2893 * @RandomChallenge: pointer to the entry of host challenge random number array. 2894 * @HashWorking: pointer to the entry of the working hash array. 2895 * 2896 * This routine calculates the working hash array referred to by @HashWorking 2897 * from the challenge random numbers associated with the host, referred to by 2898 * @RandomChallenge. The result is put into the entry of the working hash 2899 * array and returned by reference through @HashWorking. 2900 **/ 2901 static void 2902 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2903 { 2904 *HashWorking = (*RandomChallenge ^ *HashWorking); 2905 } 2906 2907 /** 2908 * lpfc_hba_init - Perform special handling for LC HBA initialization 2909 * @phba: pointer to lpfc hba data structure. 2910 * @hbainit: pointer to an array of unsigned 32-bit integers. 2911 * 2912 * This routine performs the special handling for LC HBA initialization.
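 * The 80-word working array is seeded with the adapter WWNN, mixed with
 * the per-adapter RandomData challenge words, and run through the SHA-1
 * style digest above; the resulting five words are returned in @hbainit.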
2913 **/ 2914 void 2915 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2916 { 2917 int t; 2918 uint32_t *HashWorking; 2919 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2920 2921 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2922 if (!HashWorking) 2923 return; 2924 2925 HashWorking[0] = HashWorking[78] = *pwwnn++; 2926 HashWorking[1] = HashWorking[79] = *pwwnn; 2927 2928 for (t = 0; t < 7; t++) 2929 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2930 2931 lpfc_sha_init(hbainit); 2932 lpfc_sha_iterate(hbainit, HashWorking); 2933 kfree(HashWorking); 2934 } 2935 2936 /** 2937 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2938 * @vport: pointer to a virtual N_Port data structure. 2939 * 2940 * This routine performs the necessary cleanups before deleting the @vport. 2941 * It invokes the discovery state machine to perform necessary state 2942 * transitions and to release the ndlps associated with the @vport. Note, 2943 * the physical port is treated as @vport 0. 2944 **/ 2945 void 2946 lpfc_cleanup(struct lpfc_vport *vport) 2947 { 2948 struct lpfc_hba *phba = vport->phba; 2949 struct lpfc_nodelist *ndlp, *next_ndlp; 2950 int i = 0; 2951 2952 if (phba->link_state > LPFC_LINK_DOWN) 2953 lpfc_port_link_failure(vport); 2954 2955 /* Clean up VMID resources */ 2956 if (lpfc_is_vmid_enabled(phba)) 2957 lpfc_vmid_vport_cleanup(vport); 2958 2959 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2960 if (vport->port_type != LPFC_PHYSICAL_PORT && 2961 ndlp->nlp_DID == Fabric_DID) { 2962 /* Just free up ndlp with Fabric_DID for vports */ 2963 lpfc_nlp_put(ndlp); 2964 continue; 2965 } 2966 2967 if (ndlp->nlp_DID == Fabric_Cntl_DID && 2968 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2969 lpfc_nlp_put(ndlp); 2970 continue; 2971 } 2972 2973 /* Fabric Ports not in UNMAPPED state are cleaned up in the 2974 * DEVICE_RM event. 2975 */ 2976 if (ndlp->nlp_type & NLP_FABRIC && 2977 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 2978 lpfc_disc_state_machine(vport, ndlp, NULL, 2979 NLP_EVT_DEVICE_RECOVERY); 2980 2981 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD))) 2982 lpfc_disc_state_machine(vport, ndlp, NULL, 2983 NLP_EVT_DEVICE_RM); 2984 } 2985 2986 /* At this point, ALL ndlp's should be gone 2987 * because of the previous NLP_EVT_DEVICE_RM. 2988 * Lets wait for this to happen, if needed. 2989 */ 2990 while (!list_empty(&vport->fc_nodes)) { 2991 if (i++ > 3000) { 2992 lpfc_printf_vlog(vport, KERN_ERR, 2993 LOG_TRACE_EVENT, 2994 "0233 Nodelist not empty\n"); 2995 list_for_each_entry_safe(ndlp, next_ndlp, 2996 &vport->fc_nodes, nlp_listp) { 2997 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2998 LOG_TRACE_EVENT, 2999 "0282 did:x%x ndlp:x%px " 3000 "refcnt:%d xflags x%x nflag x%x\n", 3001 ndlp->nlp_DID, (void *)ndlp, 3002 kref_read(&ndlp->kref), 3003 ndlp->fc4_xpt_flags, 3004 ndlp->nlp_flag); 3005 } 3006 break; 3007 } 3008 3009 /* Wait for any activity on ndlps to settle */ 3010 msleep(10); 3011 } 3012 lpfc_cleanup_vports_rrqs(vport, NULL); 3013 } 3014 3015 /** 3016 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 3017 * @vport: pointer to a virtual N_Port data structure. 3018 * 3019 * This routine stops all the timers associated with a @vport. This function 3020 * is invoked before disabling or deleting a @vport. Note that the physical 3021 * port is treated as @vport 0. 
3022 **/ 3023 void 3024 lpfc_stop_vport_timers(struct lpfc_vport *vport) 3025 { 3026 del_timer_sync(&vport->els_tmofunc); 3027 del_timer_sync(&vport->delayed_disc_tmo); 3028 lpfc_can_disctmo(vport); 3029 return; 3030 } 3031 3032 /** 3033 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 3034 * @phba: pointer to lpfc hba data structure. 3035 * 3036 * This routine stops the SLI4 FCF rediscovery wait timer if it's on. The 3037 * caller of this routine should already hold the host lock. 3038 **/ 3039 void 3040 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 3041 { 3042 /* Clear pending FCF rediscovery wait flag */ 3043 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3044 3045 /* Now, try to stop the timer */ 3046 del_timer(&phba->fcf.redisc_wait); 3047 } 3048 3049 /** 3050 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 3051 * @phba: pointer to lpfc hba data structure. 3052 * 3053 * This routine stops the SLI4 FCF rediscovery wait timer if it's on. It 3054 * checks, with the host lock held, whether the FCF rediscovery wait timer 3055 * is pending before proceeding with disabling the timer and clearing the 3056 * wait timer pending flag. 3057 **/ 3058 void 3059 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 3060 { 3061 spin_lock_irq(&phba->hbalock); 3062 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3063 /* FCF rediscovery timer already fired or stopped */ 3064 spin_unlock_irq(&phba->hbalock); 3065 return; 3066 } 3067 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3068 /* Clear failover in progress flags */ 3069 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 3070 spin_unlock_irq(&phba->hbalock); 3071 } 3072 3073 /** 3074 * lpfc_cmf_stop - Stop CMF processing 3075 * @phba: pointer to lpfc hba data structure. 3076 * 3077 * This is called when the link goes down or if CMF mode is turned OFF. 3078 * It is also called when going offline or unloaded just before the 3079 * congestion info buffer is unregistered.
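 * Stopping CMF cancels the congestion management hrtimer, zeroes the
 * per-CPU congestion statistics, and queues work to unblock any I/O that
 * was held back for bandwidth management.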
3080 **/ 3081 void 3082 lpfc_cmf_stop(struct lpfc_hba *phba) 3083 { 3084 int cpu; 3085 struct lpfc_cgn_stat *cgs; 3086 3087 /* We only do something if CMF is enabled */ 3088 if (!phba->sli4_hba.pc_sli4_params.cmf) 3089 return; 3090 3091 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3092 "6221 Stop CMF / Cancel Timer\n"); 3093 3094 /* Cancel the CMF timer */ 3095 hrtimer_cancel(&phba->cmf_timer); 3096 3097 /* Zero CMF counters */ 3098 atomic_set(&phba->cmf_busy, 0); 3099 for_each_present_cpu(cpu) { 3100 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3101 atomic64_set(&cgs->total_bytes, 0); 3102 atomic64_set(&cgs->rcv_bytes, 0); 3103 atomic_set(&cgs->rx_io_cnt, 0); 3104 atomic64_set(&cgs->rx_latency, 0); 3105 } 3106 atomic_set(&phba->cmf_bw_wait, 0); 3107 3108 /* Resume any blocked IO - Queue unblock on workqueue */ 3109 queue_work(phba->wq, &phba->unblock_request_work); 3110 } 3111 3112 static inline uint64_t 3113 lpfc_get_max_line_rate(struct lpfc_hba *phba) 3114 { 3115 uint64_t rate = lpfc_sli_port_speed_get(phba); 3116 3117 return ((((unsigned long)rate) * 1024 * 1024) / 10); 3118 } 3119 3120 void 3121 lpfc_cmf_signal_init(struct lpfc_hba *phba) 3122 { 3123 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3124 "6223 Signal CMF init\n"); 3125 3126 /* Use the new fc_linkspeed to recalculate */ 3127 phba->cmf_interval_rate = LPFC_CMF_INTERVAL; 3128 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); 3129 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 3130 phba->cmf_interval_rate, 1000); 3131 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; 3132 3133 /* This is a signal to firmware to sync up CMF BW with link speed */ 3134 lpfc_issue_cmf_sync_wqe(phba, 0, 0); 3135 } 3136 3137 /** 3138 * lpfc_cmf_start - Start CMF processing 3139 * @phba: pointer to lpfc hba data structure. 3140 * 3141 * This is called when the link comes up or if CMF mode is turned OFF 3142 * to Monitor or Managed. 3143 **/ 3144 void 3145 lpfc_cmf_start(struct lpfc_hba *phba) 3146 { 3147 struct lpfc_cgn_stat *cgs; 3148 int cpu; 3149 3150 /* We only do something if CMF is enabled */ 3151 if (!phba->sli4_hba.pc_sli4_params.cmf || 3152 phba->cmf_active_mode == LPFC_CFG_OFF) 3153 return; 3154 3155 /* Reinitialize congestion buffer info */ 3156 lpfc_init_congestion_buf(phba); 3157 3158 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 3159 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 3160 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 3161 atomic_set(&phba->cgn_sync_warn_cnt, 0); 3162 3163 atomic_set(&phba->cmf_busy, 0); 3164 for_each_present_cpu(cpu) { 3165 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3166 atomic64_set(&cgs->total_bytes, 0); 3167 atomic64_set(&cgs->rcv_bytes, 0); 3168 atomic_set(&cgs->rx_io_cnt, 0); 3169 atomic64_set(&cgs->rx_latency, 0); 3170 } 3171 phba->cmf_latency.tv_sec = 0; 3172 phba->cmf_latency.tv_nsec = 0; 3173 3174 lpfc_cmf_signal_init(phba); 3175 3176 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3177 "6222 Start CMF / Timer\n"); 3178 3179 phba->cmf_timer_cnt = 0; 3180 hrtimer_start(&phba->cmf_timer, 3181 ktime_set(0, LPFC_CMF_INTERVAL * 1000000), 3182 HRTIMER_MODE_REL); 3183 /* Setup for latency check in IO cmpl routines */ 3184 ktime_get_real_ts64(&phba->cmf_latency); 3185 3186 atomic_set(&phba->cmf_bw_wait, 0); 3187 atomic_set(&phba->cmf_stop_io, 0); 3188 } 3189 3190 /** 3191 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 3192 * @phba: pointer to lpfc hba data structure. 3193 * 3194 * This routine stops all the timers associated with a HBA. 
This function is 3195 * invoked before either putting a HBA offline or unloading the driver. 3196 **/ 3197 void 3198 lpfc_stop_hba_timers(struct lpfc_hba *phba) 3199 { 3200 if (phba->pport) 3201 lpfc_stop_vport_timers(phba->pport); 3202 cancel_delayed_work_sync(&phba->eq_delay_work); 3203 cancel_delayed_work_sync(&phba->idle_stat_delay_work); 3204 del_timer_sync(&phba->sli.mbox_tmo); 3205 del_timer_sync(&phba->fabric_block_timer); 3206 del_timer_sync(&phba->eratt_poll); 3207 del_timer_sync(&phba->hb_tmofunc); 3208 if (phba->sli_rev == LPFC_SLI_REV4) { 3209 del_timer_sync(&phba->rrq_tmr); 3210 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 3211 } 3212 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 3213 3214 switch (phba->pci_dev_grp) { 3215 case LPFC_PCI_DEV_LP: 3216 /* Stop any LightPulse device specific driver timers */ 3217 del_timer_sync(&phba->fcp_poll_timer); 3218 break; 3219 case LPFC_PCI_DEV_OC: 3220 /* Stop any OneConnect device specific driver timers */ 3221 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3222 break; 3223 default: 3224 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3225 "0297 Invalid device group (x%x)\n", 3226 phba->pci_dev_grp); 3227 break; 3228 } 3229 return; 3230 } 3231 3232 /** 3233 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 3234 * @phba: pointer to lpfc hba data structure. 3235 * @mbx_action: flag for mailbox no wait action. 3236 * 3237 * This routine marks a HBA's management interface as blocked. Once the HBA's 3238 * management interface is marked as blocked, all user space access to 3239 * the HBA, whether from the sysfs interface or the libdfc interface, 3240 * will be blocked. The HBA is set to block the management interface when the 3241 * driver prepares the HBA interface for online or offline. 3242 **/ 3243 static void 3244 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 3245 { 3246 unsigned long iflag; 3247 uint8_t actcmd = MBX_HEARTBEAT; 3248 unsigned long timeout; 3249 3250 spin_lock_irqsave(&phba->hbalock, iflag); 3251 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 3252 spin_unlock_irqrestore(&phba->hbalock, iflag); 3253 if (mbx_action == LPFC_MBX_NO_WAIT) 3254 return; 3255 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 3256 spin_lock_irqsave(&phba->hbalock, iflag); 3257 if (phba->sli.mbox_active) { 3258 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 3259 /* Determine how long we might wait for the active mailbox 3260 * command to be gracefully completed by firmware. 3261 */ 3262 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 3263 phba->sli.mbox_active) * 1000) + jiffies; 3264 } 3265 spin_unlock_irqrestore(&phba->hbalock, iflag); 3266 3267 /* Wait for the outstanding mailbox command to complete */ 3268 while (phba->sli.mbox_active) { 3269 /* Check active mailbox complete status every 2ms */ 3270 msleep(2); 3271 if (time_after(jiffies, timeout)) { 3272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3273 "2813 Mgmt IO is Blocked %x " 3274 "- mbox cmd %x still active\n", 3275 phba->sli.sli_flag, actcmd); 3276 break; 3277 } 3278 } 3279 } 3280 3281 /** 3282 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3283 * @phba: pointer to lpfc hba data structure. 3284 * 3285 * Allocate RPIs for all active remote nodes. This is needed whenever 3286 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3287 * is to fix up the temporary rpi assignments.
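 * Vports that are unloading are skipped, and a node is left untouched if
 * no RPI can be allocated for it.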
3288 **/ 3289 void 3290 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3291 { 3292 struct lpfc_nodelist *ndlp, *next_ndlp; 3293 struct lpfc_vport **vports; 3294 int i, rpi; 3295 3296 if (phba->sli_rev != LPFC_SLI_REV4) 3297 return; 3298 3299 vports = lpfc_create_vport_work_array(phba); 3300 if (vports == NULL) 3301 return; 3302 3303 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3304 if (vports[i]->load_flag & FC_UNLOADING) 3305 continue; 3306 3307 list_for_each_entry_safe(ndlp, next_ndlp, 3308 &vports[i]->fc_nodes, 3309 nlp_listp) { 3310 rpi = lpfc_sli4_alloc_rpi(phba); 3311 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3312 /* TODO print log? */ 3313 continue; 3314 } 3315 ndlp->nlp_rpi = rpi; 3316 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3317 LOG_NODE | LOG_DISCOVERY, 3318 "0009 Assign RPI x%x to ndlp x%px " 3319 "DID:x%06x flg:x%x\n", 3320 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, 3321 ndlp->nlp_flag); 3322 } 3323 } 3324 lpfc_destroy_vport_work_array(phba, vports); 3325 } 3326 3327 /** 3328 * lpfc_create_expedite_pool - create expedite pool 3329 * @phba: pointer to lpfc hba data structure. 3330 * 3331 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3332 * to the expedite pool and marks them as expedite. 3333 **/ 3334 static void lpfc_create_expedite_pool(struct lpfc_hba *phba) 3335 { 3336 struct lpfc_sli4_hdw_queue *qp; 3337 struct lpfc_io_buf *lpfc_ncmd; 3338 struct lpfc_io_buf *lpfc_ncmd_next; 3339 struct lpfc_epd_pool *epd_pool; 3340 unsigned long iflag; 3341 3342 epd_pool = &phba->epd_pool; 3343 qp = &phba->sli4_hba.hdwq[0]; 3344 3345 spin_lock_init(&epd_pool->lock); 3346 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3347 spin_lock(&epd_pool->lock); 3348 INIT_LIST_HEAD(&epd_pool->list); 3349 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3350 &qp->lpfc_io_buf_list_put, list) { 3351 list_move_tail(&lpfc_ncmd->list, &epd_pool->list); 3352 lpfc_ncmd->expedite = true; 3353 qp->put_io_bufs--; 3354 epd_pool->count++; 3355 if (epd_pool->count >= XRI_BATCH) 3356 break; 3357 } 3358 spin_unlock(&epd_pool->lock); 3359 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3360 } 3361 3362 /** 3363 * lpfc_destroy_expedite_pool - destroy expedite pool 3364 * @phba: pointer to lpfc hba data structure. 3365 * 3366 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put 3367 * of HWQ 0 and clears the expedite mark. 3368 **/ 3369 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) 3370 { 3371 struct lpfc_sli4_hdw_queue *qp; 3372 struct lpfc_io_buf *lpfc_ncmd; 3373 struct lpfc_io_buf *lpfc_ncmd_next; 3374 struct lpfc_epd_pool *epd_pool; 3375 unsigned long iflag; 3376 3377 epd_pool = &phba->epd_pool; 3378 qp = &phba->sli4_hba.hdwq[0]; 3379 3380 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3381 spin_lock(&epd_pool->lock); 3382 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3383 &epd_pool->list, list) { 3384 list_move_tail(&lpfc_ncmd->list, 3385 &qp->lpfc_io_buf_list_put); 3386 /* clear the expedite mark set by lpfc_create_expedite_pool() */ lpfc_ncmd->expedite = false; 3387 qp->put_io_bufs++; 3388 epd_pool->count--; 3389 } 3390 spin_unlock(&epd_pool->lock); 3391 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3392 } 3393 3394 /** 3395 * lpfc_create_multixri_pools - create multi-XRI pools 3396 * @phba: pointer to lpfc hba data structure. 3397 * 3398 * This routine initializes the public and private pools per HWQ, then moves 3399 * XRIs from lpfc_io_buf_list_put to the public pool. The high and low 3400 * watermarks are also initialized.
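 * Each hardware queue receives an equal share of the common XRIs as its
 * xri_limit; the private pool starts empty with a high watermark of half
 * that limit and a low watermark of XRI_BATCH. If any pool allocation
 * fails, the pools created so far are freed and XRI rebalancing is
 * disabled.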
3401 **/ 3402 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3403 { 3404 u32 i, j; 3405 u32 hwq_count; 3406 u32 count_per_hwq; 3407 struct lpfc_io_buf *lpfc_ncmd; 3408 struct lpfc_io_buf *lpfc_ncmd_next; 3409 unsigned long iflag; 3410 struct lpfc_sli4_hdw_queue *qp; 3411 struct lpfc_multixri_pool *multixri_pool; 3412 struct lpfc_pbl_pool *pbl_pool; 3413 struct lpfc_pvt_pool *pvt_pool; 3414 3415 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3416 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3417 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3418 phba->sli4_hba.io_xri_cnt); 3419 3420 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3421 lpfc_create_expedite_pool(phba); 3422 3423 hwq_count = phba->cfg_hdw_queue; 3424 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3425 3426 for (i = 0; i < hwq_count; i++) { 3427 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3428 3429 if (!multixri_pool) { 3430 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3431 "1238 Failed to allocate memory for " 3432 "multixri_pool\n"); 3433 3434 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3435 lpfc_destroy_expedite_pool(phba); 3436 3437 j = 0; 3438 while (j < i) { 3439 qp = &phba->sli4_hba.hdwq[j]; 3440 kfree(qp->p_multixri_pool); 3441 j++; 3442 } 3443 phba->cfg_xri_rebalancing = 0; 3444 return; 3445 } 3446 3447 qp = &phba->sli4_hba.hdwq[i]; 3448 qp->p_multixri_pool = multixri_pool; 3449 3450 multixri_pool->xri_limit = count_per_hwq; 3451 multixri_pool->rrb_next_hwqid = i; 3452 3453 /* Deal with public free xri pool */ 3454 pbl_pool = &multixri_pool->pbl_pool; 3455 spin_lock_init(&pbl_pool->lock); 3456 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3457 spin_lock(&pbl_pool->lock); 3458 INIT_LIST_HEAD(&pbl_pool->list); 3459 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3460 &qp->lpfc_io_buf_list_put, list) { 3461 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3462 qp->put_io_bufs--; 3463 pbl_pool->count++; 3464 } 3465 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3466 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3467 pbl_pool->count, i); 3468 spin_unlock(&pbl_pool->lock); 3469 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3470 3471 /* Deal with private free xri pool */ 3472 pvt_pool = &multixri_pool->pvt_pool; 3473 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3474 pvt_pool->low_watermark = XRI_BATCH; 3475 spin_lock_init(&pvt_pool->lock); 3476 spin_lock_irqsave(&pvt_pool->lock, iflag); 3477 INIT_LIST_HEAD(&pvt_pool->list); 3478 pvt_pool->count = 0; 3479 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3480 } 3481 } 3482 3483 /** 3484 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3485 * @phba: pointer to lpfc hba data structure. 3486 * 3487 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
3488 **/ 3489 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3490 { 3491 u32 i; 3492 u32 hwq_count; 3493 struct lpfc_io_buf *lpfc_ncmd; 3494 struct lpfc_io_buf *lpfc_ncmd_next; 3495 unsigned long iflag; 3496 struct lpfc_sli4_hdw_queue *qp; 3497 struct lpfc_multixri_pool *multixri_pool; 3498 struct lpfc_pbl_pool *pbl_pool; 3499 struct lpfc_pvt_pool *pvt_pool; 3500 3501 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3502 lpfc_destroy_expedite_pool(phba); 3503 3504 if (!(phba->pport->load_flag & FC_UNLOADING)) 3505 lpfc_sli_flush_io_rings(phba); 3506 3507 hwq_count = phba->cfg_hdw_queue; 3508 3509 for (i = 0; i < hwq_count; i++) { 3510 qp = &phba->sli4_hba.hdwq[i]; 3511 multixri_pool = qp->p_multixri_pool; 3512 if (!multixri_pool) 3513 continue; 3514 3515 qp->p_multixri_pool = NULL; 3516 3517 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3518 3519 /* Deal with public free xri pool */ 3520 pbl_pool = &multixri_pool->pbl_pool; 3521 spin_lock(&pbl_pool->lock); 3522 3523 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3524 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3525 pbl_pool->count, i); 3526 3527 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3528 &pbl_pool->list, list) { 3529 list_move_tail(&lpfc_ncmd->list, 3530 &qp->lpfc_io_buf_list_put); 3531 qp->put_io_bufs++; 3532 pbl_pool->count--; 3533 } 3534 3535 INIT_LIST_HEAD(&pbl_pool->list); 3536 pbl_pool->count = 0; 3537 3538 spin_unlock(&pbl_pool->lock); 3539 3540 /* Deal with private free xri pool */ 3541 pvt_pool = &multixri_pool->pvt_pool; 3542 spin_lock(&pvt_pool->lock); 3543 3544 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3545 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3546 pvt_pool->count, i); 3547 3548 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3549 &pvt_pool->list, list) { 3550 list_move_tail(&lpfc_ncmd->list, 3551 &qp->lpfc_io_buf_list_put); 3552 qp->put_io_bufs++; 3553 pvt_pool->count--; 3554 } 3555 3556 INIT_LIST_HEAD(&pvt_pool->list); 3557 pvt_pool->count = 0; 3558 3559 spin_unlock(&pvt_pool->lock); 3560 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3561 3562 kfree(multixri_pool); 3563 } 3564 } 3565 3566 /** 3567 * lpfc_online - Initialize and bring a HBA online 3568 * @phba: pointer to lpfc hba data structure. 3569 * 3570 * This routine initializes the HBA and brings a HBA online. During this 3571 * process, the management interface is blocked to prevent user space access 3572 * to the HBA interfering with the driver initialization. 3573 * 3574 * Return codes 3575 * 0 - successful 3576 * 1 - failed 3577 **/ 3578 int 3579 lpfc_online(struct lpfc_hba *phba) 3580 { 3581 struct lpfc_vport *vport; 3582 struct lpfc_vport **vports; 3583 int i, error = 0; 3584 bool vpis_cleared = false; 3585 3586 if (!phba) 3587 return 0; 3588 vport = phba->pport; 3589 3590 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3591 return 0; 3592 3593 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3594 "0458 Bring Adapter online\n"); 3595 3596 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3597 3598 if (phba->sli_rev == LPFC_SLI_REV4) { 3599 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3600 lpfc_unblock_mgmt_io(phba); 3601 return 1; 3602 } 3603 spin_lock_irq(&phba->hbalock); 3604 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3605 vpis_cleared = true; 3606 spin_unlock_irq(&phba->hbalock); 3607 3608 /* Reestablish the local initiator port. 3609 * The offline process destroyed the previous lport. 
3610 */ 3611 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3612 !phba->nvmet_support) { 3613 error = lpfc_nvme_create_localport(phba->pport); 3614 if (error) 3615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3616 "6132 NVME restore reg failed " 3617 "on nvmei error x%x\n", error); 3618 } 3619 } else { 3620 lpfc_sli_queue_init(phba); 3621 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3622 lpfc_unblock_mgmt_io(phba); 3623 return 1; 3624 } 3625 } 3626 3627 vports = lpfc_create_vport_work_array(phba); 3628 if (vports != NULL) { 3629 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3630 struct Scsi_Host *shost; 3631 shost = lpfc_shost_from_vport(vports[i]); 3632 spin_lock_irq(shost->host_lock); 3633 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3634 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3635 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3636 if (phba->sli_rev == LPFC_SLI_REV4) { 3637 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3638 if ((vpis_cleared) && 3639 (vports[i]->port_type != 3640 LPFC_PHYSICAL_PORT)) 3641 vports[i]->vpi = 0; 3642 } 3643 spin_unlock_irq(shost->host_lock); 3644 } 3645 } 3646 lpfc_destroy_vport_work_array(phba, vports); 3647 3648 if (phba->cfg_xri_rebalancing) 3649 lpfc_create_multixri_pools(phba); 3650 3651 lpfc_cpuhp_add(phba); 3652 3653 lpfc_unblock_mgmt_io(phba); 3654 return 0; 3655 } 3656 3657 /** 3658 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3659 * @phba: pointer to lpfc hba data structure. 3660 * 3661 * This routine marks a HBA's management interface as not blocked. Once the 3662 * HBA's management interface is marked as not blocked, all the user space 3663 * access to the HBA, whether they are from sysfs interface or libdfc 3664 * interface will be allowed. The HBA is set to block the management interface 3665 * when the driver prepares the HBA interface for online or offline and then 3666 * set to unblock the management interface afterwards. 3667 **/ 3668 void 3669 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3670 { 3671 unsigned long iflag; 3672 3673 spin_lock_irqsave(&phba->hbalock, iflag); 3674 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3675 spin_unlock_irqrestore(&phba->hbalock, iflag); 3676 } 3677 3678 /** 3679 * lpfc_offline_prep - Prepare a HBA to be brought offline 3680 * @phba: pointer to lpfc hba data structure. 3681 * @mbx_action: flag for mailbox shutdown action. 3682 * 3683 * This routine is invoked to prepare a HBA to be brought offline. It performs 3684 * unregistration login to all the nodes on all vports and flushes the mailbox 3685 * queue to make it ready to be brought offline. 
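 *
 * The sequence, in outline, is: block management I/O, take the link down,
 * unregister (or, on an SLI4 port, free) the RPI of every node on every
 * vport, shut down the mailbox subsystem according to @mbx_action, and
 * finally flush the driver workqueue.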
3686 **/ 3687 void 3688 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3689 { 3690 struct lpfc_vport *vport = phba->pport; 3691 struct lpfc_nodelist *ndlp, *next_ndlp; 3692 struct lpfc_vport **vports; 3693 struct Scsi_Host *shost; 3694 int i; 3695 int offline = 0; 3696 3697 if (vport->fc_flag & FC_OFFLINE_MODE) 3698 return; 3699 3700 lpfc_block_mgmt_io(phba, mbx_action); 3701 3702 lpfc_linkdown(phba); 3703 3704 offline = pci_channel_offline(phba->pcidev); 3705 3706 /* Issue an unreg_login to all nodes on all vports */ 3707 vports = lpfc_create_vport_work_array(phba); 3708 if (vports != NULL) { 3709 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3710 if (vports[i]->load_flag & FC_UNLOADING) 3711 continue; 3712 shost = lpfc_shost_from_vport(vports[i]); 3713 spin_lock_irq(shost->host_lock); 3714 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3715 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3716 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3717 spin_unlock_irq(shost->host_lock); 3718 3719 shost = lpfc_shost_from_vport(vports[i]); 3720 list_for_each_entry_safe(ndlp, next_ndlp, 3721 &vports[i]->fc_nodes, 3722 nlp_listp) { 3723 3724 spin_lock_irq(&ndlp->lock); 3725 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3726 spin_unlock_irq(&ndlp->lock); 3727 3728 if (offline) { 3729 spin_lock_irq(&ndlp->lock); 3730 ndlp->nlp_flag &= ~(NLP_UNREG_INP | 3731 NLP_RPI_REGISTERED); 3732 spin_unlock_irq(&ndlp->lock); 3733 } else { 3734 lpfc_unreg_rpi(vports[i], ndlp); 3735 } 3736 /* 3737 * Whenever an SLI4 port goes offline, free the 3738 * RPI. Get a new RPI when the adapter port 3739 * comes back online. 3740 */ 3741 if (phba->sli_rev == LPFC_SLI_REV4) { 3742 lpfc_printf_vlog(vports[i], KERN_INFO, 3743 LOG_NODE | LOG_DISCOVERY, 3744 "0011 Free RPI x%x on " 3745 "ndlp: x%px did x%x\n", 3746 ndlp->nlp_rpi, ndlp, 3747 ndlp->nlp_DID); 3748 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3749 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3750 } 3751 3752 if (ndlp->nlp_type & NLP_FABRIC) { 3753 lpfc_disc_state_machine(vports[i], ndlp, 3754 NULL, NLP_EVT_DEVICE_RECOVERY); 3755 3756 /* Don't remove the node unless the node 3757 * has been unregistered with the 3758 * transport, and we're not in recovery 3759 * before dev_loss_tmo triggered. 3760 * Otherwise, let dev_loss take care of 3761 * the node. 3762 */ 3763 if (!(ndlp->save_flags & 3764 NLP_IN_RECOV_POST_DEV_LOSS) && 3765 !(ndlp->fc4_xpt_flags & 3766 (NVME_XPT_REGD | SCSI_XPT_REGD))) 3767 lpfc_disc_state_machine 3768 (vports[i], ndlp, 3769 NULL, 3770 NLP_EVT_DEVICE_RM); 3771 } 3772 } 3773 } 3774 } 3775 lpfc_destroy_vport_work_array(phba, vports); 3776 3777 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3778 3779 if (phba->wq) 3780 flush_workqueue(phba->wq); 3781 } 3782 3783 /** 3784 * lpfc_offline - Bring a HBA offline 3785 * @phba: pointer to lpfc hba data structure. 3786 * 3787 * This routine actually brings a HBA offline. It stops all the timers 3788 * associated with the HBA, brings down the SLI layer, and eventually 3789 * marks the HBA as in offline state for the upper layer protocol. 3790 **/ 3791 void 3792 lpfc_offline(struct lpfc_hba *phba) 3793 { 3794 struct Scsi_Host *shost; 3795 struct lpfc_vport **vports; 3796 int i; 3797 3798 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3799 return; 3800 3801 /* stop port and all timers associated with this hba */ 3802 lpfc_stop_port(phba); 3803 3804 /* Tear down the local and target port registrations. The 3805 * nvme transports need to cleanup. 
3806 */ 3807 lpfc_nvmet_destroy_targetport(phba); 3808 lpfc_nvme_destroy_localport(phba->pport); 3809 3810 vports = lpfc_create_vport_work_array(phba); 3811 if (vports != NULL) 3812 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3813 lpfc_stop_vport_timers(vports[i]); 3814 lpfc_destroy_vport_work_array(phba, vports); 3815 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3816 "0460 Bring Adapter offline\n"); 3817 /* Bring down the SLI Layer and cleanup. The HBA is offline 3818 now. */ 3819 lpfc_sli_hba_down(phba); 3820 spin_lock_irq(&phba->hbalock); 3821 phba->work_ha = 0; 3822 spin_unlock_irq(&phba->hbalock); 3823 vports = lpfc_create_vport_work_array(phba); 3824 if (vports != NULL) 3825 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3826 shost = lpfc_shost_from_vport(vports[i]); 3827 spin_lock_irq(shost->host_lock); 3828 vports[i]->work_port_events = 0; 3829 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3830 spin_unlock_irq(shost->host_lock); 3831 } 3832 lpfc_destroy_vport_work_array(phba, vports); 3833 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled 3834 * in hba_unset 3835 */ 3836 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3837 __lpfc_cpuhp_remove(phba); 3838 3839 if (phba->cfg_xri_rebalancing) 3840 lpfc_destroy_multixri_pools(phba); 3841 } 3842 3843 /** 3844 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3845 * @phba: pointer to lpfc hba data structure. 3846 * 3847 * This routine is to free all the SCSI buffers and IOCBs from the driver 3848 * list back to kernel. It is called from lpfc_pci_remove_one to free 3849 * the internal resources before the device is removed from the system. 3850 **/ 3851 static void 3852 lpfc_scsi_free(struct lpfc_hba *phba) 3853 { 3854 struct lpfc_io_buf *sb, *sb_next; 3855 3856 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3857 return; 3858 3859 spin_lock_irq(&phba->hbalock); 3860 3861 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3862 3863 spin_lock(&phba->scsi_buf_list_put_lock); 3864 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3865 list) { 3866 list_del(&sb->list); 3867 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3868 sb->dma_handle); 3869 kfree(sb); 3870 phba->total_scsi_bufs--; 3871 } 3872 spin_unlock(&phba->scsi_buf_list_put_lock); 3873 3874 spin_lock(&phba->scsi_buf_list_get_lock); 3875 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3876 list) { 3877 list_del(&sb->list); 3878 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3879 sb->dma_handle); 3880 kfree(sb); 3881 phba->total_scsi_bufs--; 3882 } 3883 spin_unlock(&phba->scsi_buf_list_get_lock); 3884 spin_unlock_irq(&phba->hbalock); 3885 } 3886 3887 /** 3888 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3889 * @phba: pointer to lpfc hba data structure. 3890 * 3891 * This routine is to free all the IO buffers and IOCBs from the driver 3892 * list back to kernel. It is called from lpfc_pci_remove_one to free 3893 * the internal resources before the device is removed from the system. 3894 **/ 3895 void 3896 lpfc_io_free(struct lpfc_hba *phba) 3897 { 3898 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3899 struct lpfc_sli4_hdw_queue *qp; 3900 int idx; 3901 3902 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3903 qp = &phba->sli4_hba.hdwq[idx]; 3904 /* Release all the lpfc_nvme_bufs maintained by this host. 
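		 * Both the put and get lists of every hardware queue are
		 * drained, and any extra per-hdwq SGL and cmd/rsp buffers
		 * attached to a buffer are returned as well.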
*/ 3905 spin_lock(&qp->io_buf_list_put_lock); 3906 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3907 &qp->lpfc_io_buf_list_put, 3908 list) { 3909 list_del(&lpfc_ncmd->list); 3910 qp->put_io_bufs--; 3911 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3912 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3913 if (phba->cfg_xpsgl && !phba->nvmet_support) 3914 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3915 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3916 kfree(lpfc_ncmd); 3917 qp->total_io_bufs--; 3918 } 3919 spin_unlock(&qp->io_buf_list_put_lock); 3920 3921 spin_lock(&qp->io_buf_list_get_lock); 3922 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3923 &qp->lpfc_io_buf_list_get, 3924 list) { 3925 list_del(&lpfc_ncmd->list); 3926 qp->get_io_bufs--; 3927 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3928 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3929 if (phba->cfg_xpsgl && !phba->nvmet_support) 3930 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3931 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3932 kfree(lpfc_ncmd); 3933 qp->total_io_bufs--; 3934 } 3935 spin_unlock(&qp->io_buf_list_get_lock); 3936 } 3937 } 3938 3939 /** 3940 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3941 * @phba: pointer to lpfc hba data structure. 3942 * 3943 * This routine first calculates the sizes of the current els and allocated 3944 * scsi sgl lists, and then goes through all sgls to updates the physical 3945 * XRIs assigned due to port function reset. During port initialization, the 3946 * current els and allocated scsi sgl lists are 0s. 3947 * 3948 * Return codes 3949 * 0 - successful (for now, it always returns 0) 3950 **/ 3951 int 3952 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3953 { 3954 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3955 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3956 LIST_HEAD(els_sgl_list); 3957 int rc; 3958 3959 /* 3960 * update on pci function's els xri-sgl list 3961 */ 3962 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3963 3964 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3965 /* els xri-sgl expanded */ 3966 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3967 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3968 "3157 ELS xri-sgl count increased from " 3969 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3970 els_xri_cnt); 3971 /* allocate the additional els sgls */ 3972 for (i = 0; i < xri_cnt; i++) { 3973 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3974 GFP_KERNEL); 3975 if (sglq_entry == NULL) { 3976 lpfc_printf_log(phba, KERN_ERR, 3977 LOG_TRACE_EVENT, 3978 "2562 Failure to allocate an " 3979 "ELS sgl entry:%d\n", i); 3980 rc = -ENOMEM; 3981 goto out_free_mem; 3982 } 3983 sglq_entry->buff_type = GEN_BUFF_TYPE; 3984 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3985 &sglq_entry->phys); 3986 if (sglq_entry->virt == NULL) { 3987 kfree(sglq_entry); 3988 lpfc_printf_log(phba, KERN_ERR, 3989 LOG_TRACE_EVENT, 3990 "2563 Failure to allocate an " 3991 "ELS mbuf:%d\n", i); 3992 rc = -ENOMEM; 3993 goto out_free_mem; 3994 } 3995 sglq_entry->sgl = sglq_entry->virt; 3996 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3997 sglq_entry->state = SGL_FREED; 3998 list_add_tail(&sglq_entry->list, &els_sgl_list); 3999 } 4000 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 4001 list_splice_init(&els_sgl_list, 4002 &phba->sli4_hba.lpfc_els_sgl_list); 4003 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 4004 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 4005 /* els xri-sgl shrinked */ 4006 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 4007 lpfc_printf_log(phba, KERN_INFO, 
				LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
				 &els_sgl_list);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				__lpfc_mbuf_free(phba, sglq_entry->virt,
						 sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	return rc;
}

/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the size of the current nvmet xri-sgl list,
 * and then goes through all sgls to update the physical XRIs assigned due
 * to port function reset. During port initialization, the current nvmet
 * sgl list is empty.
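 *
 * The sizing rule applied below is simply
 *
 *   nvmet_xri_cnt = max_cfg_param.max_xri - lpfc_sli4_get_els_iocb_cnt(phba)
 *
 * i.e. every XRI not reserved for ELS traffic is dedicated to NVMET I/O.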
4065 * 4066 * Return codes 4067 * 0 - successful (for now, it always returns 0) 4068 **/ 4069 int 4070 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 4071 { 4072 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 4073 uint16_t i, lxri, xri_cnt, els_xri_cnt; 4074 uint16_t nvmet_xri_cnt; 4075 LIST_HEAD(nvmet_sgl_list); 4076 int rc; 4077 4078 /* 4079 * update on pci function's nvmet xri-sgl list 4080 */ 4081 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4082 4083 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 4084 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4085 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 4086 /* els xri-sgl expanded */ 4087 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 4088 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4089 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 4090 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 4091 /* allocate the additional nvmet sgls */ 4092 for (i = 0; i < xri_cnt; i++) { 4093 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 4094 GFP_KERNEL); 4095 if (sglq_entry == NULL) { 4096 lpfc_printf_log(phba, KERN_ERR, 4097 LOG_TRACE_EVENT, 4098 "6303 Failure to allocate an " 4099 "NVMET sgl entry:%d\n", i); 4100 rc = -ENOMEM; 4101 goto out_free_mem; 4102 } 4103 sglq_entry->buff_type = NVMET_BUFF_TYPE; 4104 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 4105 &sglq_entry->phys); 4106 if (sglq_entry->virt == NULL) { 4107 kfree(sglq_entry); 4108 lpfc_printf_log(phba, KERN_ERR, 4109 LOG_TRACE_EVENT, 4110 "6304 Failure to allocate an " 4111 "NVMET buf:%d\n", i); 4112 rc = -ENOMEM; 4113 goto out_free_mem; 4114 } 4115 sglq_entry->sgl = sglq_entry->virt; 4116 memset(sglq_entry->sgl, 0, 4117 phba->cfg_sg_dma_buf_size); 4118 sglq_entry->state = SGL_FREED; 4119 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 4120 } 4121 spin_lock_irq(&phba->hbalock); 4122 spin_lock(&phba->sli4_hba.sgl_list_lock); 4123 list_splice_init(&nvmet_sgl_list, 4124 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4125 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4126 spin_unlock_irq(&phba->hbalock); 4127 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 4128 /* nvmet xri-sgl shrunk */ 4129 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 4130 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4131 "6305 NVMET xri-sgl count decreased from " 4132 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 4133 nvmet_xri_cnt); 4134 spin_lock_irq(&phba->hbalock); 4135 spin_lock(&phba->sli4_hba.sgl_list_lock); 4136 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 4137 &nvmet_sgl_list); 4138 /* release extra nvmet sgls from list */ 4139 for (i = 0; i < xri_cnt; i++) { 4140 list_remove_head(&nvmet_sgl_list, 4141 sglq_entry, struct lpfc_sglq, list); 4142 if (sglq_entry) { 4143 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 4144 sglq_entry->phys); 4145 kfree(sglq_entry); 4146 } 4147 } 4148 list_splice_init(&nvmet_sgl_list, 4149 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4150 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4151 spin_unlock_irq(&phba->hbalock); 4152 } else 4153 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4154 "6306 NVMET xri-sgl count unchanged: %d\n", 4155 nvmet_xri_cnt); 4156 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 4157 4158 /* update xris to nvmet sgls on the list */ 4159 sglq_entry = NULL; 4160 sglq_entry_next = NULL; 4161 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 4162 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 4163 lxri = lpfc_sli4_next_xritag(phba); 4164 if (lxri == NO_XRI) { 4165 lpfc_printf_log(phba, KERN_ERR, 
4166 LOG_TRACE_EVENT, 4167 "6307 Failed to allocate xri for " 4168 "NVMET sgl\n"); 4169 rc = -ENOMEM; 4170 goto out_free_mem; 4171 } 4172 sglq_entry->sli4_lxritag = lxri; 4173 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4174 } 4175 return 0; 4176 4177 out_free_mem: 4178 lpfc_free_nvmet_sgl_list(phba); 4179 return rc; 4180 } 4181 4182 int 4183 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 4184 { 4185 LIST_HEAD(blist); 4186 struct lpfc_sli4_hdw_queue *qp; 4187 struct lpfc_io_buf *lpfc_cmd; 4188 struct lpfc_io_buf *iobufp, *prev_iobufp; 4189 int idx, cnt, xri, inserted; 4190 4191 cnt = 0; 4192 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4193 qp = &phba->sli4_hba.hdwq[idx]; 4194 spin_lock_irq(&qp->io_buf_list_get_lock); 4195 spin_lock(&qp->io_buf_list_put_lock); 4196 4197 /* Take everything off the get and put lists */ 4198 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 4199 list_splice(&qp->lpfc_io_buf_list_put, &blist); 4200 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 4201 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 4202 cnt += qp->get_io_bufs + qp->put_io_bufs; 4203 qp->get_io_bufs = 0; 4204 qp->put_io_bufs = 0; 4205 qp->total_io_bufs = 0; 4206 spin_unlock(&qp->io_buf_list_put_lock); 4207 spin_unlock_irq(&qp->io_buf_list_get_lock); 4208 } 4209 4210 /* 4211 * Take IO buffers off blist and put on cbuf sorted by XRI. 4212 * This is because POST_SGL takes a sequential range of XRIs 4213 * to post to the firmware. 4214 */ 4215 for (idx = 0; idx < cnt; idx++) { 4216 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 4217 if (!lpfc_cmd) 4218 return cnt; 4219 if (idx == 0) { 4220 list_add_tail(&lpfc_cmd->list, cbuf); 4221 continue; 4222 } 4223 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 4224 inserted = 0; 4225 prev_iobufp = NULL; 4226 list_for_each_entry(iobufp, cbuf, list) { 4227 if (xri < iobufp->cur_iocbq.sli4_xritag) { 4228 if (prev_iobufp) 4229 list_add(&lpfc_cmd->list, 4230 &prev_iobufp->list); 4231 else 4232 list_add(&lpfc_cmd->list, cbuf); 4233 inserted = 1; 4234 break; 4235 } 4236 prev_iobufp = iobufp; 4237 } 4238 if (!inserted) 4239 list_add_tail(&lpfc_cmd->list, cbuf); 4240 } 4241 return cnt; 4242 } 4243 4244 int 4245 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 4246 { 4247 struct lpfc_sli4_hdw_queue *qp; 4248 struct lpfc_io_buf *lpfc_cmd; 4249 int idx, cnt; 4250 4251 qp = phba->sli4_hba.hdwq; 4252 cnt = 0; 4253 while (!list_empty(cbuf)) { 4254 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4255 list_remove_head(cbuf, lpfc_cmd, 4256 struct lpfc_io_buf, list); 4257 if (!lpfc_cmd) 4258 return cnt; 4259 cnt++; 4260 qp = &phba->sli4_hba.hdwq[idx]; 4261 lpfc_cmd->hdwq_no = idx; 4262 lpfc_cmd->hdwq = qp; 4263 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 4264 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 4265 spin_lock(&qp->io_buf_list_put_lock); 4266 list_add_tail(&lpfc_cmd->list, 4267 &qp->lpfc_io_buf_list_put); 4268 qp->put_io_bufs++; 4269 qp->total_io_bufs++; 4270 spin_unlock(&qp->io_buf_list_put_lock); 4271 } 4272 } 4273 return cnt; 4274 } 4275 4276 /** 4277 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 4278 * @phba: pointer to lpfc hba data structure. 4279 * 4280 * This routine first calculates the sizes of the current els and allocated 4281 * scsi sgl lists, and then goes through all sgls to updates the physical 4282 * XRIs assigned due to port function reset. During port initialization, the 4283 * current els and allocated scsi sgl lists are 0s. 
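 *
 * In outline, the update below is:
 *
 *   io_xri_max = max_cfg_param.max_xri - lpfc_sli4_get_els_iocb_cnt(phba);
 *   cnt = lpfc_io_buf_flush(phba, &io_sgl_list);   (drain hdwqs, sort by XRI)
 *   free any buffers beyond io_xri_max, reassign XRI tags to the remainder,
 *   then lpfc_io_buf_replenish() hands the buffers back to the hdwqs.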
4284 * 4285 * Return codes 4286 * 0 - successful (for now, it always returns 0) 4287 **/ 4288 int 4289 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) 4290 { 4291 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 4292 uint16_t i, lxri, els_xri_cnt; 4293 uint16_t io_xri_cnt, io_xri_max; 4294 LIST_HEAD(io_sgl_list); 4295 int rc, cnt; 4296 4297 /* 4298 * update on pci function's allocated nvme xri-sgl list 4299 */ 4300 4301 /* maximum number of xris available for nvme buffers */ 4302 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4303 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4304 phba->sli4_hba.io_xri_max = io_xri_max; 4305 4306 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4307 "6074 Current allocated XRI sgl count:%d, " 4308 "maximum XRI count:%d\n", 4309 phba->sli4_hba.io_xri_cnt, 4310 phba->sli4_hba.io_xri_max); 4311 4312 cnt = lpfc_io_buf_flush(phba, &io_sgl_list); 4313 4314 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { 4315 /* max nvme xri shrunk below the allocated nvme buffers */ 4316 io_xri_cnt = phba->sli4_hba.io_xri_cnt - 4317 phba->sli4_hba.io_xri_max; 4318 /* release the extra allocated nvme buffers */ 4319 for (i = 0; i < io_xri_cnt; i++) { 4320 list_remove_head(&io_sgl_list, lpfc_ncmd, 4321 struct lpfc_io_buf, list); 4322 if (lpfc_ncmd) { 4323 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4324 lpfc_ncmd->data, 4325 lpfc_ncmd->dma_handle); 4326 kfree(lpfc_ncmd); 4327 } 4328 } 4329 phba->sli4_hba.io_xri_cnt -= io_xri_cnt; 4330 } 4331 4332 /* update xris associated to remaining allocated nvme buffers */ 4333 lpfc_ncmd = NULL; 4334 lpfc_ncmd_next = NULL; 4335 phba->sli4_hba.io_xri_cnt = cnt; 4336 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 4337 &io_sgl_list, list) { 4338 lxri = lpfc_sli4_next_xritag(phba); 4339 if (lxri == NO_XRI) { 4340 lpfc_printf_log(phba, KERN_ERR, 4341 LOG_TRACE_EVENT, 4342 "6075 Failed to allocate xri for " 4343 "nvme buffer\n"); 4344 rc = -ENOMEM; 4345 goto out_free_mem; 4346 } 4347 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 4348 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4349 } 4350 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); 4351 return 0; 4352 4353 out_free_mem: 4354 lpfc_io_free(phba); 4355 return rc; 4356 } 4357 4358 /** 4359 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec 4360 * @phba: Pointer to lpfc hba data structure. 4361 * @num_to_alloc: The requested number of buffers to allocate. 4362 * 4363 * This routine allocates nvme buffers for device with SLI-4 interface spec, 4364 * the nvme buffer contains all the necessary information needed to initiate 4365 * an I/O. After allocating up to @num_to_allocate IO buffers and put 4366 * them on a list, it post them to the port by using SGL block post. 4367 * 4368 * Return codes: 4369 * int - number of IO buffers that were allocated and posted. 4370 * 0 = failure, less than num_to_alloc is a partial failure. 4371 **/ 4372 int 4373 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4374 { 4375 struct lpfc_io_buf *lpfc_ncmd; 4376 struct lpfc_iocbq *pwqeq; 4377 uint16_t iotag, lxri = 0; 4378 int bcnt, num_posted; 4379 LIST_HEAD(prep_nblist); 4380 LIST_HEAD(post_nblist); 4381 LIST_HEAD(nvme_nblist); 4382 4383 phba->sli4_hba.io_xri_cnt = 0; 4384 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4385 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); 4386 if (!lpfc_ncmd) 4387 break; 4388 /* 4389 * Get memory from the pci pool to map the virt space to 4390 * pci bus space for an I/O. 
The DMA buffer includes the 4391 * number of SGE's necessary to support the sg_tablesize. 4392 */ 4393 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4394 GFP_KERNEL, 4395 &lpfc_ncmd->dma_handle); 4396 if (!lpfc_ncmd->data) { 4397 kfree(lpfc_ncmd); 4398 break; 4399 } 4400 4401 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4402 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4403 } else { 4404 /* 4405 * 4K Page alignment is CRITICAL to BlockGuard, double 4406 * check to be sure. 4407 */ 4408 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4409 (((unsigned long)(lpfc_ncmd->data) & 4410 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4411 lpfc_printf_log(phba, KERN_ERR, 4412 LOG_TRACE_EVENT, 4413 "3369 Memory alignment err: " 4414 "addr=%lx\n", 4415 (unsigned long)lpfc_ncmd->data); 4416 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4417 lpfc_ncmd->data, 4418 lpfc_ncmd->dma_handle); 4419 kfree(lpfc_ncmd); 4420 break; 4421 } 4422 } 4423 4424 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4425 4426 lxri = lpfc_sli4_next_xritag(phba); 4427 if (lxri == NO_XRI) { 4428 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4429 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4430 kfree(lpfc_ncmd); 4431 break; 4432 } 4433 pwqeq = &lpfc_ncmd->cur_iocbq; 4434 4435 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4436 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4437 if (iotag == 0) { 4438 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4439 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4440 kfree(lpfc_ncmd); 4441 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4442 "6121 Failed to allocate IOTAG for" 4443 " XRI:0x%x\n", lxri); 4444 lpfc_sli4_free_xri(phba, lxri); 4445 break; 4446 } 4447 pwqeq->sli4_lxritag = lxri; 4448 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4449 pwqeq->context1 = lpfc_ncmd; 4450 4451 /* Initialize local short-hand pointers. 
*/ 4452 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4453 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4454 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4455 spin_lock_init(&lpfc_ncmd->buf_lock); 4456 4457 /* add the nvme buffer to a post list */ 4458 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4459 phba->sli4_hba.io_xri_cnt++; 4460 } 4461 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4462 "6114 Allocate %d out of %d requested new NVME " 4463 "buffers\n", bcnt, num_to_alloc); 4464 4465 /* post the list of nvme buffer sgls to port if available */ 4466 if (!list_empty(&post_nblist)) 4467 num_posted = lpfc_sli4_post_io_sgl_list( 4468 phba, &post_nblist, bcnt); 4469 else 4470 num_posted = 0; 4471 4472 return num_posted; 4473 } 4474 4475 static uint64_t 4476 lpfc_get_wwpn(struct lpfc_hba *phba) 4477 { 4478 uint64_t wwn; 4479 int rc; 4480 LPFC_MBOXQ_t *mboxq; 4481 MAILBOX_t *mb; 4482 4483 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4484 GFP_KERNEL); 4485 if (!mboxq) 4486 return (uint64_t)-1; 4487 4488 /* First get WWN of HBA instance */ 4489 lpfc_read_nv(phba, mboxq); 4490 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4491 if (rc != MBX_SUCCESS) { 4492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4493 "6019 Mailbox failed , mbxCmd x%x " 4494 "READ_NV, mbxStatus x%x\n", 4495 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4496 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4497 mempool_free(mboxq, phba->mbox_mem_pool); 4498 return (uint64_t) -1; 4499 } 4500 mb = &mboxq->u.mb; 4501 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4502 /* wwn is WWPN of HBA instance */ 4503 mempool_free(mboxq, phba->mbox_mem_pool); 4504 if (phba->sli_rev == LPFC_SLI_REV4) 4505 return be64_to_cpu(wwn); 4506 else 4507 return rol64(wwn, 32); 4508 } 4509 4510 /** 4511 * lpfc_vmid_res_alloc - Allocates resources for VMID 4512 * @phba: pointer to lpfc hba data structure. 4513 * @vport: pointer to vport data structure 4514 * 4515 * This routine allocated the resources needed for the VMID. 4516 * 4517 * Return codes 4518 * 0 on Success 4519 * Non-0 on Failure 4520 */ 4521 static int 4522 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) 4523 { 4524 /* VMID feature is supported only on SLI4 */ 4525 if (phba->sli_rev == LPFC_SLI_REV3) { 4526 phba->cfg_vmid_app_header = 0; 4527 phba->cfg_vmid_priority_tagging = 0; 4528 } 4529 4530 if (lpfc_is_vmid_enabled(phba)) { 4531 vport->vmid = 4532 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), 4533 GFP_KERNEL); 4534 if (!vport->vmid) 4535 return -ENOMEM; 4536 4537 rwlock_init(&vport->vmid_lock); 4538 4539 /* Set the VMID parameters for the vport */ 4540 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; 4541 vport->vmid_inactivity_timeout = 4542 phba->cfg_vmid_inactivity_timeout; 4543 vport->max_vmid = phba->cfg_max_vmid; 4544 vport->cur_vmid_cnt = 0; 4545 4546 vport->vmid_priority_range = bitmap_zalloc 4547 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); 4548 4549 if (!vport->vmid_priority_range) { 4550 kfree(vport->vmid); 4551 return -ENOMEM; 4552 } 4553 4554 hash_init(vport->hash_table); 4555 } 4556 return 0; 4557 } 4558 4559 /** 4560 * lpfc_create_port - Create an FC port 4561 * @phba: pointer to lpfc hba data structure. 4562 * @instance: a unique integer ID to this FC port. 4563 * @dev: pointer to the device data structure. 4564 * 4565 * This routine creates a FC port for the upper layer protocol. The FC port 4566 * can be created on top of either a physical port or a virtual port provided 4567 * by the HBA. 
This routine also allocates a SCSI host data structure (shost) 4568 * and associates the FC port created before adding the shost into the SCSI 4569 * layer. 4570 * 4571 * Return codes 4572 * @vport - pointer to the virtual N_Port data structure. 4573 * NULL - port create failed. 4574 **/ 4575 struct lpfc_vport * 4576 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4577 { 4578 struct lpfc_vport *vport; 4579 struct Scsi_Host *shost = NULL; 4580 struct scsi_host_template *template; 4581 int error = 0; 4582 int i; 4583 uint64_t wwn; 4584 bool use_no_reset_hba = false; 4585 int rc; 4586 4587 if (lpfc_no_hba_reset_cnt) { 4588 if (phba->sli_rev < LPFC_SLI_REV4 && 4589 dev == &phba->pcidev->dev) { 4590 /* Reset the port first */ 4591 lpfc_sli_brdrestart(phba); 4592 rc = lpfc_sli_chipset_init(phba); 4593 if (rc) 4594 return NULL; 4595 } 4596 wwn = lpfc_get_wwpn(phba); 4597 } 4598 4599 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4600 if (wwn == lpfc_no_hba_reset[i]) { 4601 lpfc_printf_log(phba, KERN_ERR, 4602 LOG_TRACE_EVENT, 4603 "6020 Setting use_no_reset port=%llx\n", 4604 wwn); 4605 use_no_reset_hba = true; 4606 break; 4607 } 4608 } 4609 4610 /* Seed template for SCSI host registration */ 4611 if (dev == &phba->pcidev->dev) { 4612 template = &phba->port_template; 4613 4614 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4615 /* Seed physical port template */ 4616 memcpy(template, &lpfc_template, sizeof(*template)); 4617 4618 if (use_no_reset_hba) 4619 /* template is for a no reset SCSI Host */ 4620 template->eh_host_reset_handler = NULL; 4621 4622 /* Template for all vports this physical port creates */ 4623 memcpy(&phba->vport_template, &lpfc_template, 4624 sizeof(*template)); 4625 phba->vport_template.shost_groups = lpfc_vport_groups; 4626 phba->vport_template.eh_bus_reset_handler = NULL; 4627 phba->vport_template.eh_host_reset_handler = NULL; 4628 phba->vport_template.vendor_id = 0; 4629 4630 /* Initialize the host templates with updated value */ 4631 if (phba->sli_rev == LPFC_SLI_REV4) { 4632 template->sg_tablesize = phba->cfg_scsi_seg_cnt; 4633 phba->vport_template.sg_tablesize = 4634 phba->cfg_scsi_seg_cnt; 4635 } else { 4636 template->sg_tablesize = phba->cfg_sg_seg_cnt; 4637 phba->vport_template.sg_tablesize = 4638 phba->cfg_sg_seg_cnt; 4639 } 4640 4641 } else { 4642 /* NVMET is for physical port only */ 4643 memcpy(template, &lpfc_template_nvme, 4644 sizeof(*template)); 4645 } 4646 } else { 4647 template = &phba->vport_template; 4648 } 4649 4650 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4651 if (!shost) 4652 goto out; 4653 4654 vport = (struct lpfc_vport *) shost->hostdata; 4655 vport->phba = phba; 4656 vport->load_flag |= FC_LOADING; 4657 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4658 vport->fc_rscn_flush = 0; 4659 lpfc_get_vport_cfgparam(vport); 4660 4661 /* Adjust value in vport */ 4662 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4663 4664 shost->unique_id = instance; 4665 shost->max_id = LPFC_MAX_TARGET; 4666 shost->max_lun = vport->cfg_max_luns; 4667 shost->this_id = -1; 4668 shost->max_cmd_len = 16; 4669 4670 if (phba->sli_rev == LPFC_SLI_REV4) { 4671 if (!phba->cfg_fcp_mq_threshold || 4672 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4673 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4674 4675 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4676 phba->cfg_fcp_mq_threshold); 4677 4678 shost->dma_boundary = 4679 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4680 4681 if (phba->cfg_xpsgl && 
!phba->nvmet_support) 4682 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4683 else 4684 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4685 } else 4686 /* SLI-3 has a limited number of hardware queues (3), 4687 * thus there is only one for FCP processing. 4688 */ 4689 shost->nr_hw_queues = 1; 4690 4691 /* 4692 * Set initial can_queue value since 0 is no longer supported and 4693 * scsi_add_host will fail. This will be adjusted later based on the 4694 * max xri value determined in hba setup. 4695 */ 4696 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4697 if (dev != &phba->pcidev->dev) { 4698 shost->transportt = lpfc_vport_transport_template; 4699 vport->port_type = LPFC_NPIV_PORT; 4700 } else { 4701 shost->transportt = lpfc_transport_template; 4702 vport->port_type = LPFC_PHYSICAL_PORT; 4703 } 4704 4705 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4706 "9081 CreatePort TMPLATE type %x TBLsize %d " 4707 "SEGcnt %d/%d\n", 4708 vport->port_type, shost->sg_tablesize, 4709 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4710 4711 /* Allocate the resources for VMID */ 4712 rc = lpfc_vmid_res_alloc(phba, vport); 4713 4714 if (rc) 4715 goto out; 4716 4717 /* Initialize all internally managed lists. */ 4718 INIT_LIST_HEAD(&vport->fc_nodes); 4719 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4720 spin_lock_init(&vport->work_port_lock); 4721 4722 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4723 4724 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4725 4726 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4727 4728 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4729 lpfc_setup_bg(phba, shost); 4730 4731 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4732 if (error) 4733 goto out_put_shost; 4734 4735 spin_lock_irq(&phba->port_list_lock); 4736 list_add_tail(&vport->listentry, &phba->port_list); 4737 spin_unlock_irq(&phba->port_list_lock); 4738 return vport; 4739 4740 out_put_shost: 4741 kfree(vport->vmid); 4742 bitmap_free(vport->vmid_priority_range); 4743 scsi_host_put(shost); 4744 out: 4745 return NULL; 4746 } 4747 4748 /** 4749 * destroy_port - destroy an FC port 4750 * @vport: pointer to an lpfc virtual N_Port data structure. 4751 * 4752 * This routine destroys a FC port from the upper layer protocol. All the 4753 * resources associated with the port are released. 4754 **/ 4755 void 4756 destroy_port(struct lpfc_vport *vport) 4757 { 4758 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4759 struct lpfc_hba *phba = vport->phba; 4760 4761 lpfc_debugfs_terminate(vport); 4762 fc_remove_host(shost); 4763 scsi_remove_host(shost); 4764 4765 spin_lock_irq(&phba->port_list_lock); 4766 list_del_init(&vport->listentry); 4767 spin_unlock_irq(&phba->port_list_lock); 4768 4769 lpfc_cleanup(vport); 4770 return; 4771 } 4772 4773 /** 4774 * lpfc_get_instance - Get a unique integer ID 4775 * 4776 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4777 * uses the kernel idr facility to perform the task. 4778 * 4779 * Return codes: 4780 * instance - a unique integer ID allocated as the new instance. 4781 * -1 - lpfc get instance failed. 4782 **/ 4783 int 4784 lpfc_get_instance(void) 4785 { 4786 int ret; 4787 4788 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4789 return ret < 0 ? -1 : ret; 4790 } 4791 4792 /** 4793 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4794 * @shost: pointer to SCSI host data structure. 4795 * @time: elapsed time of the scan in jiffies. 
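 *	The value is compared against the 15 second link-down and 30 second
 *	overall cutoffs applied in the body below.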
4796 * 4797 * This routine is called by the SCSI layer with a SCSI host to determine 4798 * whether the scan host is finished. 4799 * 4800 * Note: there is no scan_start function as adapter initialization will have 4801 * asynchronously kicked off the link initialization. 4802 * 4803 * Return codes 4804 * 0 - SCSI host scan is not over yet. 4805 * 1 - SCSI host scan is over. 4806 **/ 4807 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4808 { 4809 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4810 struct lpfc_hba *phba = vport->phba; 4811 int stat = 0; 4812 4813 spin_lock_irq(shost->host_lock); 4814 4815 if (vport->load_flag & FC_UNLOADING) { 4816 stat = 1; 4817 goto finished; 4818 } 4819 if (time >= msecs_to_jiffies(30 * 1000)) { 4820 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4821 "0461 Scanning longer than 30 " 4822 "seconds. Continuing initialization\n"); 4823 stat = 1; 4824 goto finished; 4825 } 4826 if (time >= msecs_to_jiffies(15 * 1000) && 4827 phba->link_state <= LPFC_LINK_DOWN) { 4828 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4829 "0465 Link down longer than 15 " 4830 "seconds. Continuing initialization\n"); 4831 stat = 1; 4832 goto finished; 4833 } 4834 4835 if (vport->port_state != LPFC_VPORT_READY) 4836 goto finished; 4837 if (vport->num_disc_nodes || vport->fc_prli_sent) 4838 goto finished; 4839 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4840 goto finished; 4841 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4842 goto finished; 4843 4844 stat = 1; 4845 4846 finished: 4847 spin_unlock_irq(shost->host_lock); 4848 return stat; 4849 } 4850 4851 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4852 { 4853 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4854 struct lpfc_hba *phba = vport->phba; 4855 4856 fc_host_supported_speeds(shost) = 0; 4857 /* 4858 * Avoid reporting supported link speed for FCoE as it can't be 4859 * controlled via FCoE. 4860 */ 4861 if (phba->hba_flag & HBA_FCOE_MODE) 4862 return; 4863 4864 if (phba->lmt & LMT_256Gb) 4865 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; 4866 if (phba->lmt & LMT_128Gb) 4867 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4868 if (phba->lmt & LMT_64Gb) 4869 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4870 if (phba->lmt & LMT_32Gb) 4871 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4872 if (phba->lmt & LMT_16Gb) 4873 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4874 if (phba->lmt & LMT_10Gb) 4875 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4876 if (phba->lmt & LMT_8Gb) 4877 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4878 if (phba->lmt & LMT_4Gb) 4879 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4880 if (phba->lmt & LMT_2Gb) 4881 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4882 if (phba->lmt & LMT_1Gb) 4883 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4884 } 4885 4886 /** 4887 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4888 * @shost: pointer to SCSI host data structure. 4889 * 4890 * This routine initializes a given SCSI host attributes on a FC port. The 4891 * SCSI host can be either on top of a physical port or a virtual port. 4892 **/ 4893 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4894 { 4895 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4896 struct lpfc_hba *phba = vport->phba; 4897 /* 4898 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
4899 */ 4900 4901 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4902 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4903 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4904 4905 memset(fc_host_supported_fc4s(shost), 0, 4906 sizeof(fc_host_supported_fc4s(shost))); 4907 fc_host_supported_fc4s(shost)[2] = 1; 4908 fc_host_supported_fc4s(shost)[7] = 1; 4909 4910 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4911 sizeof fc_host_symbolic_name(shost)); 4912 4913 lpfc_host_supported_speeds_set(shost); 4914 4915 fc_host_maxframe_size(shost) = 4916 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4917 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4918 4919 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4920 4921 /* This value is also unchanging */ 4922 memset(fc_host_active_fc4s(shost), 0, 4923 sizeof(fc_host_active_fc4s(shost))); 4924 fc_host_active_fc4s(shost)[2] = 1; 4925 fc_host_active_fc4s(shost)[7] = 1; 4926 4927 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4928 spin_lock_irq(shost->host_lock); 4929 vport->load_flag &= ~FC_LOADING; 4930 spin_unlock_irq(shost->host_lock); 4931 } 4932 4933 /** 4934 * lpfc_stop_port_s3 - Stop SLI3 device port 4935 * @phba: pointer to lpfc hba data structure. 4936 * 4937 * This routine is invoked to stop an SLI3 device port, it stops the device 4938 * from generating interrupts and stops the device driver's timers for the 4939 * device. 4940 **/ 4941 static void 4942 lpfc_stop_port_s3(struct lpfc_hba *phba) 4943 { 4944 /* Clear all interrupt enable conditions */ 4945 writel(0, phba->HCregaddr); 4946 readl(phba->HCregaddr); /* flush */ 4947 /* Clear all pending interrupts */ 4948 writel(0xffffffff, phba->HAregaddr); 4949 readl(phba->HAregaddr); /* flush */ 4950 4951 /* Reset some HBA SLI setup states */ 4952 lpfc_stop_hba_timers(phba); 4953 phba->pport->work_port_events = 0; 4954 } 4955 4956 /** 4957 * lpfc_stop_port_s4 - Stop SLI4 device port 4958 * @phba: pointer to lpfc hba data structure. 4959 * 4960 * This routine is invoked to stop an SLI4 device port, it stops the device 4961 * from generating interrupts and stops the device driver's timers for the 4962 * device. 4963 **/ 4964 static void 4965 lpfc_stop_port_s4(struct lpfc_hba *phba) 4966 { 4967 /* Reset some HBA SLI4 setup states */ 4968 lpfc_stop_hba_timers(phba); 4969 if (phba->pport) 4970 phba->pport->work_port_events = 0; 4971 phba->sli4_hba.intr_enable = 0; 4972 } 4973 4974 /** 4975 * lpfc_stop_port - Wrapper function for stopping hba port 4976 * @phba: Pointer to HBA context object. 4977 * 4978 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4979 * the API jump table function pointer from the lpfc_hba struct. 4980 **/ 4981 void 4982 lpfc_stop_port(struct lpfc_hba *phba) 4983 { 4984 phba->lpfc_stop_port(phba); 4985 4986 if (phba->wq) 4987 flush_workqueue(phba->wq); 4988 } 4989 4990 /** 4991 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4992 * @phba: Pointer to hba for which this call is being executed. 4993 * 4994 * This routine starts the timer waiting for the FCF rediscovery to complete. 
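 * The wait period is LPFC_FCF_REDISCOVER_WAIT_TMO milliseconds; when it
 * expires, lpfc_sli4_fcf_redisc_wait_tmo() below queues an FCF rediscover
 * event for the worker thread, provided the timer was not cancelled first.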
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when the wait for FCF table rediscovery has timed
 * out. If new FCF record(s) have been discovered during the wait period, a
 * new FCF event is added to the FCOE async event list and the worker thread
 * is woken up to process it from the worker thread context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_vmid_poll - VMID timeout detection
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when a VM has issued no I/O for the specified
 * amount of time. When this situation is detected, the VMID has to be
 * deregistered from the switch and all the local resources freed. The VMID
 * will be reassigned to the VM once its I/O resumes.
 **/
static void
lpfc_vmid_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
	u32 wake_up = 0;

	/* check if there is a need to issue QFPA */
	if (phba->pport->vmid_priority_tagging) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
	}

	/* Is the vmid inactivity timer enabled */
	if (phba->pport->vmid_inactivity_timeout ||
	    phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
	}

	if (wake_up)
		lpfc_worker_wake_up(phba);

	/* restart the timer for the next iteration */
	mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
							LPFC_VMID_TIMER));
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code.
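 * Recognized codes (NONE, LOCAL, REMOTE, LR_LRR) are accepted silently;
 * any other value is reported with message 0398.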
5085 **/ 5086 static void 5087 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 5088 struct lpfc_acqe_link *acqe_link) 5089 { 5090 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 5091 case LPFC_ASYNC_LINK_FAULT_NONE: 5092 case LPFC_ASYNC_LINK_FAULT_LOCAL: 5093 case LPFC_ASYNC_LINK_FAULT_REMOTE: 5094 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 5095 break; 5096 default: 5097 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5098 "0398 Unknown link fault code: x%x\n", 5099 bf_get(lpfc_acqe_link_fault, acqe_link)); 5100 break; 5101 } 5102 } 5103 5104 /** 5105 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 5106 * @phba: pointer to lpfc hba data structure. 5107 * @acqe_link: pointer to the async link completion queue entry. 5108 * 5109 * This routine is to parse the SLI4 link attention type and translate it 5110 * into the base driver's link attention type coding. 5111 * 5112 * Return: Link attention type in terms of base driver's coding. 5113 **/ 5114 static uint8_t 5115 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 5116 struct lpfc_acqe_link *acqe_link) 5117 { 5118 uint8_t att_type; 5119 5120 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 5121 case LPFC_ASYNC_LINK_STATUS_DOWN: 5122 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 5123 att_type = LPFC_ATT_LINK_DOWN; 5124 break; 5125 case LPFC_ASYNC_LINK_STATUS_UP: 5126 /* Ignore physical link up events - wait for logical link up */ 5127 att_type = LPFC_ATT_RESERVED; 5128 break; 5129 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 5130 att_type = LPFC_ATT_LINK_UP; 5131 break; 5132 default: 5133 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5134 "0399 Invalid link attention type: x%x\n", 5135 bf_get(lpfc_acqe_link_status, acqe_link)); 5136 att_type = LPFC_ATT_RESERVED; 5137 break; 5138 } 5139 return att_type; 5140 } 5141 5142 /** 5143 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 5144 * @phba: pointer to lpfc hba data structure. 5145 * 5146 * This routine is to get an SLI3 FC port's link speed in Mbps. 5147 * 5148 * Return: link speed in terms of Mbps. 5149 **/ 5150 uint32_t 5151 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 5152 { 5153 uint32_t link_speed; 5154 5155 if (!lpfc_is_link_up(phba)) 5156 return 0; 5157 5158 if (phba->sli_rev <= LPFC_SLI_REV3) { 5159 switch (phba->fc_linkspeed) { 5160 case LPFC_LINK_SPEED_1GHZ: 5161 link_speed = 1000; 5162 break; 5163 case LPFC_LINK_SPEED_2GHZ: 5164 link_speed = 2000; 5165 break; 5166 case LPFC_LINK_SPEED_4GHZ: 5167 link_speed = 4000; 5168 break; 5169 case LPFC_LINK_SPEED_8GHZ: 5170 link_speed = 8000; 5171 break; 5172 case LPFC_LINK_SPEED_10GHZ: 5173 link_speed = 10000; 5174 break; 5175 case LPFC_LINK_SPEED_16GHZ: 5176 link_speed = 16000; 5177 break; 5178 default: 5179 link_speed = 0; 5180 } 5181 } else { 5182 if (phba->sli4_hba.link_state.logical_speed) 5183 link_speed = 5184 phba->sli4_hba.link_state.logical_speed; 5185 else 5186 link_speed = phba->sli4_hba.link_state.speed; 5187 } 5188 return link_speed; 5189 } 5190 5191 /** 5192 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 5193 * @phba: pointer to lpfc hba data structure. 5194 * @evt_code: asynchronous event code. 5195 * @speed_code: asynchronous event link speed code. 5196 * 5197 * This routine is to parse the giving SLI4 async event link speed code into 5198 * value of Mbps for the link speed. 5199 * 5200 * Return: link speed in terms of Mbps. 
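 * For example, an LPFC_TRAILER_CODE_FC event carrying LPFC_FC_LA_SPEED_32G
 * yields 32000, while an unrecognized speed code yields 0.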
5201 **/ 5202 static uint32_t 5203 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 5204 uint8_t speed_code) 5205 { 5206 uint32_t port_speed; 5207 5208 switch (evt_code) { 5209 case LPFC_TRAILER_CODE_LINK: 5210 switch (speed_code) { 5211 case LPFC_ASYNC_LINK_SPEED_ZERO: 5212 port_speed = 0; 5213 break; 5214 case LPFC_ASYNC_LINK_SPEED_10MBPS: 5215 port_speed = 10; 5216 break; 5217 case LPFC_ASYNC_LINK_SPEED_100MBPS: 5218 port_speed = 100; 5219 break; 5220 case LPFC_ASYNC_LINK_SPEED_1GBPS: 5221 port_speed = 1000; 5222 break; 5223 case LPFC_ASYNC_LINK_SPEED_10GBPS: 5224 port_speed = 10000; 5225 break; 5226 case LPFC_ASYNC_LINK_SPEED_20GBPS: 5227 port_speed = 20000; 5228 break; 5229 case LPFC_ASYNC_LINK_SPEED_25GBPS: 5230 port_speed = 25000; 5231 break; 5232 case LPFC_ASYNC_LINK_SPEED_40GBPS: 5233 port_speed = 40000; 5234 break; 5235 case LPFC_ASYNC_LINK_SPEED_100GBPS: 5236 port_speed = 100000; 5237 break; 5238 default: 5239 port_speed = 0; 5240 } 5241 break; 5242 case LPFC_TRAILER_CODE_FC: 5243 switch (speed_code) { 5244 case LPFC_FC_LA_SPEED_UNKNOWN: 5245 port_speed = 0; 5246 break; 5247 case LPFC_FC_LA_SPEED_1G: 5248 port_speed = 1000; 5249 break; 5250 case LPFC_FC_LA_SPEED_2G: 5251 port_speed = 2000; 5252 break; 5253 case LPFC_FC_LA_SPEED_4G: 5254 port_speed = 4000; 5255 break; 5256 case LPFC_FC_LA_SPEED_8G: 5257 port_speed = 8000; 5258 break; 5259 case LPFC_FC_LA_SPEED_10G: 5260 port_speed = 10000; 5261 break; 5262 case LPFC_FC_LA_SPEED_16G: 5263 port_speed = 16000; 5264 break; 5265 case LPFC_FC_LA_SPEED_32G: 5266 port_speed = 32000; 5267 break; 5268 case LPFC_FC_LA_SPEED_64G: 5269 port_speed = 64000; 5270 break; 5271 case LPFC_FC_LA_SPEED_128G: 5272 port_speed = 128000; 5273 break; 5274 case LPFC_FC_LA_SPEED_256G: 5275 port_speed = 256000; 5276 break; 5277 default: 5278 port_speed = 0; 5279 } 5280 break; 5281 default: 5282 port_speed = 0; 5283 } 5284 return port_speed; 5285 } 5286 5287 /** 5288 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 5289 * @phba: pointer to lpfc hba data structure. 5290 * @acqe_link: pointer to the async link completion queue entry. 5291 * 5292 * This routine is to handle the SLI4 asynchronous FCoE link event. 
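 * For a port in FC mode, a READ_TOPOLOGY mailbox command is actually issued
 * to retrieve topology information; for a port in FCoE mode, the relevant
 * fields are filled in from the ACQE and the READ_TOPOLOGY completion
 * handler is invoked directly without sending the command to the port.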
5293 **/ 5294 static void 5295 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 5296 struct lpfc_acqe_link *acqe_link) 5297 { 5298 struct lpfc_dmabuf *mp; 5299 LPFC_MBOXQ_t *pmb; 5300 MAILBOX_t *mb; 5301 struct lpfc_mbx_read_top *la; 5302 uint8_t att_type; 5303 int rc; 5304 5305 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 5306 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 5307 return; 5308 phba->fcoe_eventtag = acqe_link->event_tag; 5309 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5310 if (!pmb) { 5311 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5312 "0395 The mboxq allocation failed\n"); 5313 return; 5314 } 5315 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5316 if (!mp) { 5317 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5318 "0396 The lpfc_dmabuf allocation failed\n"); 5319 goto out_free_pmb; 5320 } 5321 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5322 if (!mp->virt) { 5323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5324 "0397 The mbuf allocation failed\n"); 5325 goto out_free_dmabuf; 5326 } 5327 5328 /* Cleanup any outstanding ELS commands */ 5329 lpfc_els_flush_all_cmd(phba); 5330 5331 /* Block ELS IOCBs until we have done process link event */ 5332 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5333 5334 /* Update link event statistics */ 5335 phba->sli.slistat.link_event++; 5336 5337 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5338 lpfc_read_topology(phba, pmb, mp); 5339 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5340 pmb->vport = phba->pport; 5341 5342 /* Keep the link status for extra SLI4 state machine reference */ 5343 phba->sli4_hba.link_state.speed = 5344 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 5345 bf_get(lpfc_acqe_link_speed, acqe_link)); 5346 phba->sli4_hba.link_state.duplex = 5347 bf_get(lpfc_acqe_link_duplex, acqe_link); 5348 phba->sli4_hba.link_state.status = 5349 bf_get(lpfc_acqe_link_status, acqe_link); 5350 phba->sli4_hba.link_state.type = 5351 bf_get(lpfc_acqe_link_type, acqe_link); 5352 phba->sli4_hba.link_state.number = 5353 bf_get(lpfc_acqe_link_number, acqe_link); 5354 phba->sli4_hba.link_state.fault = 5355 bf_get(lpfc_acqe_link_fault, acqe_link); 5356 phba->sli4_hba.link_state.logical_speed = 5357 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 5358 5359 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5360 "2900 Async FC/FCoE Link event - Speed:%dGBit " 5361 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 5362 "Logical speed:%dMbps Fault:%d\n", 5363 phba->sli4_hba.link_state.speed, 5364 phba->sli4_hba.link_state.topology, 5365 phba->sli4_hba.link_state.status, 5366 phba->sli4_hba.link_state.type, 5367 phba->sli4_hba.link_state.number, 5368 phba->sli4_hba.link_state.logical_speed, 5369 phba->sli4_hba.link_state.fault); 5370 /* 5371 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 5372 * topology info. Note: Optional for non FC-AL ports. 5373 */ 5374 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5375 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5376 if (rc == MBX_NOT_FINISHED) 5377 goto out_free_dmabuf; 5378 return; 5379 } 5380 /* 5381 * For FCoE Mode: fill in all the topology information we need and call 5382 * the READ_TOPOLOGY completion routine to continue without actually 5383 * sending the READ_TOPOLOGY mailbox command to the port. 
5384 */ 5385 /* Initialize completion status */ 5386 mb = &pmb->u.mb; 5387 mb->mbxStatus = MBX_SUCCESS; 5388
5389 /* Parse port fault information field */ 5390 lpfc_sli4_parse_latt_fault(phba, acqe_link); 5391
5392 /* Parse and translate link attention fields */ 5393 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 5394 la->eventTag = acqe_link->event_tag; 5395 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 5396 bf_set(lpfc_mbx_read_top_link_spd, la, 5397 (bf_get(lpfc_acqe_link_speed, acqe_link))); 5398
5399 /* Fake the following irrelevant fields */ 5400 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 5401 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 5402 bf_set(lpfc_mbx_read_top_il, la, 0); 5403 bf_set(lpfc_mbx_read_top_pb, la, 0); 5404 bf_set(lpfc_mbx_read_top_fa, la, 0); 5405 bf_set(lpfc_mbx_read_top_mm, la, 0); 5406
5407 /* Invoke the lpfc_handle_latt mailbox command callback function */ 5408 lpfc_mbx_cmpl_read_topology(phba, pmb); 5409
5410 return; 5411
5412 out_free_dmabuf: 5413 kfree(mp); 5414 out_free_pmb: 5415 mempool_free(pmb, phba->mbox_mem_pool); 5416 } 5417
5418 /** 5419 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read 5420 * topology. 5421 * @phba: pointer to lpfc hba data structure. 5422 * @speed_code: asynchronous event link speed code. 5423 * 5424 * This routine is to parse the given SLI4 async event link speed code into 5425 * the corresponding Read topology link speed value. 5426 * 5427 * Return: link speed in terms of Read topology. 5428 **/ 5429 static uint8_t 5430 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) 5431 { 5432 uint8_t port_speed; 5433
5434 switch (speed_code) { 5435 case LPFC_FC_LA_SPEED_1G: 5436 port_speed = LPFC_LINK_SPEED_1GHZ; 5437 break; 5438 case LPFC_FC_LA_SPEED_2G: 5439 port_speed = LPFC_LINK_SPEED_2GHZ; 5440 break; 5441 case LPFC_FC_LA_SPEED_4G: 5442 port_speed = LPFC_LINK_SPEED_4GHZ; 5443 break; 5444 case LPFC_FC_LA_SPEED_8G: 5445 port_speed = LPFC_LINK_SPEED_8GHZ; 5446 break; 5447 case LPFC_FC_LA_SPEED_16G: 5448 port_speed = LPFC_LINK_SPEED_16GHZ; 5449 break; 5450 case LPFC_FC_LA_SPEED_32G: 5451 port_speed = LPFC_LINK_SPEED_32GHZ; 5452 break; 5453 case LPFC_FC_LA_SPEED_64G: 5454 port_speed = LPFC_LINK_SPEED_64GHZ; 5455 break; 5456 case LPFC_FC_LA_SPEED_128G: 5457 port_speed = LPFC_LINK_SPEED_128GHZ; 5458 break; 5459 case LPFC_FC_LA_SPEED_256G: 5460 port_speed = LPFC_LINK_SPEED_256GHZ; 5461 break; 5462 default: 5463 port_speed = 0; 5464 break; 5465 } 5466
5467 return port_speed; 5468 } 5469
5470 void 5471 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba) 5472 { 5473 struct rxtable_entry *entry; 5474 int cnt = 0, head, tail, last, start; 5475
5476 head = atomic_read(&phba->rxtable_idx_head); 5477 tail = atomic_read(&phba->rxtable_idx_tail); 5478 if (!phba->rxtable || head == tail) { 5479 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 5480 "4411 Rxtable is empty\n"); 5481 return; 5482 } 5483 last = tail; 5484 start = head; 5485
5486 /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */ 5487 while (start != last) { 5488 if (start) 5489 start--; 5490 else 5491 start = LPFC_MAX_RXMONITOR_ENTRY - 1; 5492 entry = &phba->rxtable[start]; 5493 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5494 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld " 5495 "Lat %lld ASz %lld Info %02d BWUtil %d " 5496 "Int %d slot %d\n", 5497 cnt, entry->max_bytes_per_interval, 5498 entry->total_bytes, entry->rcv_bytes, 5499 entry->avg_io_latency, entry->avg_io_size, 5500 entry->cmf_info,
entry->timer_utilization, 5501 entry->timer_interval, start); 5502 cnt++; 5503 if (cnt >= LPFC_MAX_RXMONITOR_DUMP) 5504 return; 5505 } 5506 } 5507 5508 /** 5509 * lpfc_cgn_update_stat - Save data into congestion stats buffer 5510 * @phba: pointer to lpfc hba data structure. 5511 * @dtag: FPIN descriptor received 5512 * 5513 * Increment the FPIN received counter/time when it happens. 5514 */ 5515 void 5516 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) 5517 { 5518 struct lpfc_cgn_info *cp; 5519 struct tm broken; 5520 struct timespec64 cur_time; 5521 u32 cnt; 5522 u16 value; 5523 5524 /* Make sure we have a congestion info buffer */ 5525 if (!phba->cgn_i) 5526 return; 5527 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5528 ktime_get_real_ts64(&cur_time); 5529 time64_to_tm(cur_time.tv_sec, 0, &broken); 5530 5531 /* Update congestion statistics */ 5532 switch (dtag) { 5533 case ELS_DTAG_LNK_INTEGRITY: 5534 cnt = le32_to_cpu(cp->link_integ_notification); 5535 cnt++; 5536 cp->link_integ_notification = cpu_to_le32(cnt); 5537 5538 cp->cgn_stat_lnk_month = broken.tm_mon + 1; 5539 cp->cgn_stat_lnk_day = broken.tm_mday; 5540 cp->cgn_stat_lnk_year = broken.tm_year - 100; 5541 cp->cgn_stat_lnk_hour = broken.tm_hour; 5542 cp->cgn_stat_lnk_min = broken.tm_min; 5543 cp->cgn_stat_lnk_sec = broken.tm_sec; 5544 break; 5545 case ELS_DTAG_DELIVERY: 5546 cnt = le32_to_cpu(cp->delivery_notification); 5547 cnt++; 5548 cp->delivery_notification = cpu_to_le32(cnt); 5549 5550 cp->cgn_stat_del_month = broken.tm_mon + 1; 5551 cp->cgn_stat_del_day = broken.tm_mday; 5552 cp->cgn_stat_del_year = broken.tm_year - 100; 5553 cp->cgn_stat_del_hour = broken.tm_hour; 5554 cp->cgn_stat_del_min = broken.tm_min; 5555 cp->cgn_stat_del_sec = broken.tm_sec; 5556 break; 5557 case ELS_DTAG_PEER_CONGEST: 5558 cnt = le32_to_cpu(cp->cgn_peer_notification); 5559 cnt++; 5560 cp->cgn_peer_notification = cpu_to_le32(cnt); 5561 5562 cp->cgn_stat_peer_month = broken.tm_mon + 1; 5563 cp->cgn_stat_peer_day = broken.tm_mday; 5564 cp->cgn_stat_peer_year = broken.tm_year - 100; 5565 cp->cgn_stat_peer_hour = broken.tm_hour; 5566 cp->cgn_stat_peer_min = broken.tm_min; 5567 cp->cgn_stat_peer_sec = broken.tm_sec; 5568 break; 5569 case ELS_DTAG_CONGESTION: 5570 cnt = le32_to_cpu(cp->cgn_notification); 5571 cnt++; 5572 cp->cgn_notification = cpu_to_le32(cnt); 5573 5574 cp->cgn_stat_cgn_month = broken.tm_mon + 1; 5575 cp->cgn_stat_cgn_day = broken.tm_mday; 5576 cp->cgn_stat_cgn_year = broken.tm_year - 100; 5577 cp->cgn_stat_cgn_hour = broken.tm_hour; 5578 cp->cgn_stat_cgn_min = broken.tm_min; 5579 cp->cgn_stat_cgn_sec = broken.tm_sec; 5580 } 5581 if (phba->cgn_fpin_frequency && 5582 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5583 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5584 cp->cgn_stat_npm = value; 5585 } 5586 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5587 LPFC_CGN_CRC32_SEED); 5588 cp->cgn_info_crc = cpu_to_le32(value); 5589 } 5590 5591 /** 5592 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer 5593 * @phba: pointer to lpfc hba data structure. 5594 * 5595 * Save the congestion event data every minute. 5596 * On the hour collapse all the minute data into hour data. Every day 5597 * collapse all the hour data into daily data. Separate driver 5598 * and fabrc congestion event counters that will be saved out 5599 * to the registered congestion buffer every minute. 
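 * Note on layout (as implemented below): the congestion info buffer holds
 * three rings - LPFC_MIN_HOUR per-minute slots that are summed into one
 * hourly entry, LPFC_HOUR_DAY hourly entries that are summed into one
 * daily entry, and LPFC_MAX_CGN_DAYS daily entries that rotate once the
 * buffer wraps.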
5600 */ 5601 static void 5602 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) 5603 { 5604 struct lpfc_cgn_info *cp; 5605 struct tm broken; 5606 struct timespec64 cur_time; 5607 uint32_t i, index; 5608 uint16_t value, mvalue; 5609 uint64_t bps; 5610 uint32_t mbps; 5611 uint32_t dvalue, wvalue, lvalue, avalue; 5612 uint64_t latsum; 5613 __le16 *ptr; 5614 __le32 *lptr; 5615 __le16 *mptr; 5616 5617 /* Make sure we have a congestion info buffer */ 5618 if (!phba->cgn_i) 5619 return; 5620 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5621 5622 if (time_before(jiffies, phba->cgn_evt_timestamp)) 5623 return; 5624 phba->cgn_evt_timestamp = jiffies + 5625 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 5626 phba->cgn_evt_minute++; 5627 5628 /* We should get to this point in the routine on 1 minute intervals */ 5629 5630 ktime_get_real_ts64(&cur_time); 5631 time64_to_tm(cur_time.tv_sec, 0, &broken); 5632 5633 if (phba->cgn_fpin_frequency && 5634 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5635 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5636 cp->cgn_stat_npm = value; 5637 } 5638 5639 /* Read and clear the latency counters for this minute */ 5640 lvalue = atomic_read(&phba->cgn_latency_evt_cnt); 5641 latsum = atomic64_read(&phba->cgn_latency_evt); 5642 atomic_set(&phba->cgn_latency_evt_cnt, 0); 5643 atomic64_set(&phba->cgn_latency_evt, 0); 5644 5645 /* We need to store MB/sec bandwidth in the congestion information. 5646 * block_cnt is count of 512 byte blocks for the entire minute, 5647 * bps will get bytes per sec before finally converting to MB/sec. 5648 */ 5649 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; 5650 phba->rx_block_cnt = 0; 5651 mvalue = bps / (1024 * 1024); /* convert to MB/sec */ 5652 5653 /* Every minute */ 5654 /* cgn parameters */ 5655 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 5656 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 5657 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 5658 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 5659 5660 /* Fill in default LUN qdepth */ 5661 value = (uint16_t)(phba->pport->cfg_lun_queue_depth); 5662 cp->cgn_lunq = cpu_to_le16(value); 5663 5664 /* Record congestion buffer info - every minute 5665 * cgn_driver_evt_cnt (Driver events) 5666 * cgn_fabric_warn_cnt (Congestion Warnings) 5667 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) 5668 * cgn_fabric_alarm_cnt (Congestion Alarms) 5669 */ 5670 index = ++cp->cgn_index_minute; 5671 if (cp->cgn_index_minute == LPFC_MIN_HOUR) { 5672 cp->cgn_index_minute = 0; 5673 index = 0; 5674 } 5675 5676 /* Get the number of driver events in this sample and reset counter */ 5677 dvalue = atomic_read(&phba->cgn_driver_evt_cnt); 5678 atomic_set(&phba->cgn_driver_evt_cnt, 0); 5679 5680 /* Get the number of warning events - FPIN and Signal for this minute */ 5681 wvalue = 0; 5682 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || 5683 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5684 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5685 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); 5686 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 5687 5688 /* Get the number of alarm events - FPIN and Signal for this minute */ 5689 avalue = 0; 5690 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || 5691 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5692 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); 5693 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 5694 5695 /* Collect the driver, warning, alarm and latency counts for this 5696 * minute into the driver congestion buffer. 
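 * The latency slot holds the per-minute average (latsum / lvalue) and the
 * bandwidth slot holds the MB/sec value computed above; the counters are
 * truncated to the width of their slots.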
5697 */ 5698 ptr = &cp->cgn_drvr_min[index]; 5699 value = (uint16_t)dvalue; 5700 *ptr = cpu_to_le16(value); 5701
5702 ptr = &cp->cgn_warn_min[index]; 5703 value = (uint16_t)wvalue; 5704 *ptr = cpu_to_le16(value); 5705
5706 ptr = &cp->cgn_alarm_min[index]; 5707 value = (uint16_t)avalue; 5708 *ptr = cpu_to_le16(value); 5709
5710 lptr = &cp->cgn_latency_min[index]; 5711 if (lvalue) { 5712 lvalue = (uint32_t)div_u64(latsum, lvalue); 5713 *lptr = cpu_to_le32(lvalue); 5714 } else { 5715 *lptr = 0; 5716 } 5717
5718 /* Collect the bandwidth value into the driver's congestion buffer. */ 5719 mptr = &cp->cgn_bw_min[index]; 5720 *mptr = cpu_to_le16(mvalue); 5721
5722 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5723 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n", 5724 index, dvalue, wvalue, *lptr, mvalue, avalue); 5725
5726 /* Every hour */ 5727 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) { 5728 /* Record congestion buffer info - every hour 5729 * Collapse all minutes into an hour 5730 */ 5731 index = ++cp->cgn_index_hour; 5732 if (cp->cgn_index_hour == LPFC_HOUR_DAY) { 5733 cp->cgn_index_hour = 0; 5734 index = 0; 5735 } 5736
5737 dvalue = 0; 5738 wvalue = 0; 5739 lvalue = 0; 5740 avalue = 0; 5741 mvalue = 0; 5742 mbps = 0; 5743 for (i = 0; i < LPFC_MIN_HOUR; i++) { 5744 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]); 5745 wvalue += le16_to_cpu(cp->cgn_warn_min[i]); 5746 lvalue += le32_to_cpu(cp->cgn_latency_min[i]); 5747 mbps += le16_to_cpu(cp->cgn_bw_min[i]); 5748 avalue += le16_to_cpu(cp->cgn_alarm_min[i]); 5749 } 5750 if (lvalue) /* Avg of latency averages */ 5751 lvalue /= LPFC_MIN_HOUR; 5752 if (mbps) /* Avg of Bandwidth averages */ 5753 mvalue = mbps / LPFC_MIN_HOUR; 5754
5755 lptr = &cp->cgn_drvr_hr[index]; 5756 *lptr = cpu_to_le32(dvalue); 5757 lptr = &cp->cgn_warn_hr[index]; 5758 *lptr = cpu_to_le32(wvalue); 5759 lptr = &cp->cgn_latency_hr[index]; 5760 *lptr = cpu_to_le32(lvalue); 5761 mptr = &cp->cgn_bw_hr[index]; 5762 *mptr = cpu_to_le16(mvalue); 5763 lptr = &cp->cgn_alarm_hr[index]; 5764 *lptr = cpu_to_le32(avalue); 5765
5766 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5767 "2419 Congestion Info - hour " 5768 "(%d): %d %d %d %d %d\n", 5769 index, dvalue, wvalue, lvalue, mvalue, avalue); 5770 } 5771
5772 /* Every day */ 5773 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) { 5774 /* Record congestion buffer info - every day 5775 * Collapse all hours into a day. Rotate days 5776 * after LPFC_MAX_CGN_DAYS. 5777 */ 5778 index = ++cp->cgn_index_day; 5779 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { 5780 cp->cgn_index_day = 0; 5781 index = 0; 5782 } 5783
5784 /* Anytime we overwrite daily index 0, after we wrap, 5785 * we will be overwriting the oldest day, so we must 5786 * update the congestion data start time for that day. 5787 * That start time should have previously been saved after 5788 * we wrote the last day's worth of data.
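 * In other words, once HBA_CGN_DAY_WRAP is set, every new pass through
 * daily index 0 re-stamps the cgn_info_* start-time fields with the
 * timestamp captured when the previous full cycle completed (see the
 * ktime_get_real_ts64() call at the end of this block).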
5789 */ 5790 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { 5791 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); 5792 5793 cp->cgn_info_month = broken.tm_mon + 1; 5794 cp->cgn_info_day = broken.tm_mday; 5795 cp->cgn_info_year = broken.tm_year - 100; 5796 cp->cgn_info_hour = broken.tm_hour; 5797 cp->cgn_info_minute = broken.tm_min; 5798 cp->cgn_info_second = broken.tm_sec; 5799 5800 lpfc_printf_log 5801 (phba, KERN_INFO, LOG_CGN_MGMT, 5802 "2646 CGNInfo idx0 Start Time: " 5803 "%d/%d/%d %d:%d:%d\n", 5804 cp->cgn_info_day, cp->cgn_info_month, 5805 cp->cgn_info_year, cp->cgn_info_hour, 5806 cp->cgn_info_minute, cp->cgn_info_second); 5807 } 5808 5809 dvalue = 0; 5810 wvalue = 0; 5811 lvalue = 0; 5812 mvalue = 0; 5813 mbps = 0; 5814 avalue = 0; 5815 for (i = 0; i < LPFC_HOUR_DAY; i++) { 5816 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); 5817 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); 5818 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); 5819 mbps += le16_to_cpu(cp->cgn_bw_hr[i]); 5820 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); 5821 } 5822 if (lvalue) /* Avg of latency averages */ 5823 lvalue /= LPFC_HOUR_DAY; 5824 if (mbps) /* Avg of Bandwidth averages */ 5825 mvalue = mbps / LPFC_HOUR_DAY; 5826 5827 lptr = &cp->cgn_drvr_day[index]; 5828 *lptr = cpu_to_le32(dvalue); 5829 lptr = &cp->cgn_warn_day[index]; 5830 *lptr = cpu_to_le32(wvalue); 5831 lptr = &cp->cgn_latency_day[index]; 5832 *lptr = cpu_to_le32(lvalue); 5833 mptr = &cp->cgn_bw_day[index]; 5834 *mptr = cpu_to_le16(mvalue); 5835 lptr = &cp->cgn_alarm_day[index]; 5836 *lptr = cpu_to_le32(avalue); 5837 5838 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5839 "2420 Congestion Info - daily (%d): " 5840 "%d %d %d %d %d\n", 5841 index, dvalue, wvalue, lvalue, mvalue, avalue); 5842 5843 /* We just wrote LPFC_MAX_CGN_DAYS of data, 5844 * so we are wrapped on any data after this. 5845 * Save this as the start time for the next day. 5846 */ 5847 if (index == (LPFC_MAX_CGN_DAYS - 1)) { 5848 phba->hba_flag |= HBA_CGN_DAY_WRAP; 5849 ktime_get_real_ts64(&phba->cgn_daily_ts); 5850 } 5851 } 5852 5853 /* Use the frequency found in the last rcv'ed FPIN */ 5854 value = phba->cgn_fpin_frequency; 5855 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) 5856 cp->cgn_warn_freq = cpu_to_le16(value); 5857 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) 5858 cp->cgn_alarm_freq = cpu_to_le16(value); 5859 5860 /* Frequency (in ms) Signal Warning/Signal Congestion Notifications 5861 * are received by the HBA 5862 */ 5863 value = phba->cgn_sig_freq; 5864 5865 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5866 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5867 cp->cgn_warn_freq = cpu_to_le16(value); 5868 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5869 cp->cgn_alarm_freq = cpu_to_le16(value); 5870 5871 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5872 LPFC_CGN_CRC32_SEED); 5873 cp->cgn_info_crc = cpu_to_le32(lvalue); 5874 } 5875 5876 /** 5877 * lpfc_calc_cmf_latency - latency from start of rxate timer interval 5878 * @phba: The Hba for which this call is being executed. 5879 * 5880 * The routine calculates the latency from the beginning of the CMF timer 5881 * interval to the current point in time. It is called from IO completion 5882 * when we exceed our Bandwidth limitation for the time interval. 
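 *
 * Return: elapsed time in milliseconds since phba->cmf_latency was last
 * stamped for the current interval.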
5883 */ 5884 uint32_t 5885 lpfc_calc_cmf_latency(struct lpfc_hba *phba) 5886 { 5887 struct timespec64 cmpl_time; 5888 uint32_t msec = 0; 5889 5890 ktime_get_real_ts64(&cmpl_time); 5891 5892 /* This routine works on a ms granularity so sec and usec are 5893 * converted accordingly. 5894 */ 5895 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { 5896 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / 5897 NSEC_PER_MSEC; 5898 } else { 5899 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { 5900 msec = (cmpl_time.tv_sec - 5901 phba->cmf_latency.tv_sec) * MSEC_PER_SEC; 5902 msec += ((cmpl_time.tv_nsec - 5903 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); 5904 } else { 5905 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - 5906 1) * MSEC_PER_SEC; 5907 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + 5908 cmpl_time.tv_nsec) / NSEC_PER_MSEC); 5909 } 5910 } 5911 return msec; 5912 } 5913 5914 /** 5915 * lpfc_cmf_timer - This is the timer function for one congestion 5916 * rate interval. 5917 * @timer: Pointer to the high resolution timer that expired 5918 */ 5919 static enum hrtimer_restart 5920 lpfc_cmf_timer(struct hrtimer *timer) 5921 { 5922 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, 5923 cmf_timer); 5924 struct rxtable_entry *entry; 5925 uint32_t io_cnt; 5926 uint32_t head, tail; 5927 uint32_t busy, max_read; 5928 uint64_t total, rcv, lat, mbpi, extra; 5929 int timer_interval = LPFC_CMF_INTERVAL; 5930 uint32_t ms; 5931 struct lpfc_cgn_stat *cgs; 5932 int cpu; 5933 5934 /* Only restart the timer if congestion mgmt is on */ 5935 if (phba->cmf_active_mode == LPFC_CFG_OFF || 5936 !phba->cmf_latency.tv_sec) { 5937 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5938 "6224 CMF timer exit: %d %lld\n", 5939 phba->cmf_active_mode, 5940 (uint64_t)phba->cmf_latency.tv_sec); 5941 return HRTIMER_NORESTART; 5942 } 5943 5944 /* If pport is not ready yet, just exit and wait for 5945 * the next timer cycle to hit. 5946 */ 5947 if (!phba->pport) 5948 goto skip; 5949 5950 /* Do not block SCSI IO while in the timer routine since 5951 * total_bytes will be cleared 5952 */ 5953 atomic_set(&phba->cmf_stop_io, 1); 5954 5955 /* First we need to calculate the actual ms between 5956 * the last timer interrupt and this one. We ask for 5957 * LPFC_CMF_INTERVAL, however the actual time may 5958 * vary depending on system overhead. 5959 */ 5960 ms = lpfc_calc_cmf_latency(phba); 5961 5962 5963 /* Immediately after we calculate the time since the last 5964 * timer interrupt, set the start time for the next 5965 * interrupt 5966 */ 5967 ktime_get_real_ts64(&phba->cmf_latency); 5968 5969 phba->cmf_link_byte_count = 5970 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); 5971 5972 /* Collect all the stats from the prior timer interval */ 5973 total = 0; 5974 io_cnt = 0; 5975 lat = 0; 5976 rcv = 0; 5977 for_each_present_cpu(cpu) { 5978 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 5979 total += atomic64_xchg(&cgs->total_bytes, 0); 5980 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); 5981 lat += atomic64_xchg(&cgs->rx_latency, 0); 5982 rcv += atomic64_xchg(&cgs->rcv_bytes, 0); 5983 } 5984 5985 /* Before we issue another CMF_SYNC_WQE, retrieve the BW 5986 * returned from the last CMF_SYNC_WQE issued, from 5987 * cmf_last_sync_bw. This will be the target BW for 5988 * this next timer interval. 
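 * cmf_last_sync_bw is consumed here and re-zeroed immediately below so a
 * stale value from an earlier interval is never reused.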
5989 */ 5990 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 5991 phba->link_state != LPFC_LINK_DOWN && 5992 phba->hba_flag & HBA_SETUP) { 5993 mbpi = phba->cmf_last_sync_bw; 5994 phba->cmf_last_sync_bw = 0; 5995 extra = 0; 5996 5997 /* Calculate any extra bytes needed to account for the 5998 * timer accuracy. If we are less than LPFC_CMF_INTERVAL 5999 * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL 6000 * add an extra 2%. The goal is to equalize total with a 6001 * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1 6002 */ 6003 if (ms == LPFC_CMF_INTERVAL) 6004 extra = div_u64(total, 50); 6005 else if (ms < LPFC_CMF_INTERVAL) 6006 extra = div_u64(total, 33); 6007 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); 6008 } else { 6009 /* For Monitor mode or link down we want mbpi 6010 * to be the full link speed 6011 */ 6012 mbpi = phba->cmf_link_byte_count; 6013 } 6014 phba->cmf_timer_cnt++; 6015 6016 if (io_cnt) { 6017 /* Update congestion info buffer latency in us */ 6018 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); 6019 atomic64_add(lat, &phba->cgn_latency_evt); 6020 } 6021 busy = atomic_xchg(&phba->cmf_busy, 0); 6022 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); 6023 6024 /* Calculate MBPI for the next timer interval */ 6025 if (mbpi) { 6026 if (mbpi > phba->cmf_link_byte_count || 6027 phba->cmf_active_mode == LPFC_CFG_MONITOR) 6028 mbpi = phba->cmf_link_byte_count; 6029 6030 /* Change max_bytes_per_interval to what the prior 6031 * CMF_SYNC_WQE cmpl indicated. 6032 */ 6033 if (mbpi != phba->cmf_max_bytes_per_interval) 6034 phba->cmf_max_bytes_per_interval = mbpi; 6035 } 6036 6037 /* Save rxmonitor information for debug */ 6038 if (phba->rxtable) { 6039 head = atomic_xchg(&phba->rxtable_idx_head, 6040 LPFC_RXMONITOR_TABLE_IN_USE); 6041 entry = &phba->rxtable[head]; 6042 entry->total_bytes = total; 6043 entry->rcv_bytes = rcv; 6044 entry->cmf_busy = busy; 6045 entry->cmf_info = phba->cmf_active_info; 6046 if (io_cnt) { 6047 entry->avg_io_latency = div_u64(lat, io_cnt); 6048 entry->avg_io_size = div_u64(rcv, io_cnt); 6049 } else { 6050 entry->avg_io_latency = 0; 6051 entry->avg_io_size = 0; 6052 } 6053 entry->max_read_cnt = max_read; 6054 entry->io_cnt = io_cnt; 6055 entry->max_bytes_per_interval = mbpi; 6056 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 6057 entry->timer_utilization = phba->cmf_last_ts; 6058 else 6059 entry->timer_utilization = ms; 6060 entry->timer_interval = ms; 6061 phba->cmf_last_ts = 0; 6062 6063 /* Increment rxtable index */ 6064 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY; 6065 tail = atomic_read(&phba->rxtable_idx_tail); 6066 if (head == tail) { 6067 tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY; 6068 atomic_set(&phba->rxtable_idx_tail, tail); 6069 } 6070 atomic_set(&phba->rxtable_idx_head, head); 6071 } 6072 6073 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { 6074 /* If Monitor mode, check if we are oversubscribed 6075 * against the full line rate. 6076 */ 6077 if (mbpi && total > mbpi) 6078 atomic_inc(&phba->cgn_driver_evt_cnt); 6079 } 6080 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ 6081 6082 /* Each minute save Fabric and Driver congestion information */ 6083 lpfc_cgn_save_evt_cnt(phba); 6084 6085 /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the 6086 * minute, adjust our next timer interval, if needed, to ensure a 6087 * 1 minute granularity when we get the next timer interrupt. 
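 * In effect: if less than one full LPFC_CMF_INTERVAL remains before the
 * next minute boundary (cgn_evt_timestamp), the coming interval is
 * shortened to that remainder and cmf_link_byte_count is rescaled so the
 * byte budget matches the shorter window.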
6088 */ 6089 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL), 6090 phba->cgn_evt_timestamp)) { 6091 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp - 6092 jiffies); 6093 if (timer_interval <= 0) 6094 timer_interval = LPFC_CMF_INTERVAL; 6095
6096 /* If we adjust timer_interval, max_bytes_per_interval 6097 * needs to be adjusted as well. 6098 */ 6099 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 6100 timer_interval, 1000); 6101 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) 6102 phba->cmf_max_bytes_per_interval = 6103 phba->cmf_link_byte_count; 6104 } 6105
6106 /* Since total_bytes has already been zeroed, it's okay to unblock 6107 * after max_bytes_per_interval is setup. 6108 */ 6109 if (atomic_xchg(&phba->cmf_bw_wait, 0)) 6110 queue_work(phba->wq, &phba->unblock_request_work); 6111
6112 /* SCSI IO is now unblocked */ 6113 atomic_set(&phba->cmf_stop_io, 0); 6114
6115 skip: 6116 hrtimer_forward_now(timer, 6117 ktime_set(0, timer_interval * NSEC_PER_MSEC)); 6118 return HRTIMER_RESTART; 6119 } 6120
6121 #define trunk_link_status(__idx)\ 6122 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6123 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ 6124 "Link up" : "Link down") : "NA" 6125 /* Did port __idx report an error? */ 6126 #define trunk_port_fault(__idx)\ 6127 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6128 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA" 6129
6130 static void 6131 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 6132 struct lpfc_acqe_fc_la *acqe_fc) 6133 { 6134 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 6135 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 6136
6137 phba->sli4_hba.link_state.speed = 6138 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6139 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6140
6141 phba->sli4_hba.link_state.logical_speed = 6142 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6143 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 6144 phba->fc_linkspeed = 6145 lpfc_async_link_speed_to_read_top( 6146 phba, 6147 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6148
6149 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 6150 phba->trunk_link.link0.state = 6151 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 6152 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6153 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 6154 } 6155 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 6156 phba->trunk_link.link1.state = 6157 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 6158 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6159 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 6160 } 6161 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 6162 phba->trunk_link.link2.state = 6163 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 6164 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6165 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 6166 } 6167 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 6168 phba->trunk_link.link3.state = 6169 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 6170 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6171 phba->trunk_link.link3.fault = port_fault & 0x8 ?
err : 0; 6172 } 6173 6174 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6175 "2910 Async FC Trunking Event - Speed:%d\n" 6176 "\tLogical speed:%d " 6177 "port0: %s port1: %s port2: %s port3: %s\n", 6178 phba->sli4_hba.link_state.speed, 6179 phba->sli4_hba.link_state.logical_speed, 6180 trunk_link_status(0), trunk_link_status(1), 6181 trunk_link_status(2), trunk_link_status(3)); 6182 6183 if (phba->cmf_active_mode != LPFC_CFG_OFF) 6184 lpfc_cmf_signal_init(phba); 6185 6186 if (port_fault) 6187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6188 "3202 trunk error:0x%x (%s) seen on port0:%s " 6189 /* 6190 * SLI-4: We have only 0xA error codes 6191 * defined as of now. print an appropriate 6192 * message in case driver needs to be updated. 6193 */ 6194 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 6195 "UNDEFINED. update driver." : trunk_errmsg[err], 6196 trunk_port_fault(0), trunk_port_fault(1), 6197 trunk_port_fault(2), trunk_port_fault(3)); 6198 } 6199 6200 6201 /** 6202 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 6203 * @phba: pointer to lpfc hba data structure. 6204 * @acqe_fc: pointer to the async fc completion queue entry. 6205 * 6206 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 6207 * that the event was received and then issue a read_topology mailbox command so 6208 * that the rest of the driver will treat it the same as SLI3. 6209 **/ 6210 static void 6211 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 6212 { 6213 struct lpfc_dmabuf *mp; 6214 LPFC_MBOXQ_t *pmb; 6215 MAILBOX_t *mb; 6216 struct lpfc_mbx_read_top *la; 6217 int rc; 6218 6219 if (bf_get(lpfc_trailer_type, acqe_fc) != 6220 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 6221 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6222 "2895 Non FC link Event detected.(%d)\n", 6223 bf_get(lpfc_trailer_type, acqe_fc)); 6224 return; 6225 } 6226 6227 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6228 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 6229 lpfc_update_trunk_link_status(phba, acqe_fc); 6230 return; 6231 } 6232 6233 /* Keep the link status for extra SLI4 state machine reference */ 6234 phba->sli4_hba.link_state.speed = 6235 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6236 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6237 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 6238 phba->sli4_hba.link_state.topology = 6239 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 6240 phba->sli4_hba.link_state.status = 6241 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 6242 phba->sli4_hba.link_state.type = 6243 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 6244 phba->sli4_hba.link_state.number = 6245 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 6246 phba->sli4_hba.link_state.fault = 6247 bf_get(lpfc_acqe_link_fault, acqe_fc); 6248 6249 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6250 LPFC_FC_LA_TYPE_LINK_DOWN) 6251 phba->sli4_hba.link_state.logical_speed = 0; 6252 else if (!phba->sli4_hba.conf_trunk) 6253 phba->sli4_hba.link_state.logical_speed = 6254 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6255 6256 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6257 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 6258 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 6259 "%dMbps Fault:%d\n", 6260 phba->sli4_hba.link_state.speed, 6261 phba->sli4_hba.link_state.topology, 6262 phba->sli4_hba.link_state.status, 6263 phba->sli4_hba.link_state.type, 6264 phba->sli4_hba.link_state.number, 6265 phba->sli4_hba.link_state.logical_speed, 6266 
phba->sli4_hba.link_state.fault); 6267 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6268 if (!pmb) { 6269 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6270 "2897 The mboxq allocation failed\n"); 6271 return; 6272 } 6273 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6274 if (!mp) { 6275 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6276 "2898 The lpfc_dmabuf allocation failed\n"); 6277 goto out_free_pmb; 6278 } 6279 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 6280 if (!mp->virt) { 6281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6282 "2899 The mbuf allocation failed\n"); 6283 goto out_free_dmabuf; 6284 } 6285 6286 /* Cleanup any outstanding ELS commands */ 6287 lpfc_els_flush_all_cmd(phba); 6288 6289 /* Block ELS IOCBs until we have done process link event */ 6290 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 6291 6292 /* Update link event statistics */ 6293 phba->sli.slistat.link_event++; 6294 6295 /* Create lpfc_handle_latt mailbox command from link ACQE */ 6296 lpfc_read_topology(phba, pmb, mp); 6297 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 6298 pmb->vport = phba->pport; 6299 6300 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 6301 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 6302 6303 switch (phba->sli4_hba.link_state.status) { 6304 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 6305 phba->link_flag |= LS_MDS_LINK_DOWN; 6306 break; 6307 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 6308 phba->link_flag |= LS_MDS_LOOPBACK; 6309 break; 6310 default: 6311 break; 6312 } 6313 6314 /* Initialize completion status */ 6315 mb = &pmb->u.mb; 6316 mb->mbxStatus = MBX_SUCCESS; 6317 6318 /* Parse port fault information field */ 6319 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 6320 6321 /* Parse and translate link attention fields */ 6322 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 6323 la->eventTag = acqe_fc->event_tag; 6324 6325 if (phba->sli4_hba.link_state.status == 6326 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 6327 bf_set(lpfc_mbx_read_top_att_type, la, 6328 LPFC_FC_LA_TYPE_UNEXP_WWPN); 6329 } else { 6330 bf_set(lpfc_mbx_read_top_att_type, la, 6331 LPFC_FC_LA_TYPE_LINK_DOWN); 6332 } 6333 /* Invoke the mailbox command callback function */ 6334 lpfc_mbx_cmpl_read_topology(phba, pmb); 6335 6336 return; 6337 } 6338 6339 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 6340 if (rc == MBX_NOT_FINISHED) 6341 goto out_free_dmabuf; 6342 return; 6343 6344 out_free_dmabuf: 6345 kfree(mp); 6346 out_free_pmb: 6347 mempool_free(pmb, phba->mbox_mem_pool); 6348 } 6349 6350 /** 6351 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 6352 * @phba: pointer to lpfc hba data structure. 6353 * @acqe_sli: pointer to the async SLI completion queue entry. 6354 * 6355 * This routine is to handle the SLI4 asynchronous SLI events. 
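 * The event types handled below are: over/normal temperature, misconfigured
 * optics, remote D_Port test, port congestion parameter change,
 * misconfigured FA-WWN, EEPROM failure and congestion (CGN) signals; any
 * other type is only logged.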
6356 **/ 6357 static void 6358 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 6359 { 6360 char port_name; 6361 char message[128]; 6362 uint8_t status; 6363 uint8_t evt_type; 6364 uint8_t operational = 0; 6365 struct temp_event temp_event_data; 6366 struct lpfc_acqe_misconfigured_event *misconfigured; 6367 struct lpfc_acqe_cgn_signal *cgn_signal; 6368 struct Scsi_Host *shost; 6369 struct lpfc_vport **vports; 6370 int rc, i, cnt; 6371 6372 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 6373 6374 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6375 "2901 Async SLI event - Type:%d, Event Data: x%08x " 6376 "x%08x x%08x x%08x\n", evt_type, 6377 acqe_sli->event_data1, acqe_sli->event_data2, 6378 acqe_sli->reserved, acqe_sli->trailer); 6379 6380 port_name = phba->Port[0]; 6381 if (port_name == 0x00) 6382 port_name = '?'; /* get port name is empty */ 6383 6384 switch (evt_type) { 6385 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 6386 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6387 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 6388 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6389 6390 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6391 "3190 Over Temperature:%d Celsius- Port Name %c\n", 6392 acqe_sli->event_data1, port_name); 6393 6394 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 6395 shost = lpfc_shost_from_vport(phba->pport); 6396 fc_host_post_vendor_event(shost, fc_get_event_number(), 6397 sizeof(temp_event_data), 6398 (char *)&temp_event_data, 6399 SCSI_NL_VID_TYPE_PCI 6400 | PCI_VENDOR_ID_EMULEX); 6401 break; 6402 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 6403 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6404 temp_event_data.event_code = LPFC_NORMAL_TEMP; 6405 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6406 6407 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6408 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 6409 acqe_sli->event_data1, port_name); 6410 6411 shost = lpfc_shost_from_vport(phba->pport); 6412 fc_host_post_vendor_event(shost, fc_get_event_number(), 6413 sizeof(temp_event_data), 6414 (char *)&temp_event_data, 6415 SCSI_NL_VID_TYPE_PCI 6416 | PCI_VENDOR_ID_EMULEX); 6417 break; 6418 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 6419 misconfigured = (struct lpfc_acqe_misconfigured_event *) 6420 &acqe_sli->event_data1; 6421 6422 /* fetch the status for this port */ 6423 switch (phba->sli4_hba.lnk_info.lnk_no) { 6424 case LPFC_LINK_NUMBER_0: 6425 status = bf_get(lpfc_sli_misconfigured_port0_state, 6426 &misconfigured->theEvent); 6427 operational = bf_get(lpfc_sli_misconfigured_port0_op, 6428 &misconfigured->theEvent); 6429 break; 6430 case LPFC_LINK_NUMBER_1: 6431 status = bf_get(lpfc_sli_misconfigured_port1_state, 6432 &misconfigured->theEvent); 6433 operational = bf_get(lpfc_sli_misconfigured_port1_op, 6434 &misconfigured->theEvent); 6435 break; 6436 case LPFC_LINK_NUMBER_2: 6437 status = bf_get(lpfc_sli_misconfigured_port2_state, 6438 &misconfigured->theEvent); 6439 operational = bf_get(lpfc_sli_misconfigured_port2_op, 6440 &misconfigured->theEvent); 6441 break; 6442 case LPFC_LINK_NUMBER_3: 6443 status = bf_get(lpfc_sli_misconfigured_port3_state, 6444 &misconfigured->theEvent); 6445 operational = bf_get(lpfc_sli_misconfigured_port3_op, 6446 &misconfigured->theEvent); 6447 break; 6448 default: 6449 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6450 "3296 " 6451 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 6452 "event: Invalid link %d", 6453 phba->sli4_hba.lnk_info.lnk_no); 6454 return; 6455 } 6456 6457 /* Skip if optic 
state unchanged */ 6458 if (phba->sli4_hba.lnk_info.optic_state == status) 6459 return; 6460 6461 switch (status) { 6462 case LPFC_SLI_EVENT_STATUS_VALID: 6463 sprintf(message, "Physical Link is functional"); 6464 break; 6465 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 6466 sprintf(message, "Optics faulted/incorrectly " 6467 "installed/not installed - Reseat optics, " 6468 "if issue not resolved, replace."); 6469 break; 6470 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 6471 sprintf(message, 6472 "Optics of two types installed - Remove one " 6473 "optic or install matching pair of optics."); 6474 break; 6475 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 6476 sprintf(message, "Incompatible optics - Replace with " 6477 "compatible optics for card to function."); 6478 break; 6479 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 6480 sprintf(message, "Unqualified optics - Replace with " 6481 "Avago optics for Warranty and Technical " 6482 "Support - Link is%s operational", 6483 (operational) ? " not" : ""); 6484 break; 6485 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 6486 sprintf(message, "Uncertified optics - Replace with " 6487 "Avago-certified optics to enable link " 6488 "operation - Link is%s operational", 6489 (operational) ? " not" : ""); 6490 break; 6491 default: 6492 /* firmware is reporting a status we don't know about */ 6493 sprintf(message, "Unknown event status x%02x", status); 6494 break; 6495 } 6496 6497 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 6498 rc = lpfc_sli4_read_config(phba); 6499 if (rc) { 6500 phba->lmt = 0; 6501 lpfc_printf_log(phba, KERN_ERR, 6502 LOG_TRACE_EVENT, 6503 "3194 Unable to retrieve supported " 6504 "speeds, rc = 0x%x\n", rc); 6505 } 6506 rc = lpfc_sli4_refresh_params(phba); 6507 if (rc) { 6508 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6509 "3174 Unable to update pls support, " 6510 "rc x%x\n", rc); 6511 } 6512 vports = lpfc_create_vport_work_array(phba); 6513 if (vports != NULL) { 6514 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6515 i++) { 6516 shost = lpfc_shost_from_vport(vports[i]); 6517 lpfc_host_supported_speeds_set(shost); 6518 } 6519 } 6520 lpfc_destroy_vport_work_array(phba, vports); 6521 6522 phba->sli4_hba.lnk_info.optic_state = status; 6523 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6524 "3176 Port Name %c %s\n", port_name, message); 6525 break; 6526 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 6527 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6528 "3192 Remote DPort Test Initiated - " 6529 "Event Data1:x%08x Event Data2: x%08x\n", 6530 acqe_sli->event_data1, acqe_sli->event_data2); 6531 break; 6532 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: 6533 /* Call FW to obtain active parms */ 6534 lpfc_sli4_cgn_parm_chg_evt(phba); 6535 break; 6536 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 6537 /* Misconfigured WWN. Reports that the SLI Port is configured 6538 * to use FA-WWN, but the attached device doesn’t support it. 6539 * No driver action is required. 6540 * Event Data1 - N.A, Event Data2 - N.A 6541 */ 6542 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, 6543 "2699 Misconfigured FA-WWN - Attached device does " 6544 "not support FA-WWN\n"); 6545 break; 6546 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 6547 /* EEPROM failure. 
No driver action is required */ 6548 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6549 "2518 EEPROM failure - " 6550 "Event Data1: x%08x Event Data2: x%08x\n", 6551 acqe_sli->event_data1, acqe_sli->event_data2); 6552 break; 6553 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: 6554 if (phba->cmf_active_mode == LPFC_CFG_OFF) 6555 break; 6556 cgn_signal = (struct lpfc_acqe_cgn_signal *) 6557 &acqe_sli->event_data1; 6558 phba->cgn_acqe_cnt++; 6559 6560 cnt = bf_get(lpfc_warn_acqe, cgn_signal); 6561 atomic64_add(cnt, &phba->cgn_acqe_stat.warn); 6562 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); 6563 6564 /* no threshold for CMF, even 1 signal will trigger an event */ 6565 6566 /* Alarm overrides warning, so check that first */ 6567 if (cgn_signal->alarm_cnt) { 6568 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6569 /* Keep track of alarm cnt for cgn_info */ 6570 atomic_add(cgn_signal->alarm_cnt, 6571 &phba->cgn_fabric_alarm_cnt); 6572 /* Keep track of alarm cnt for CMF_SYNC_WQE */ 6573 atomic_add(cgn_signal->alarm_cnt, 6574 &phba->cgn_sync_alarm_cnt); 6575 } 6576 } else if (cnt) { 6577 /* signal action needs to be taken */ 6578 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 6579 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6580 /* Keep track of warning cnt for cgn_info */ 6581 atomic_add(cnt, &phba->cgn_fabric_warn_cnt); 6582 /* Keep track of warning cnt for CMF_SYNC_WQE */ 6583 atomic_add(cnt, &phba->cgn_sync_warn_cnt); 6584 } 6585 } 6586 break; 6587 default: 6588 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6589 "3193 Unrecognized SLI event, type: 0x%x", 6590 evt_type); 6591 break; 6592 } 6593 } 6594 6595 /** 6596 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 6597 * @vport: pointer to vport data structure. 6598 * 6599 * This routine is to perform Clear Virtual Link (CVL) on a vport in 6600 * response to a CVL event. 6601 * 6602 * Return the pointer to the ndlp with the vport if successful, otherwise 6603 * return NULL. 6604 **/ 6605 static struct lpfc_nodelist * 6606 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 6607 { 6608 struct lpfc_nodelist *ndlp; 6609 struct Scsi_Host *shost; 6610 struct lpfc_hba *phba; 6611 6612 if (!vport) 6613 return NULL; 6614 phba = vport->phba; 6615 if (!phba) 6616 return NULL; 6617 ndlp = lpfc_findnode_did(vport, Fabric_DID); 6618 if (!ndlp) { 6619 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6620 ndlp = lpfc_nlp_init(vport, Fabric_DID); 6621 if (!ndlp) 6622 return NULL; 6623 /* Set the node type */ 6624 ndlp->nlp_type |= NLP_FABRIC; 6625 /* Put ndlp onto node list */ 6626 lpfc_enqueue_node(vport, ndlp); 6627 } 6628 if ((phba->pport->port_state < LPFC_FLOGI) && 6629 (phba->pport->port_state != LPFC_VPORT_FAILED)) 6630 return NULL; 6631 /* If virtual link is not yet instantiated ignore CVL */ 6632 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 6633 && (vport->port_state != LPFC_VPORT_FAILED)) 6634 return NULL; 6635 shost = lpfc_shost_from_vport(vport); 6636 if (!shost) 6637 return NULL; 6638 lpfc_linkdown_port(vport); 6639 lpfc_cleanup_pending_mbox(vport); 6640 spin_lock_irq(shost->host_lock); 6641 vport->fc_flag |= FC_VPORT_CVL_RCVD; 6642 spin_unlock_irq(shost->host_lock); 6643 6644 return ndlp; 6645 } 6646 6647 /** 6648 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 6649 * @phba: pointer to lpfc hba data structure. 6650 * 6651 * This routine is to perform Clear Virtual Link (CVL) on all vports in 6652 * response to a FCF dead event. 
6653 **/ 6654 static void 6655 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 6656 { 6657 struct lpfc_vport **vports; 6658 int i; 6659 6660 vports = lpfc_create_vport_work_array(phba); 6661 if (vports) 6662 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 6663 lpfc_sli4_perform_vport_cvl(vports[i]); 6664 lpfc_destroy_vport_work_array(phba, vports); 6665 } 6666 6667 /** 6668 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 6669 * @phba: pointer to lpfc hba data structure. 6670 * @acqe_fip: pointer to the async fcoe completion queue entry. 6671 * 6672 * This routine is to handle the SLI4 asynchronous fcoe event. 6673 **/ 6674 static void 6675 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 6676 struct lpfc_acqe_fip *acqe_fip) 6677 { 6678 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 6679 int rc; 6680 struct lpfc_vport *vport; 6681 struct lpfc_nodelist *ndlp; 6682 int active_vlink_present; 6683 struct lpfc_vport **vports; 6684 int i; 6685 6686 phba->fc_eventTag = acqe_fip->event_tag; 6687 phba->fcoe_eventtag = acqe_fip->event_tag; 6688 switch (event_type) { 6689 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 6690 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 6691 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 6692 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6693 "2546 New FCF event, evt_tag:x%x, " 6694 "index:x%x\n", 6695 acqe_fip->event_tag, 6696 acqe_fip->index); 6697 else 6698 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 6699 LOG_DISCOVERY, 6700 "2788 FCF param modified event, " 6701 "evt_tag:x%x, index:x%x\n", 6702 acqe_fip->event_tag, 6703 acqe_fip->index); 6704 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6705 /* 6706 * During period of FCF discovery, read the FCF 6707 * table record indexed by the event to update 6708 * FCF roundrobin failover eligible FCF bmask. 6709 */ 6710 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6711 LOG_DISCOVERY, 6712 "2779 Read FCF (x%x) for updating " 6713 "roundrobin FCF failover bmask\n", 6714 acqe_fip->index); 6715 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 6716 } 6717 6718 /* If the FCF discovery is in progress, do nothing. */ 6719 spin_lock_irq(&phba->hbalock); 6720 if (phba->hba_flag & FCF_TS_INPROG) { 6721 spin_unlock_irq(&phba->hbalock); 6722 break; 6723 } 6724 /* If fast FCF failover rescan event is pending, do nothing */ 6725 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 6726 spin_unlock_irq(&phba->hbalock); 6727 break; 6728 } 6729 6730 /* If the FCF has been in discovered state, do nothing. 
*/ 6731 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 6732 spin_unlock_irq(&phba->hbalock); 6733 break; 6734 } 6735 spin_unlock_irq(&phba->hbalock); 6736 6737 /* Otherwise, scan the entire FCF table and re-discover SAN */ 6738 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6739 "2770 Start FCF table scan per async FCF " 6740 "event, evt_tag:x%x, index:x%x\n", 6741 acqe_fip->event_tag, acqe_fip->index); 6742 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 6743 LPFC_FCOE_FCF_GET_FIRST); 6744 if (rc) 6745 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6746 "2547 Issue FCF scan read FCF mailbox " 6747 "command failed (x%x)\n", rc); 6748 break; 6749 6750 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 6751 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6752 "2548 FCF Table full count 0x%x tag 0x%x\n", 6753 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 6754 acqe_fip->event_tag); 6755 break; 6756 6757 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 6758 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6759 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6760 "2549 FCF (x%x) disconnected from network, " 6761 "tag:x%x\n", acqe_fip->index, 6762 acqe_fip->event_tag); 6763 /* 6764 * If we are in the middle of FCF failover process, clear 6765 * the corresponding FCF bit in the roundrobin bitmap. 6766 */ 6767 spin_lock_irq(&phba->hbalock); 6768 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 6769 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 6770 spin_unlock_irq(&phba->hbalock); 6771 /* Update FLOGI FCF failover eligible FCF bmask */ 6772 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 6773 break; 6774 } 6775 spin_unlock_irq(&phba->hbalock); 6776 6777 /* If the event is not for currently used fcf do nothing */ 6778 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 6779 break; 6780 6781 /* 6782 * Otherwise, request the port to rediscover the entire FCF 6783 * table for a fast recovery from case that the current FCF 6784 * is no longer valid as we are not in the middle of FCF 6785 * failover process already. 6786 */ 6787 spin_lock_irq(&phba->hbalock); 6788 /* Mark the fast failover process in progress */ 6789 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 6790 spin_unlock_irq(&phba->hbalock); 6791 6792 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6793 "2771 Start FCF fast failover process due to " 6794 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 6795 "\n", acqe_fip->event_tag, acqe_fip->index); 6796 rc = lpfc_sli4_redisc_fcf_table(phba); 6797 if (rc) { 6798 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6799 LOG_TRACE_EVENT, 6800 "2772 Issue FCF rediscover mailbox " 6801 "command failed, fail through to FCF " 6802 "dead event\n"); 6803 spin_lock_irq(&phba->hbalock); 6804 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 6805 spin_unlock_irq(&phba->hbalock); 6806 /* 6807 * Last resort will fail over by treating this 6808 * as a link down to FCF registration. 6809 */ 6810 lpfc_sli4_fcf_dead_failthrough(phba); 6811 } else { 6812 /* Reset FCF roundrobin bmask for new discovery */ 6813 lpfc_sli4_clear_fcf_rr_bmask(phba); 6814 /* 6815 * Handling fast FCF failover to a DEAD FCF event is 6816 * considered equalivant to receiving CVL to all vports. 
6817 */ 6818 lpfc_sli4_perform_all_vport_cvl(phba); 6819 } 6820 break; 6821 case LPFC_FIP_EVENT_TYPE_CVL: 6822 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6823 lpfc_printf_log(phba, KERN_ERR, 6824 LOG_TRACE_EVENT, 6825 "2718 Clear Virtual Link Received for VPI 0x%x" 6826 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 6827 6828 vport = lpfc_find_vport_by_vpid(phba, 6829 acqe_fip->index); 6830 ndlp = lpfc_sli4_perform_vport_cvl(vport); 6831 if (!ndlp) 6832 break; 6833 active_vlink_present = 0; 6834 6835 vports = lpfc_create_vport_work_array(phba); 6836 if (vports) { 6837 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6838 i++) { 6839 if ((!(vports[i]->fc_flag & 6840 FC_VPORT_CVL_RCVD)) && 6841 (vports[i]->port_state > LPFC_FDISC)) { 6842 active_vlink_present = 1; 6843 break; 6844 } 6845 } 6846 lpfc_destroy_vport_work_array(phba, vports); 6847 } 6848 6849 /* 6850 * Don't re-instantiate if vport is marked for deletion. 6851 * If we are here first then vport_delete is going to wait 6852 * for discovery to complete. 6853 */ 6854 if (!(vport->load_flag & FC_UNLOADING) && 6855 active_vlink_present) { 6856 /* 6857 * If there are other active VLinks present, 6858 * re-instantiate the Vlink using FDISC. 6859 */ 6860 mod_timer(&ndlp->nlp_delayfunc, 6861 jiffies + msecs_to_jiffies(1000)); 6862 spin_lock_irq(&ndlp->lock); 6863 ndlp->nlp_flag |= NLP_DELAY_TMO; 6864 spin_unlock_irq(&ndlp->lock); 6865 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 6866 vport->port_state = LPFC_FDISC; 6867 } else { 6868 /* 6869 * Otherwise, we request port to rediscover 6870 * the entire FCF table for a fast recovery 6871 * from possible case that the current FCF 6872 * is no longer valid if we are not already 6873 * in the FCF failover process. 6874 */ 6875 spin_lock_irq(&phba->hbalock); 6876 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6877 spin_unlock_irq(&phba->hbalock); 6878 break; 6879 } 6880 /* Mark the fast failover process in progress */ 6881 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 6882 spin_unlock_irq(&phba->hbalock); 6883 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6884 LOG_DISCOVERY, 6885 "2773 Start FCF failover per CVL, " 6886 "evt_tag:x%x\n", acqe_fip->event_tag); 6887 rc = lpfc_sli4_redisc_fcf_table(phba); 6888 if (rc) { 6889 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6890 LOG_TRACE_EVENT, 6891 "2774 Issue FCF rediscover " 6892 "mailbox command failed, " 6893 "through to CVL event\n"); 6894 spin_lock_irq(&phba->hbalock); 6895 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 6896 spin_unlock_irq(&phba->hbalock); 6897 /* 6898 * Last resort will be re-try on the 6899 * the current registered FCF entry. 6900 */ 6901 lpfc_retry_pport_discovery(phba); 6902 } else 6903 /* 6904 * Reset FCF roundrobin bmask for new 6905 * discovery. 6906 */ 6907 lpfc_sli4_clear_fcf_rr_bmask(phba); 6908 } 6909 break; 6910 default: 6911 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6912 "0288 Unknown FCoE event type 0x%x event tag " 6913 "0x%x\n", event_type, acqe_fip->event_tag); 6914 break; 6915 } 6916 } 6917 6918 /** 6919 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 6920 * @phba: pointer to lpfc hba data structure. 6921 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 6922 * 6923 * This routine is to handle the SLI4 asynchronous dcbx event. 
6924 **/ 6925 static void 6926 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 6927 struct lpfc_acqe_dcbx *acqe_dcbx) 6928 { 6929 phba->fc_eventTag = acqe_dcbx->event_tag; 6930 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6931 "0290 The SLI4 DCBX asynchronous event is not " 6932 "handled yet\n"); 6933 } 6934 6935 /** 6936 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 6937 * @phba: pointer to lpfc hba data structure. 6938 * @acqe_grp5: pointer to the async grp5 completion queue entry. 6939 * 6940 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 6941 * is an asynchronous notified of a logical link speed change. The Port 6942 * reports the logical link speed in units of 10Mbps. 6943 **/ 6944 static void 6945 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 6946 struct lpfc_acqe_grp5 *acqe_grp5) 6947 { 6948 uint16_t prev_ll_spd; 6949 6950 phba->fc_eventTag = acqe_grp5->event_tag; 6951 phba->fcoe_eventtag = acqe_grp5->event_tag; 6952 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 6953 phba->sli4_hba.link_state.logical_speed = 6954 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 6955 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6956 "2789 GRP5 Async Event: Updating logical link speed " 6957 "from %dMbps to %dMbps\n", prev_ll_spd, 6958 phba->sli4_hba.link_state.logical_speed); 6959 } 6960 6961 /** 6962 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event 6963 * @phba: pointer to lpfc hba data structure. 6964 * 6965 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event 6966 * is an asynchronous notification of a request to reset CM stats. 6967 **/ 6968 static void 6969 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) 6970 { 6971 if (!phba->cgn_i) 6972 return; 6973 lpfc_init_congestion_stat(phba); 6974 } 6975 6976 /** 6977 * lpfc_cgn_params_val - Validate FW congestion parameters. 6978 * @phba: pointer to lpfc hba data structure. 6979 * @p_cfg_param: pointer to FW provided congestion parameters. 6980 * 6981 * This routine validates the congestion parameters passed 6982 * by the FW to the driver via an ACQE event. 6983 **/ 6984 static void 6985 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) 6986 { 6987 spin_lock_irq(&phba->hbalock); 6988 6989 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, 6990 LPFC_CFG_MONITOR)) { 6991 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 6992 "6225 CMF mode param out of range: %d\n", 6993 p_cfg_param->cgn_param_mode); 6994 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; 6995 } 6996 6997 spin_unlock_irq(&phba->hbalock); 6998 } 6999 7000 /** 7001 * lpfc_cgn_params_parse - Process a FW cong parm change event 7002 * @phba: pointer to lpfc hba data structure. 7003 * @p_cgn_param: pointer to a data buffer with the FW cong params. 7004 * @len: the size of pdata in bytes. 7005 * 7006 * This routine validates the congestion management buffer signature 7007 * from the FW, validates the contents and makes corrections for 7008 * valid, in-range values. If the signature magic is correct and 7009 * after parameter validation, the contents are copied to the driver's 7010 * @phba structure. If the magic is incorrect, an error message is 7011 * logged. 
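 * When the active mode changes, the switch below also carries out the
 * transition: turning CMF on starts congestion management (lpfc_cmf_start)
 * and, with the link up, issues an EDC ELS; turning it off stops it
 * (lpfc_cmf_stop); MANAGED to MONITOR restores the full byte budget and
 * unblocks held IO; MONITOR to MANAGED re-initializes the congestion
 * signal exchange (lpfc_cmf_signal_init).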
7012 **/ 7013 static void 7014 lpfc_cgn_params_parse(struct lpfc_hba *phba, 7015 struct lpfc_cgn_param *p_cgn_param, uint32_t len) 7016 { 7017 struct lpfc_cgn_info *cp; 7018 uint32_t crc, oldmode; 7019 7020 /* Make sure the FW has encoded the correct magic number to 7021 * validate the congestion parameter in FW memory. 7022 */ 7023 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { 7024 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7025 "4668 FW cgn parm buffer data: " 7026 "magic 0x%x version %d mode %d " 7027 "level0 %d level1 %d " 7028 "level2 %d byte13 %d " 7029 "byte14 %d byte15 %d " 7030 "byte11 %d byte12 %d activeMode %d\n", 7031 p_cgn_param->cgn_param_magic, 7032 p_cgn_param->cgn_param_version, 7033 p_cgn_param->cgn_param_mode, 7034 p_cgn_param->cgn_param_level0, 7035 p_cgn_param->cgn_param_level1, 7036 p_cgn_param->cgn_param_level2, 7037 p_cgn_param->byte13, 7038 p_cgn_param->byte14, 7039 p_cgn_param->byte15, 7040 p_cgn_param->byte11, 7041 p_cgn_param->byte12, 7042 phba->cmf_active_mode); 7043 7044 oldmode = phba->cmf_active_mode; 7045 7046 /* Any parameters out of range are corrected to defaults 7047 * by this routine. No need to fail. 7048 */ 7049 lpfc_cgn_params_val(phba, p_cgn_param); 7050 7051 /* Parameters are verified, move them into driver storage */ 7052 spin_lock_irq(&phba->hbalock); 7053 memcpy(&phba->cgn_p, p_cgn_param, 7054 sizeof(struct lpfc_cgn_param)); 7055 7056 /* Update parameters in congestion info buffer now */ 7057 if (phba->cgn_i) { 7058 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 7059 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 7060 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 7061 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 7062 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 7063 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 7064 LPFC_CGN_CRC32_SEED); 7065 cp->cgn_info_crc = cpu_to_le32(crc); 7066 } 7067 spin_unlock_irq(&phba->hbalock); 7068 7069 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; 7070 7071 switch (oldmode) { 7072 case LPFC_CFG_OFF: 7073 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { 7074 /* Turning CMF on */ 7075 lpfc_cmf_start(phba); 7076 7077 if (phba->link_state >= LPFC_LINK_UP) { 7078 phba->cgn_reg_fpin = 7079 phba->cgn_init_reg_fpin; 7080 phba->cgn_reg_signal = 7081 phba->cgn_init_reg_signal; 7082 lpfc_issue_els_edc(phba->pport, 0); 7083 } 7084 } 7085 break; 7086 case LPFC_CFG_MANAGED: 7087 switch (phba->cgn_p.cgn_param_mode) { 7088 case LPFC_CFG_OFF: 7089 /* Turning CMF off */ 7090 lpfc_cmf_stop(phba); 7091 if (phba->link_state >= LPFC_LINK_UP) 7092 lpfc_issue_els_edc(phba->pport, 0); 7093 break; 7094 case LPFC_CFG_MONITOR: 7095 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 7096 "4661 Switch from MANAGED to " 7097 "`MONITOR mode\n"); 7098 phba->cmf_max_bytes_per_interval = 7099 phba->cmf_link_byte_count; 7100 7101 /* Resume blocked IO - unblock on workqueue */ 7102 queue_work(phba->wq, 7103 &phba->unblock_request_work); 7104 break; 7105 } 7106 break; 7107 case LPFC_CFG_MONITOR: 7108 switch (phba->cgn_p.cgn_param_mode) { 7109 case LPFC_CFG_OFF: 7110 /* Turning CMF off */ 7111 lpfc_cmf_stop(phba); 7112 if (phba->link_state >= LPFC_LINK_UP) 7113 lpfc_issue_els_edc(phba->pport, 0); 7114 break; 7115 case LPFC_CFG_MANAGED: 7116 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 7117 "4662 Switch from MONITOR to " 7118 "MANAGED mode\n"); 7119 lpfc_cmf_signal_init(phba); 7120 break; 7121 } 7122 break; 7123 } 7124 } else { 7125 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 
7126 "4669 FW cgn parm buf wrong magic 0x%x " 7127 "version %d\n", p_cgn_param->cgn_param_magic, 7128 p_cgn_param->cgn_param_version); 7129 } 7130 } 7131 7132 /** 7133 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters. 7134 * @phba: pointer to lpfc hba data structure. 7135 * 7136 * This routine issues a read_object mailbox command to 7137 * get the congestion management parameters from the FW 7138 * parses it and updates the driver maintained values. 7139 * 7140 * Returns 7141 * 0 if the object was empty 7142 * -Eval if an error was encountered 7143 * Count if bytes were read from object 7144 **/ 7145 int 7146 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba) 7147 { 7148 int ret = 0; 7149 struct lpfc_cgn_param *p_cgn_param = NULL; 7150 u32 *pdata = NULL; 7151 u32 len = 0; 7152 7153 /* Find out if the FW has a new set of congestion parameters. */ 7154 len = sizeof(struct lpfc_cgn_param); 7155 pdata = kzalloc(len, GFP_KERNEL); 7156 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME, 7157 pdata, len); 7158 7159 /* 0 means no data. A negative means error. A positive means 7160 * bytes were copied. 7161 */ 7162 if (!ret) { 7163 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7164 "4670 CGN RD OBJ returns no data\n"); 7165 goto rd_obj_err; 7166 } else if (ret < 0) { 7167 /* Some error. Just exit and return it to the caller.*/ 7168 goto rd_obj_err; 7169 } 7170 7171 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7172 "6234 READ CGN PARAMS Successful %d\n", len); 7173 7174 /* Parse data pointer over len and update the phba congestion 7175 * parameters with values passed back. The receive rate values 7176 * may have been altered in FW, but take no action here. 7177 */ 7178 p_cgn_param = (struct lpfc_cgn_param *)pdata; 7179 lpfc_cgn_params_parse(phba, p_cgn_param, len); 7180 7181 rd_obj_err: 7182 kfree(pdata); 7183 return ret; 7184 } 7185 7186 /** 7187 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event 7188 * @phba: pointer to lpfc hba data structure. 7189 * 7190 * The FW generated Async ACQE SLI event calls this routine when 7191 * the event type is an SLI Internal Port Event and the Event Code 7192 * indicates a change to the FW maintained congestion parameters. 7193 * 7194 * This routine executes a Read_Object mailbox call to obtain the 7195 * current congestion parameters maintained in FW and corrects 7196 * the driver's active congestion parameters. 7197 * 7198 * The acqe event is not passed because there is no further data 7199 * required. 7200 * 7201 * Returns nonzero error if event processing encountered an error. 7202 * Zero otherwise for success. 7203 **/ 7204 static int 7205 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba) 7206 { 7207 int ret = 0; 7208 7209 if (!phba->sli4_hba.pc_sli4_params.cmf) { 7210 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7211 "4664 Cgn Evt when E2E off. Drop event\n"); 7212 return -EACCES; 7213 } 7214 7215 /* If the event is claiming an empty object, it's ok. A write 7216 * could have cleared it. Only error is a negative return 7217 * status. 
7218 */ 7219 ret = lpfc_sli4_cgn_params_read(phba); 7220 if (ret < 0) { 7221 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7222 "4667 Error reading Cgn Params (%d)\n", 7223 ret); 7224 } else if (!ret) { 7225 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7226 "4673 CGN Event empty object.\n"); 7227 } 7228 return ret; 7229 } 7230 7231 /** 7232 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 7233 * @phba: pointer to lpfc hba data structure. 7234 * 7235 * This routine is invoked by the worker thread to process all the pending 7236 * SLI4 asynchronous events. 7237 **/ 7238 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 7239 { 7240 struct lpfc_cq_event *cq_event; 7241 unsigned long iflags; 7242 7243 /* First, declare the async event has been handled */ 7244 spin_lock_irqsave(&phba->hbalock, iflags); 7245 phba->hba_flag &= ~ASYNC_EVENT; 7246 spin_unlock_irqrestore(&phba->hbalock, iflags); 7247 7248 /* Now, handle all the async events */ 7249 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7250 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 7251 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 7252 cq_event, struct lpfc_cq_event, list); 7253 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, 7254 iflags); 7255 7256 /* Process the asynchronous event */ 7257 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 7258 case LPFC_TRAILER_CODE_LINK: 7259 lpfc_sli4_async_link_evt(phba, 7260 &cq_event->cqe.acqe_link); 7261 break; 7262 case LPFC_TRAILER_CODE_FCOE: 7263 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 7264 break; 7265 case LPFC_TRAILER_CODE_DCBX: 7266 lpfc_sli4_async_dcbx_evt(phba, 7267 &cq_event->cqe.acqe_dcbx); 7268 break; 7269 case LPFC_TRAILER_CODE_GRP5: 7270 lpfc_sli4_async_grp5_evt(phba, 7271 &cq_event->cqe.acqe_grp5); 7272 break; 7273 case LPFC_TRAILER_CODE_FC: 7274 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 7275 break; 7276 case LPFC_TRAILER_CODE_SLI: 7277 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 7278 break; 7279 case LPFC_TRAILER_CODE_CMSTAT: 7280 lpfc_sli4_async_cmstat_evt(phba); 7281 break; 7282 default: 7283 lpfc_printf_log(phba, KERN_ERR, 7284 LOG_TRACE_EVENT, 7285 "1804 Invalid asynchronous event code: " 7286 "x%x\n", bf_get(lpfc_trailer_code, 7287 &cq_event->cqe.mcqe_cmpl)); 7288 break; 7289 } 7290 7291 /* Free the completion event processed to the free pool */ 7292 lpfc_sli4_cq_event_release(phba, cq_event); 7293 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7294 } 7295 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 7296 } 7297 7298 /** 7299 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 7300 * @phba: pointer to lpfc hba data structure. 7301 * 7302 * This routine is invoked by the worker thread to process FCF table 7303 * rediscovery pending completion event. 
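 *
 * The handler clears the FCF_REDISC_EVT flag, arms fast failover by setting
 * FCF_REDISC_FOV, and then rescans the FCF table from the first entry; a
 * failure to issue the scan mailbox command is only logged.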
7304 **/ 7305 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 7306 { 7307 int rc; 7308 7309 spin_lock_irq(&phba->hbalock); 7310 /* Clear FCF rediscovery timeout event */ 7311 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 7312 /* Clear driver fast failover FCF record flag */ 7313 phba->fcf.failover_rec.flag = 0; 7314 /* Set state for FCF fast failover */ 7315 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 7316 spin_unlock_irq(&phba->hbalock); 7317 7318 /* Scan FCF table from the first entry to re-discover SAN */ 7319 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 7320 "2777 Start post-quiescent FCF table scan\n"); 7321 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 7322 if (rc) 7323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7324 "2747 Issue FCF scan read FCF mailbox " 7325 "command failed 0x%x\n", rc); 7326 } 7327 7328 /** 7329 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 7330 * @phba: pointer to lpfc hba data structure. 7331 * @dev_grp: The HBA PCI-Device group number. 7332 * 7333 * This routine is invoked to set up the per HBA PCI-Device group function 7334 * API jump table entries. 7335 * 7336 * Return: 0 if success, otherwise -ENODEV 7337 **/ 7338 int 7339 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7340 { 7341 int rc; 7342 7343 /* Set up lpfc PCI-device group */ 7344 phba->pci_dev_grp = dev_grp; 7345 7346 /* The LPFC_PCI_DEV_OC uses SLI4 */ 7347 if (dev_grp == LPFC_PCI_DEV_OC) 7348 phba->sli_rev = LPFC_SLI_REV4; 7349 7350 /* Set up device INIT API function jump table */ 7351 rc = lpfc_init_api_table_setup(phba, dev_grp); 7352 if (rc) 7353 return -ENODEV; 7354 /* Set up SCSI API function jump table */ 7355 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 7356 if (rc) 7357 return -ENODEV; 7358 /* Set up SLI API function jump table */ 7359 rc = lpfc_sli_api_table_setup(phba, dev_grp); 7360 if (rc) 7361 return -ENODEV; 7362 /* Set up MBOX API function jump table */ 7363 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 7364 if (rc) 7365 return -ENODEV; 7366 7367 return 0; 7368 } 7369 7370 /** 7371 * lpfc_log_intr_mode - Log the active interrupt mode 7372 * @phba: pointer to lpfc hba data structure. 7373 * @intr_mode: active interrupt mode adopted. 7374 * 7375 * This routine it invoked to log the currently used active interrupt mode 7376 * to the device. 7377 **/ 7378 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 7379 { 7380 switch (intr_mode) { 7381 case 0: 7382 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7383 "0470 Enable INTx interrupt mode.\n"); 7384 break; 7385 case 1: 7386 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7387 "0481 Enabled MSI interrupt mode.\n"); 7388 break; 7389 case 2: 7390 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7391 "0480 Enabled MSI-X interrupt mode.\n"); 7392 break; 7393 default: 7394 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7395 "0482 Illegal interrupt mode.\n"); 7396 break; 7397 } 7398 return; 7399 } 7400 7401 /** 7402 * lpfc_enable_pci_dev - Enable a generic PCI device. 7403 * @phba: pointer to lpfc hba data structure. 7404 * 7405 * This routine is invoked to enable the PCI device that is common to all 7406 * PCI devices. 
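 * It enables the device's memory resources, requests the memory-mapped BAR
 * regions under LPFC_DRIVER_NAME, enables bus mastering (and MWI where
 * possible), and saves the PCI state so it can be restored after EEH/AER
 * recovery. On PCIe devices a fundamental reset is requested for EEH recovery.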
7407 *
7408 * Return codes
7409 * 0 - successful
7410 * other values - error
7411 **/
7412 static int
7413 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7414 {
7415 struct pci_dev *pdev;
7416
7417 /* Obtain PCI device reference */
7418 if (!phba->pcidev)
7419 goto out_error;
7420 else
7421 pdev = phba->pcidev;
7422 /* Enable PCI device */
7423 if (pci_enable_device_mem(pdev))
7424 goto out_error;
7425 /* Request PCI resource for the device */
7426 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7427 goto out_disable_device;
7428 /* Set up device as PCI master and save state for EEH */
7429 pci_set_master(pdev);
7430 pci_try_set_mwi(pdev);
7431 pci_save_state(pdev);
7432
7433 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7434 if (pci_is_pcie(pdev))
7435 pdev->needs_freset = 1;
7436
7437 return 0;
7438
7439 out_disable_device:
7440 pci_disable_device(pdev);
7441 out_error:
7442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7443 "1401 Failed to enable pci device\n");
7444 return -ENODEV;
7445 }
7446
7447 /**
7448 * lpfc_disable_pci_dev - Disable a generic PCI device.
7449 * @phba: pointer to lpfc hba data structure.
7450 *
7451 * This routine is invoked to disable the PCI device that is common to all
7452 * PCI devices.
7453 **/
7454 static void
7455 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7456 {
7457 struct pci_dev *pdev;
7458
7459 /* Obtain PCI device reference */
7460 if (!phba->pcidev)
7461 return;
7462 else
7463 pdev = phba->pcidev;
7464 /* Release PCI resource and disable PCI device */
7465 pci_release_mem_regions(pdev);
7466 pci_disable_device(pdev);
7467
7468 return;
7469 }
7470
7471 /**
7472 * lpfc_reset_hba - Reset a hba
7473 * @phba: pointer to lpfc hba data structure.
7474 *
7475 * This routine is invoked to reset a hba device. It brings the HBA
7476 * offline, performs a board restart, and then brings the board back
7477 * online. lpfc_offline calls lpfc_sli_hba_down, which cleans up
7478 * outstanding mailbox commands.
7479 **/
7480 void
7481 lpfc_reset_hba(struct lpfc_hba *phba)
7482 {
7483 /* If resets are disabled then set error state and return. */
7484 if (!phba->cfg_enable_hba_reset) {
7485 phba->link_state = LPFC_HBA_ERROR;
7486 return;
7487 }
7488
7489 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7490 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7491 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7492 } else {
7493 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7494 lpfc_sli_flush_io_rings(phba);
7495 }
7496 lpfc_offline(phba);
7497 lpfc_sli_brdrestart(phba);
7498 lpfc_online(phba);
7499 lpfc_unblock_mgmt_io(phba);
7500 }
7501
7502 /**
7503 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7504 * @phba: pointer to lpfc hba data structure.
7505 *
7506 * This function reads the SR-IOV extended capability of the physical
7507 * function to determine how many virtual functions the device supports
7508 * (the PCI_SRIOV_TOTAL_VF field). It does not enable any virtual functions.
7509 * Returns the number of virtual functions supported by the device, or 0
7510 * if the device does not advertise the SR-IOV capability.
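 *
 * A minimal usage sketch (assumed, mirroring the SR-IOV probe done later in
 * the resource setup paths when phba->cfg_sriov_nr_virtfn is nonzero):
 *
 *	if (phba->cfg_sriov_nr_virtfn > 0 &&
 *	    lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn))
 *		phba->cfg_sriov_nr_virtfn = 0;
 *
 * lpfc_sli_probe_sriov_nr_virtfn() uses the value returned here to reject
 * requests that exceed what the device reports.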
7511 **/ 7512 uint16_t 7513 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 7514 { 7515 struct pci_dev *pdev = phba->pcidev; 7516 uint16_t nr_virtfn; 7517 int pos; 7518 7519 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 7520 if (pos == 0) 7521 return 0; 7522 7523 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 7524 return nr_virtfn; 7525 } 7526 7527 /** 7528 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 7529 * @phba: pointer to lpfc hba data structure. 7530 * @nr_vfn: number of virtual functions to be enabled. 7531 * 7532 * This function enables the PCI SR-IOV virtual functions to a physical 7533 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 7534 * enable the number of virtual functions to the physical function. As 7535 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 7536 * API call does not considered as an error condition for most of the device. 7537 **/ 7538 int 7539 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 7540 { 7541 struct pci_dev *pdev = phba->pcidev; 7542 uint16_t max_nr_vfn; 7543 int rc; 7544 7545 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 7546 if (nr_vfn > max_nr_vfn) { 7547 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7548 "3057 Requested vfs (%d) greater than " 7549 "supported vfs (%d)", nr_vfn, max_nr_vfn); 7550 return -EINVAL; 7551 } 7552 7553 rc = pci_enable_sriov(pdev, nr_vfn); 7554 if (rc) { 7555 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7556 "2806 Failed to enable sriov on this device " 7557 "with vfn number nr_vf:%d, rc:%d\n", 7558 nr_vfn, rc); 7559 } else 7560 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7561 "2807 Successful enable sriov on this device " 7562 "with vfn number nr_vf:%d\n", nr_vfn); 7563 return rc; 7564 } 7565 7566 static void 7567 lpfc_unblock_requests_work(struct work_struct *work) 7568 { 7569 struct lpfc_hba *phba = container_of(work, struct lpfc_hba, 7570 unblock_request_work); 7571 7572 lpfc_unblock_requests(phba); 7573 } 7574 7575 /** 7576 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 7577 * @phba: pointer to lpfc hba data structure. 7578 * 7579 * This routine is invoked to set up the driver internal resources before the 7580 * device specific resource setup to support the HBA device it attached to. 7581 * 7582 * Return codes 7583 * 0 - successful 7584 * other values - error 7585 **/ 7586 static int 7587 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 7588 { 7589 struct lpfc_sli *psli = &phba->sli; 7590 7591 /* 7592 * Driver resources common to all SLI revisions 7593 */ 7594 atomic_set(&phba->fast_event_count, 0); 7595 atomic_set(&phba->dbg_log_idx, 0); 7596 atomic_set(&phba->dbg_log_cnt, 0); 7597 atomic_set(&phba->dbg_log_dmping, 0); 7598 spin_lock_init(&phba->hbalock); 7599 7600 /* Initialize port_list spinlock */ 7601 spin_lock_init(&phba->port_list_lock); 7602 INIT_LIST_HEAD(&phba->port_list); 7603 7604 INIT_LIST_HEAD(&phba->work_list); 7605 init_waitqueue_head(&phba->wait_4_mlo_m_q); 7606 7607 /* Initialize the wait queue head for the kernel thread */ 7608 init_waitqueue_head(&phba->work_waitq); 7609 7610 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7611 "1403 Protocols supported %s %s %s\n", 7612 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 7613 "SCSI" : " "), 7614 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 7615 "NVME" : " "), 7616 (phba->nvmet_support ? 
"NVMET" : " ")); 7617 7618 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 7619 spin_lock_init(&phba->scsi_buf_list_get_lock); 7620 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 7621 spin_lock_init(&phba->scsi_buf_list_put_lock); 7622 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 7623 7624 /* Initialize the fabric iocb list */ 7625 INIT_LIST_HEAD(&phba->fabric_iocb_list); 7626 7627 /* Initialize list to save ELS buffers */ 7628 INIT_LIST_HEAD(&phba->elsbuf); 7629 7630 /* Initialize FCF connection rec list */ 7631 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 7632 7633 /* Initialize OAS configuration list */ 7634 spin_lock_init(&phba->devicelock); 7635 INIT_LIST_HEAD(&phba->luns); 7636 7637 /* MBOX heartbeat timer */ 7638 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 7639 /* Fabric block timer */ 7640 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 7641 /* EA polling mode timer */ 7642 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 7643 /* Heartbeat timer */ 7644 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 7645 7646 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 7647 7648 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, 7649 lpfc_idle_stat_delay_work); 7650 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); 7651 return 0; 7652 } 7653 7654 /** 7655 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 7656 * @phba: pointer to lpfc hba data structure. 7657 * 7658 * This routine is invoked to set up the driver internal resources specific to 7659 * support the SLI-3 HBA device it attached to. 7660 * 7661 * Return codes 7662 * 0 - successful 7663 * other values - error 7664 **/ 7665 static int 7666 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 7667 { 7668 int rc, entry_sz; 7669 7670 /* 7671 * Initialize timers used by driver 7672 */ 7673 7674 /* FCP polling mode timer */ 7675 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 7676 7677 /* Host attention work mask setup */ 7678 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 7679 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 7680 7681 /* Get all the module params for configuring this host */ 7682 lpfc_get_cfgparam(phba); 7683 /* Set up phase-1 common device driver resources */ 7684 7685 rc = lpfc_setup_driver_resource_phase1(phba); 7686 if (rc) 7687 return -ENODEV; 7688 7689 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 7690 phba->menlo_flag |= HBA_MENLO_SUPPORT; 7691 /* check for menlo minimum sg count */ 7692 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 7693 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 7694 } 7695 7696 if (!phba->sli.sli3_ring) 7697 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 7698 sizeof(struct lpfc_sli_ring), 7699 GFP_KERNEL); 7700 if (!phba->sli.sli3_ring) 7701 return -ENOMEM; 7702 7703 /* 7704 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 7705 * used to create the sg_dma_buf_pool must be dynamically calculated. 7706 */ 7707 7708 if (phba->sli_rev == LPFC_SLI_REV4) 7709 entry_sz = sizeof(struct sli4_sge); 7710 else 7711 entry_sz = sizeof(struct ulp_bde64); 7712 7713 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 7714 if (phba->cfg_enable_bg) { 7715 /* 7716 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 7717 * the FCP rsp, and a BDE for each. 
Since we have no control
7718 * over how many protection data segments the SCSI Layer
7719 * will hand us (i.e. there could be one for every block
7720 * in the IO), we just allocate enough BDEs to accommodate
7721 * our max amount and we need to limit lpfc_sg_seg_cnt to
7722 * minimize the risk of running out.
7723 */
7724 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7725 sizeof(struct fcp_rsp) +
7726 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7727
7728 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7729 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7730
7731 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7732 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7733 } else {
7734 /*
7735 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7736 * the FCP rsp, a BDE for each, and a BDE for up to
7737 * cfg_sg_seg_cnt data segments.
7738 */
7739 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7740 sizeof(struct fcp_rsp) +
7741 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7742
7743 /* Total BDEs in BPL for scsi_sg_list */
7744 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7745 }
7746
7747 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7748 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7749 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7750 phba->cfg_total_seg_cnt);
7751
7752 phba->max_vpi = LPFC_MAX_VPI;
7753 /* This will be set to correct value after config_port mbox */
7754 phba->max_vports = 0;
7755
7756 /*
7757 * Initialize the SLI Layer to run with lpfc HBAs.
7758 */
7759 lpfc_sli_setup(phba);
7760 lpfc_sli_queue_init(phba);
7761
7762 /* Allocate device driver memory */
7763 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7764 return -ENOMEM;
7765
7766 phba->lpfc_sg_dma_buf_pool =
7767 dma_pool_create("lpfc_sg_dma_buf_pool",
7768 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7769 BPL_ALIGN_SZ, 0);
7770
7771 if (!phba->lpfc_sg_dma_buf_pool)
7772 goto fail_free_mem;
7773
7774 phba->lpfc_cmd_rsp_buf_pool =
7775 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7776 &phba->pcidev->dev,
7777 sizeof(struct fcp_cmnd) +
7778 sizeof(struct fcp_rsp),
7779 BPL_ALIGN_SZ, 0);
7780
7781 if (!phba->lpfc_cmd_rsp_buf_pool)
7782 goto fail_free_dma_buf_pool;
7783
7784 /*
7785 * Enable sr-iov virtual functions if supported and configured
7786 * through the module parameter.
7787 */
7788 if (phba->cfg_sriov_nr_virtfn > 0) {
7789 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7790 phba->cfg_sriov_nr_virtfn);
7791 if (rc) {
7792 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7793 "2808 Requested number of SR-IOV "
7794 "virtual functions (%d) is not "
7795 "supported\n",
7796 phba->cfg_sriov_nr_virtfn);
7797 phba->cfg_sriov_nr_virtfn = 0;
7798 }
7799 }
7800
7801 return 0;
7802
7803 fail_free_dma_buf_pool:
7804 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7805 phba->lpfc_sg_dma_buf_pool = NULL;
7806 fail_free_mem:
7807 lpfc_mem_free(phba);
7808 return -ENOMEM;
7809 }
7810
7811 /**
7812 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7813 * @phba: pointer to lpfc hba data structure.
7814 *
7815 * This routine is invoked to unset the driver internal resources set up
7816 * specifically for supporting the SLI-3 HBA device it is attached to.
7817 **/ 7818 static void 7819 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 7820 { 7821 /* Free device driver memory allocated */ 7822 lpfc_mem_free_all(phba); 7823 7824 return; 7825 } 7826 7827 /** 7828 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 7829 * @phba: pointer to lpfc hba data structure. 7830 * 7831 * This routine is invoked to set up the driver internal resources specific to 7832 * support the SLI-4 HBA device it attached to. 7833 * 7834 * Return codes 7835 * 0 - successful 7836 * other values - error 7837 **/ 7838 static int 7839 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 7840 { 7841 LPFC_MBOXQ_t *mboxq; 7842 MAILBOX_t *mb; 7843 int rc, i, max_buf_size; 7844 int longs; 7845 int extra; 7846 uint64_t wwn; 7847 u32 if_type; 7848 u32 if_fam; 7849 7850 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 7851 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 7852 phba->sli4_hba.curr_disp_cpu = 0; 7853 7854 /* Get all the module params for configuring this host */ 7855 lpfc_get_cfgparam(phba); 7856 7857 /* Set up phase-1 common device driver resources */ 7858 rc = lpfc_setup_driver_resource_phase1(phba); 7859 if (rc) 7860 return -ENODEV; 7861 7862 /* Before proceed, wait for POST done and device ready */ 7863 rc = lpfc_sli4_post_status_check(phba); 7864 if (rc) 7865 return -ENODEV; 7866 7867 /* Allocate all driver workqueues here */ 7868 7869 /* The lpfc_wq workqueue for deferred irq use */ 7870 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 7871 7872 /* 7873 * Initialize timers used by driver 7874 */ 7875 7876 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 7877 7878 /* FCF rediscover timer */ 7879 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 7880 7881 /* CMF congestion timer */ 7882 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7883 phba->cmf_timer.function = lpfc_cmf_timer; 7884 7885 /* 7886 * Control structure for handling external multi-buffer mailbox 7887 * command pass-through. 7888 */ 7889 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 7890 sizeof(struct lpfc_mbox_ext_buf_ctx)); 7891 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 7892 7893 phba->max_vpi = LPFC_MAX_VPI; 7894 7895 /* This will be set to correct value after the read_config mbox */ 7896 phba->max_vports = 0; 7897 7898 /* Program the default value of vlan_id and fc_map */ 7899 phba->valid_vlan = 0; 7900 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 7901 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 7902 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 7903 7904 /* 7905 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 7906 * we will associate a new ring, for each EQ/CQ/WQ tuple. 7907 * The WQ create will allocate the ring. 7908 */ 7909 7910 /* Initialize buffer queue management fields */ 7911 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 7912 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 7913 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 7914 7915 /* for VMID idle timeout if VMID is enabled */ 7916 if (lpfc_is_vmid_enabled(phba)) 7917 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); 7918 7919 /* 7920 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
7921 */ 7922 /* Initialize the Abort buffer list used by driver */ 7923 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 7924 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 7925 7926 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 7927 /* Initialize the Abort nvme buffer list used by driver */ 7928 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 7929 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7930 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 7931 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 7932 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 7933 } 7934 7935 /* This abort list used by worker thread */ 7936 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 7937 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 7938 spin_lock_init(&phba->sli4_hba.asynce_list_lock); 7939 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); 7940 7941 /* 7942 * Initialize driver internal slow-path work queues 7943 */ 7944 7945 /* Driver internel slow-path CQ Event pool */ 7946 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 7947 /* Response IOCB work queue list */ 7948 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 7949 /* Asynchronous event CQ Event work queue list */ 7950 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 7951 /* Slow-path XRI aborted CQ Event work queue list */ 7952 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 7953 /* Receive queue CQ Event work queue list */ 7954 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 7955 7956 /* Initialize extent block lists. */ 7957 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 7958 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 7959 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 7960 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 7961 7962 /* Initialize mboxq lists. If the early init routines fail 7963 * these lists need to be correctly initialized. 7964 */ 7965 INIT_LIST_HEAD(&phba->sli.mboxq); 7966 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 7967 7968 /* initialize optic_state to 0xFF */ 7969 phba->sli4_hba.lnk_info.optic_state = 0xff; 7970 7971 /* Allocate device driver memory */ 7972 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 7973 if (rc) 7974 return -ENOMEM; 7975 7976 /* IF Type 2 ports get initialized now. */ 7977 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 7978 LPFC_SLI_INTF_IF_TYPE_2) { 7979 rc = lpfc_pci_function_reset(phba); 7980 if (unlikely(rc)) { 7981 rc = -ENODEV; 7982 goto out_free_mem; 7983 } 7984 phba->temp_sensor_support = 1; 7985 } 7986 7987 /* Create the bootstrap mailbox command */ 7988 rc = lpfc_create_bootstrap_mbox(phba); 7989 if (unlikely(rc)) 7990 goto out_free_mem; 7991 7992 /* Set up the host's endian order with the device. */ 7993 rc = lpfc_setup_endian_order(phba); 7994 if (unlikely(rc)) 7995 goto out_free_bsmbx; 7996 7997 /* Set up the hba's configuration parameters. */ 7998 rc = lpfc_sli4_read_config(phba); 7999 if (unlikely(rc)) 8000 goto out_free_bsmbx; 8001 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 8002 if (unlikely(rc)) 8003 goto out_free_bsmbx; 8004 8005 /* IF Type 0 ports get initialized now. 
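 * IF Type 2 (and later) ports were already reset above, before the
 * bootstrap mailbox was created.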
*/ 8006 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 8007 LPFC_SLI_INTF_IF_TYPE_0) { 8008 rc = lpfc_pci_function_reset(phba); 8009 if (unlikely(rc)) 8010 goto out_free_bsmbx; 8011 } 8012 8013 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8014 GFP_KERNEL); 8015 if (!mboxq) { 8016 rc = -ENOMEM; 8017 goto out_free_bsmbx; 8018 } 8019 8020 /* Check for NVMET being configured */ 8021 phba->nvmet_support = 0; 8022 if (lpfc_enable_nvmet_cnt) { 8023 8024 /* First get WWN of HBA instance */ 8025 lpfc_read_nv(phba, mboxq); 8026 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8027 if (rc != MBX_SUCCESS) { 8028 lpfc_printf_log(phba, KERN_ERR, 8029 LOG_TRACE_EVENT, 8030 "6016 Mailbox failed , mbxCmd x%x " 8031 "READ_NV, mbxStatus x%x\n", 8032 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8033 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 8034 mempool_free(mboxq, phba->mbox_mem_pool); 8035 rc = -EIO; 8036 goto out_free_bsmbx; 8037 } 8038 mb = &mboxq->u.mb; 8039 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 8040 sizeof(uint64_t)); 8041 wwn = cpu_to_be64(wwn); 8042 phba->sli4_hba.wwnn.u.name = wwn; 8043 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 8044 sizeof(uint64_t)); 8045 /* wwn is WWPN of HBA instance */ 8046 wwn = cpu_to_be64(wwn); 8047 phba->sli4_hba.wwpn.u.name = wwn; 8048 8049 /* Check to see if it matches any module parameter */ 8050 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 8051 if (wwn == lpfc_enable_nvmet[i]) { 8052 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 8053 if (lpfc_nvmet_mem_alloc(phba)) 8054 break; 8055 8056 phba->nvmet_support = 1; /* a match */ 8057 8058 lpfc_printf_log(phba, KERN_ERR, 8059 LOG_TRACE_EVENT, 8060 "6017 NVME Target %016llx\n", 8061 wwn); 8062 #else 8063 lpfc_printf_log(phba, KERN_ERR, 8064 LOG_TRACE_EVENT, 8065 "6021 Can't enable NVME Target." 8066 " NVME_TARGET_FC infrastructure" 8067 " is not in kernel\n"); 8068 #endif 8069 /* Not supported for NVMET */ 8070 phba->cfg_xri_rebalancing = 0; 8071 if (phba->irq_chann_mode == NHT_MODE) { 8072 phba->cfg_irq_chann = 8073 phba->sli4_hba.num_present_cpu; 8074 phba->cfg_hdw_queue = 8075 phba->sli4_hba.num_present_cpu; 8076 phba->irq_chann_mode = NORMAL_MODE; 8077 } 8078 break; 8079 } 8080 } 8081 } 8082 8083 lpfc_nvme_mod_param_dep(phba); 8084 8085 /* 8086 * Get sli4 parameters that override parameters from Port capabilities. 8087 * If this call fails, it isn't critical unless the SLI4 parameters come 8088 * back in conflict. 8089 */ 8090 rc = lpfc_get_sli4_parameters(phba, mboxq); 8091 if (rc) { 8092 if_type = bf_get(lpfc_sli_intf_if_type, 8093 &phba->sli4_hba.sli_intf); 8094 if_fam = bf_get(lpfc_sli_intf_sli_family, 8095 &phba->sli4_hba.sli_intf); 8096 if (phba->sli4_hba.extents_in_use && 8097 phba->sli4_hba.rpi_hdrs_in_use) { 8098 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8099 "2999 Unsupported SLI4 Parameters " 8100 "Extents and RPI headers enabled.\n"); 8101 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8102 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 8103 mempool_free(mboxq, phba->mbox_mem_pool); 8104 rc = -EIO; 8105 goto out_free_bsmbx; 8106 } 8107 } 8108 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8109 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 8110 mempool_free(mboxq, phba->mbox_mem_pool); 8111 rc = -EIO; 8112 goto out_free_bsmbx; 8113 } 8114 } 8115 8116 /* 8117 * 1 for cmd, 1 for rsp, NVME adds an extra one 8118 * for boundary conditions in its max_sgl_segment template. 
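 * In other words, 'extra' below is 2 for SCSI-only configurations and 3
 * when NVME is enabled, so the non-DIF path sizes the DMA pool for
 * cfg_sg_seg_cnt + extra SGEs.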
8119 */
8120 extra = 2;
8121 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8122 extra++;
8123
8124 /*
8125 * It doesn't matter what family our adapter is in, we are
8126 * limited to 2 Pages, 512 SGEs, for our SGL.
8127 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8128 */
8129 max_buf_size = (2 * SLI4_PAGE_SIZE);
8130
8131 /*
8132 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
8133 * used to create the sg_dma_buf_pool must be calculated.
8134 */
8135 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8136 /* Both cfg_enable_bg and cfg_external_dif code paths */
8137
8138 /*
8139 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8140 * the FCP rsp, and a SGE. Since we have no control
8141 * over how many protection segments the SCSI Layer
8142 * will hand us (i.e. there could be one for every block
8143 * in the IO), just allocate enough SGEs to accommodate
8144 * our max amount and we need to limit lpfc_sg_seg_cnt
8145 * to minimize the risk of running out.
8146 */
8147 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8148 sizeof(struct fcp_rsp) + max_buf_size;
8149
8150 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8151 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8152
8153 /*
8154 * If supporting DIF, reduce the seg count for scsi to
8155 * allow room for the DIF sges.
8156 */
8157 if (phba->cfg_enable_bg &&
8158 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8159 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8160 else
8161 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8162
8163 } else {
8164 /*
8165 * The scsi_buf for a regular I/O holds the FCP cmnd,
8166 * the FCP rsp, a SGE for each, and a SGE for up to
8167 * cfg_sg_seg_cnt data segments.
8168 */
8169 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8170 sizeof(struct fcp_rsp) +
8171 ((phba->cfg_sg_seg_cnt + extra) *
8172 sizeof(struct sli4_sge));
8173
8174 /* Total SGEs for scsi_sg_list */
8175 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8176 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8177
8178 /*
8179 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8180 * need to post 1 page for the SGL.
8181 */
8182 }
8183
8184 if (phba->cfg_xpsgl && !phba->nvmet_support)
8185 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8186 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8187 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8188 else
8189 phba->cfg_sg_dma_buf_size =
8190 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8191
8192 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8193 sizeof(struct sli4_sge);
8194
8195 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME.
*/ 8196 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8197 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 8198 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 8199 "6300 Reducing NVME sg segment " 8200 "cnt to %d\n", 8201 LPFC_MAX_NVME_SEG_CNT); 8202 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 8203 } else 8204 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 8205 } 8206 8207 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 8208 "9087 sg_seg_cnt:%d dmabuf_size:%d " 8209 "total:%d scsi:%d nvme:%d\n", 8210 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 8211 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 8212 phba->cfg_nvme_seg_cnt); 8213 8214 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 8215 i = phba->cfg_sg_dma_buf_size; 8216 else 8217 i = SLI4_PAGE_SIZE; 8218 8219 phba->lpfc_sg_dma_buf_pool = 8220 dma_pool_create("lpfc_sg_dma_buf_pool", 8221 &phba->pcidev->dev, 8222 phba->cfg_sg_dma_buf_size, 8223 i, 0); 8224 if (!phba->lpfc_sg_dma_buf_pool) 8225 goto out_free_bsmbx; 8226 8227 phba->lpfc_cmd_rsp_buf_pool = 8228 dma_pool_create("lpfc_cmd_rsp_buf_pool", 8229 &phba->pcidev->dev, 8230 sizeof(struct fcp_cmnd) + 8231 sizeof(struct fcp_rsp), 8232 i, 0); 8233 if (!phba->lpfc_cmd_rsp_buf_pool) 8234 goto out_free_sg_dma_buf; 8235 8236 mempool_free(mboxq, phba->mbox_mem_pool); 8237 8238 /* Verify OAS is supported */ 8239 lpfc_sli4_oas_verify(phba); 8240 8241 /* Verify RAS support on adapter */ 8242 lpfc_sli4_ras_init(phba); 8243 8244 /* Verify all the SLI4 queues */ 8245 rc = lpfc_sli4_queue_verify(phba); 8246 if (rc) 8247 goto out_free_cmd_rsp_buf; 8248 8249 /* Create driver internal CQE event pool */ 8250 rc = lpfc_sli4_cq_event_pool_create(phba); 8251 if (rc) 8252 goto out_free_cmd_rsp_buf; 8253 8254 /* Initialize sgl lists per host */ 8255 lpfc_init_sgl_list(phba); 8256 8257 /* Allocate and initialize active sgl array */ 8258 rc = lpfc_init_active_sgl_array(phba); 8259 if (rc) { 8260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8261 "1430 Failed to initialize sgl list.\n"); 8262 goto out_destroy_cq_event_pool; 8263 } 8264 rc = lpfc_sli4_init_rpi_hdrs(phba); 8265 if (rc) { 8266 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8267 "1432 Failed to initialize rpi headers.\n"); 8268 goto out_free_active_sgl; 8269 } 8270 8271 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 8272 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 8273 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 8274 GFP_KERNEL); 8275 if (!phba->fcf.fcf_rr_bmask) { 8276 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8277 "2759 Failed allocate memory for FCF round " 8278 "robin failover bmask\n"); 8279 rc = -ENOMEM; 8280 goto out_remove_rpi_hdrs; 8281 } 8282 8283 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 8284 sizeof(struct lpfc_hba_eq_hdl), 8285 GFP_KERNEL); 8286 if (!phba->sli4_hba.hba_eq_hdl) { 8287 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8288 "2572 Failed allocate memory for " 8289 "fast-path per-EQ handle array\n"); 8290 rc = -ENOMEM; 8291 goto out_free_fcf_rr_bmask; 8292 } 8293 8294 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 8295 sizeof(struct lpfc_vector_map_info), 8296 GFP_KERNEL); 8297 if (!phba->sli4_hba.cpu_map) { 8298 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8299 "3327 Failed allocate memory for msi-x " 8300 "interrupt vector mapping\n"); 8301 rc = -ENOMEM; 8302 goto out_free_hba_eq_hdl; 8303 } 8304 8305 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 8306 if 
(!phba->sli4_hba.eq_info) { 8307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8308 "3321 Failed allocation for per_cpu stats\n"); 8309 rc = -ENOMEM; 8310 goto out_free_hba_cpu_map; 8311 } 8312 8313 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, 8314 sizeof(*phba->sli4_hba.idle_stat), 8315 GFP_KERNEL); 8316 if (!phba->sli4_hba.idle_stat) { 8317 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8318 "3390 Failed allocation for idle_stat\n"); 8319 rc = -ENOMEM; 8320 goto out_free_hba_eq_info; 8321 } 8322 8323 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8324 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 8325 if (!phba->sli4_hba.c_stat) { 8326 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8327 "3332 Failed allocating per cpu hdwq stats\n"); 8328 rc = -ENOMEM; 8329 goto out_free_hba_idle_stat; 8330 } 8331 #endif 8332 8333 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); 8334 if (!phba->cmf_stat) { 8335 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8336 "3331 Failed allocating per cpu cgn stats\n"); 8337 rc = -ENOMEM; 8338 goto out_free_hba_hdwq_info; 8339 } 8340 8341 /* 8342 * Enable sr-iov virtual functions if supported and configured 8343 * through the module parameter. 8344 */ 8345 if (phba->cfg_sriov_nr_virtfn > 0) { 8346 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 8347 phba->cfg_sriov_nr_virtfn); 8348 if (rc) { 8349 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8350 "3020 Requested number of SR-IOV " 8351 "virtual functions (%d) is not " 8352 "supported\n", 8353 phba->cfg_sriov_nr_virtfn); 8354 phba->cfg_sriov_nr_virtfn = 0; 8355 } 8356 } 8357 8358 return 0; 8359 8360 out_free_hba_hdwq_info: 8361 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8362 free_percpu(phba->sli4_hba.c_stat); 8363 out_free_hba_idle_stat: 8364 #endif 8365 kfree(phba->sli4_hba.idle_stat); 8366 out_free_hba_eq_info: 8367 free_percpu(phba->sli4_hba.eq_info); 8368 out_free_hba_cpu_map: 8369 kfree(phba->sli4_hba.cpu_map); 8370 out_free_hba_eq_hdl: 8371 kfree(phba->sli4_hba.hba_eq_hdl); 8372 out_free_fcf_rr_bmask: 8373 kfree(phba->fcf.fcf_rr_bmask); 8374 out_remove_rpi_hdrs: 8375 lpfc_sli4_remove_rpi_hdrs(phba); 8376 out_free_active_sgl: 8377 lpfc_free_active_sgl(phba); 8378 out_destroy_cq_event_pool: 8379 lpfc_sli4_cq_event_pool_destroy(phba); 8380 out_free_cmd_rsp_buf: 8381 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 8382 phba->lpfc_cmd_rsp_buf_pool = NULL; 8383 out_free_sg_dma_buf: 8384 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 8385 phba->lpfc_sg_dma_buf_pool = NULL; 8386 out_free_bsmbx: 8387 lpfc_destroy_bootstrap_mbox(phba); 8388 out_free_mem: 8389 lpfc_mem_free(phba); 8390 return rc; 8391 } 8392 8393 /** 8394 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 8395 * @phba: pointer to lpfc hba data structure. 8396 * 8397 * This routine is invoked to unset the driver internal resources set up 8398 * specific for supporting the SLI-4 HBA device it attached to. 
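 * Resources are released in roughly the reverse order in which
 * lpfc_sli4_driver_resource_setup() allocated them, finishing with the
 * bootstrap mailbox region, the SLI memory pools and the FCF connection
 * record list.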
8399 **/ 8400 static void 8401 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 8402 { 8403 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 8404 8405 free_percpu(phba->sli4_hba.eq_info); 8406 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8407 free_percpu(phba->sli4_hba.c_stat); 8408 #endif 8409 free_percpu(phba->cmf_stat); 8410 kfree(phba->sli4_hba.idle_stat); 8411 8412 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 8413 kfree(phba->sli4_hba.cpu_map); 8414 phba->sli4_hba.num_possible_cpu = 0; 8415 phba->sli4_hba.num_present_cpu = 0; 8416 phba->sli4_hba.curr_disp_cpu = 0; 8417 cpumask_clear(&phba->sli4_hba.irq_aff_mask); 8418 8419 /* Free memory allocated for fast-path work queue handles */ 8420 kfree(phba->sli4_hba.hba_eq_hdl); 8421 8422 /* Free the allocated rpi headers. */ 8423 lpfc_sli4_remove_rpi_hdrs(phba); 8424 lpfc_sli4_remove_rpis(phba); 8425 8426 /* Free eligible FCF index bmask */ 8427 kfree(phba->fcf.fcf_rr_bmask); 8428 8429 /* Free the ELS sgl list */ 8430 lpfc_free_active_sgl(phba); 8431 lpfc_free_els_sgl_list(phba); 8432 lpfc_free_nvmet_sgl_list(phba); 8433 8434 /* Free the completion queue EQ event pool */ 8435 lpfc_sli4_cq_event_release_all(phba); 8436 lpfc_sli4_cq_event_pool_destroy(phba); 8437 8438 /* Release resource identifiers. */ 8439 lpfc_sli4_dealloc_resource_identifiers(phba); 8440 8441 /* Free the bsmbx region. */ 8442 lpfc_destroy_bootstrap_mbox(phba); 8443 8444 /* Free the SLI Layer memory with SLI4 HBAs */ 8445 lpfc_mem_free_all(phba); 8446 8447 /* Free the current connect table */ 8448 list_for_each_entry_safe(conn_entry, next_conn_entry, 8449 &phba->fcf_conn_rec_list, list) { 8450 list_del_init(&conn_entry->list); 8451 kfree(conn_entry); 8452 } 8453 8454 return; 8455 } 8456 8457 /** 8458 * lpfc_init_api_table_setup - Set up init api function jump table 8459 * @phba: The hba struct for which this call is being executed. 8460 * @dev_grp: The HBA PCI-Device group number. 8461 * 8462 * This routine sets up the device INIT interface API function jump table 8463 * in @phba struct. 8464 * 8465 * Returns: 0 - success, -ENODEV - failure. 8466 **/ 8467 int 8468 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8469 { 8470 phba->lpfc_hba_init_link = lpfc_hba_init_link; 8471 phba->lpfc_hba_down_link = lpfc_hba_down_link; 8472 phba->lpfc_selective_reset = lpfc_selective_reset; 8473 switch (dev_grp) { 8474 case LPFC_PCI_DEV_LP: 8475 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 8476 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 8477 phba->lpfc_stop_port = lpfc_stop_port_s3; 8478 break; 8479 case LPFC_PCI_DEV_OC: 8480 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 8481 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 8482 phba->lpfc_stop_port = lpfc_stop_port_s4; 8483 break; 8484 default: 8485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8486 "1431 Invalid HBA PCI-device group: 0x%x\n", 8487 dev_grp); 8488 return -ENODEV; 8489 } 8490 return 0; 8491 } 8492 8493 /** 8494 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 8495 * @phba: pointer to lpfc hba data structure. 8496 * 8497 * This routine is invoked to set up the driver internal resources after the 8498 * device specific resource setup to support the HBA device it attached to. 8499 * 8500 * Return codes 8501 * 0 - successful 8502 * other values - error 8503 **/ 8504 static int 8505 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 8506 { 8507 int error; 8508 8509 /* Startup the kernel thread for this host adapter. 
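 * The thread runs lpfc_do_work() and is woken through phba->work_waitq
 * whenever deferred work is queued for it.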
*/ 8510 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8511 "lpfc_worker_%d", phba->brd_no); 8512 if (IS_ERR(phba->worker_thread)) { 8513 error = PTR_ERR(phba->worker_thread); 8514 return error; 8515 } 8516 8517 return 0; 8518 } 8519 8520 /** 8521 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 8522 * @phba: pointer to lpfc hba data structure. 8523 * 8524 * This routine is invoked to unset the driver internal resources set up after 8525 * the device specific resource setup for supporting the HBA device it 8526 * attached to. 8527 **/ 8528 static void 8529 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 8530 { 8531 if (phba->wq) { 8532 flush_workqueue(phba->wq); 8533 destroy_workqueue(phba->wq); 8534 phba->wq = NULL; 8535 } 8536 8537 /* Stop kernel worker thread */ 8538 if (phba->worker_thread) 8539 kthread_stop(phba->worker_thread); 8540 } 8541 8542 /** 8543 * lpfc_free_iocb_list - Free iocb list. 8544 * @phba: pointer to lpfc hba data structure. 8545 * 8546 * This routine is invoked to free the driver's IOCB list and memory. 8547 **/ 8548 void 8549 lpfc_free_iocb_list(struct lpfc_hba *phba) 8550 { 8551 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 8552 8553 spin_lock_irq(&phba->hbalock); 8554 list_for_each_entry_safe(iocbq_entry, iocbq_next, 8555 &phba->lpfc_iocb_list, list) { 8556 list_del(&iocbq_entry->list); 8557 kfree(iocbq_entry); 8558 phba->total_iocbq_bufs--; 8559 } 8560 spin_unlock_irq(&phba->hbalock); 8561 8562 return; 8563 } 8564 8565 /** 8566 * lpfc_init_iocb_list - Allocate and initialize iocb list. 8567 * @phba: pointer to lpfc hba data structure. 8568 * @iocb_count: number of requested iocbs 8569 * 8570 * This routine is invoked to allocate and initizlize the driver's IOCB 8571 * list and set up the IOCB tag array accordingly. 8572 * 8573 * Return codes 8574 * 0 - successful 8575 * other values - error 8576 **/ 8577 int 8578 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 8579 { 8580 struct lpfc_iocbq *iocbq_entry = NULL; 8581 uint16_t iotag; 8582 int i; 8583 8584 /* Initialize and populate the iocb list per host. */ 8585 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 8586 for (i = 0; i < iocb_count; i++) { 8587 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 8588 if (iocbq_entry == NULL) { 8589 printk(KERN_ERR "%s: only allocated %d iocbs of " 8590 "expected %d count. Unloading driver.\n", 8591 __func__, i, iocb_count); 8592 goto out_free_iocbq; 8593 } 8594 8595 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 8596 if (iotag == 0) { 8597 kfree(iocbq_entry); 8598 printk(KERN_ERR "%s: failed to allocate IOTAG. " 8599 "Unloading driver.\n", __func__); 8600 goto out_free_iocbq; 8601 } 8602 iocbq_entry->sli4_lxritag = NO_XRI; 8603 iocbq_entry->sli4_xritag = NO_XRI; 8604 8605 spin_lock_irq(&phba->hbalock); 8606 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 8607 phba->total_iocbq_bufs++; 8608 spin_unlock_irq(&phba->hbalock); 8609 } 8610 8611 return 0; 8612 8613 out_free_iocbq: 8614 lpfc_free_iocb_list(phba); 8615 8616 return -ENOMEM; 8617 } 8618 8619 /** 8620 * lpfc_free_sgl_list - Free a given sgl list. 8621 * @phba: pointer to lpfc hba data structure. 8622 * @sglq_list: pointer to the head of sgl list. 8623 * 8624 * This routine is invoked to free a give sgl list and memory. 
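 *
 * The caller is expected to have already unlinked the entries from any
 * driver-owned list (for example lpfc_free_els_sgl_list() splices them off
 * under sgl_list_lock first); no locks are taken here.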
8625 **/ 8626 void 8627 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 8628 { 8629 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8630 8631 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 8632 list_del(&sglq_entry->list); 8633 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 8634 kfree(sglq_entry); 8635 } 8636 } 8637 8638 /** 8639 * lpfc_free_els_sgl_list - Free els sgl list. 8640 * @phba: pointer to lpfc hba data structure. 8641 * 8642 * This routine is invoked to free the driver's els sgl list and memory. 8643 **/ 8644 static void 8645 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 8646 { 8647 LIST_HEAD(sglq_list); 8648 8649 /* Retrieve all els sgls from driver list */ 8650 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 8651 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 8652 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 8653 8654 /* Now free the sgl list */ 8655 lpfc_free_sgl_list(phba, &sglq_list); 8656 } 8657 8658 /** 8659 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 8660 * @phba: pointer to lpfc hba data structure. 8661 * 8662 * This routine is invoked to free the driver's nvmet sgl list and memory. 8663 **/ 8664 static void 8665 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 8666 { 8667 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8668 LIST_HEAD(sglq_list); 8669 8670 /* Retrieve all nvmet sgls from driver list */ 8671 spin_lock_irq(&phba->hbalock); 8672 spin_lock(&phba->sli4_hba.sgl_list_lock); 8673 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 8674 spin_unlock(&phba->sli4_hba.sgl_list_lock); 8675 spin_unlock_irq(&phba->hbalock); 8676 8677 /* Now free the sgl list */ 8678 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 8679 list_del(&sglq_entry->list); 8680 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 8681 kfree(sglq_entry); 8682 } 8683 8684 /* Update the nvmet_xri_cnt to reflect no current sgls. 8685 * The next initialization cycle sets the count and allocates 8686 * the sgls over again. 8687 */ 8688 phba->sli4_hba.nvmet_xri_cnt = 0; 8689 } 8690 8691 /** 8692 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 8693 * @phba: pointer to lpfc hba data structure. 8694 * 8695 * This routine is invoked to allocate the driver's active sgl memory. 8696 * This array will hold the sglq_entry's for active IOs. 8697 **/ 8698 static int 8699 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 8700 { 8701 int size; 8702 size = sizeof(struct lpfc_sglq *); 8703 size *= phba->sli4_hba.max_cfg_param.max_xri; 8704 8705 phba->sli4_hba.lpfc_sglq_active_list = 8706 kzalloc(size, GFP_KERNEL); 8707 if (!phba->sli4_hba.lpfc_sglq_active_list) 8708 return -ENOMEM; 8709 return 0; 8710 } 8711 8712 /** 8713 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 8714 * @phba: pointer to lpfc hba data structure. 8715 * 8716 * This routine is invoked to walk through the array of active sglq entries 8717 * and free all of the resources. 8718 * This is just a place holder for now. 8719 **/ 8720 static void 8721 lpfc_free_active_sgl(struct lpfc_hba *phba) 8722 { 8723 kfree(phba->sli4_hba.lpfc_sglq_active_list); 8724 } 8725 8726 /** 8727 * lpfc_init_sgl_list - Allocate and initialize sgl list. 8728 * @phba: pointer to lpfc hba data structure. 8729 * 8730 * This routine is invoked to allocate and initizlize the driver's sgl 8731 * list and set up the sgl xritag tag array accordingly. 
8732 * 8733 **/ 8734 static void 8735 lpfc_init_sgl_list(struct lpfc_hba *phba) 8736 { 8737 /* Initialize and populate the sglq list per host/VF. */ 8738 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 8739 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 8740 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 8741 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 8742 8743 /* els xri-sgl book keeping */ 8744 phba->sli4_hba.els_xri_cnt = 0; 8745 8746 /* nvme xri-buffer book keeping */ 8747 phba->sli4_hba.io_xri_cnt = 0; 8748 } 8749 8750 /** 8751 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 8752 * @phba: pointer to lpfc hba data structure. 8753 * 8754 * This routine is invoked to post rpi header templates to the 8755 * port for those SLI4 ports that do not support extents. This routine 8756 * posts a PAGE_SIZE memory region to the port to hold up to 8757 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 8758 * and should be called only when interrupts are disabled. 8759 * 8760 * Return codes 8761 * 0 - successful 8762 * -ERROR - otherwise. 8763 **/ 8764 int 8765 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 8766 { 8767 int rc = 0; 8768 struct lpfc_rpi_hdr *rpi_hdr; 8769 8770 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 8771 if (!phba->sli4_hba.rpi_hdrs_in_use) 8772 return rc; 8773 if (phba->sli4_hba.extents_in_use) 8774 return -EIO; 8775 8776 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 8777 if (!rpi_hdr) { 8778 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8779 "0391 Error during rpi post operation\n"); 8780 lpfc_sli4_remove_rpis(phba); 8781 rc = -ENODEV; 8782 } 8783 8784 return rc; 8785 } 8786 8787 /** 8788 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 8789 * @phba: pointer to lpfc hba data structure. 8790 * 8791 * This routine is invoked to allocate a single 4KB memory region to 8792 * support rpis and stores them in the phba. This single region 8793 * provides support for up to 64 rpis. The region is used globally 8794 * by the device. 8795 * 8796 * Returns: 8797 * A valid rpi hdr on success. 8798 * A NULL pointer on any failure. 8799 **/ 8800 struct lpfc_rpi_hdr * 8801 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 8802 { 8803 uint16_t rpi_limit, curr_rpi_range; 8804 struct lpfc_dmabuf *dmabuf; 8805 struct lpfc_rpi_hdr *rpi_hdr; 8806 8807 /* 8808 * If the SLI4 port supports extents, posting the rpi header isn't 8809 * required. Set the expected maximum count and let the actual value 8810 * get set when extents are fully allocated. 8811 */ 8812 if (!phba->sli4_hba.rpi_hdrs_in_use) 8813 return NULL; 8814 if (phba->sli4_hba.extents_in_use) 8815 return NULL; 8816 8817 /* The limit on the logical index is just the max_rpi count. */ 8818 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 8819 8820 spin_lock_irq(&phba->hbalock); 8821 /* 8822 * Establish the starting RPI in this header block. The starting 8823 * rpi is normalized to a zero base because the physical rpi is 8824 * port based. 8825 */ 8826 curr_rpi_range = phba->sli4_hba.next_rpi; 8827 spin_unlock_irq(&phba->hbalock); 8828 8829 /* Reached full RPI range */ 8830 if (curr_rpi_range == rpi_limit) 8831 return NULL; 8832 8833 /* 8834 * First allocate the protocol header region for the port. The 8835 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
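 * dma_alloc_coherent() should already return a region aligned to the
 * allocation size, but the IS_ALIGNED() check below guards against an
 * unaligned buffer being handed back.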
8836 */ 8837 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8838 if (!dmabuf) 8839 return NULL; 8840 8841 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 8842 LPFC_HDR_TEMPLATE_SIZE, 8843 &dmabuf->phys, GFP_KERNEL); 8844 if (!dmabuf->virt) { 8845 rpi_hdr = NULL; 8846 goto err_free_dmabuf; 8847 } 8848 8849 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 8850 rpi_hdr = NULL; 8851 goto err_free_coherent; 8852 } 8853 8854 /* Save the rpi header data for cleanup later. */ 8855 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 8856 if (!rpi_hdr) 8857 goto err_free_coherent; 8858 8859 rpi_hdr->dmabuf = dmabuf; 8860 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 8861 rpi_hdr->page_count = 1; 8862 spin_lock_irq(&phba->hbalock); 8863 8864 /* The rpi_hdr stores the logical index only. */ 8865 rpi_hdr->start_rpi = curr_rpi_range; 8866 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 8867 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 8868 8869 spin_unlock_irq(&phba->hbalock); 8870 return rpi_hdr; 8871 8872 err_free_coherent: 8873 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 8874 dmabuf->virt, dmabuf->phys); 8875 err_free_dmabuf: 8876 kfree(dmabuf); 8877 return NULL; 8878 } 8879 8880 /** 8881 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 8882 * @phba: pointer to lpfc hba data structure. 8883 * 8884 * This routine is invoked to remove all memory resources allocated 8885 * to support rpis for SLI4 ports not supporting extents. This routine 8886 * presumes the caller has released all rpis consumed by fabric or port 8887 * logins and is prepared to have the header pages removed. 8888 **/ 8889 void 8890 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 8891 { 8892 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 8893 8894 if (!phba->sli4_hba.rpi_hdrs_in_use) 8895 goto exit; 8896 8897 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 8898 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 8899 list_del(&rpi_hdr->list); 8900 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 8901 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 8902 kfree(rpi_hdr->dmabuf); 8903 kfree(rpi_hdr); 8904 } 8905 exit: 8906 /* There are no rpis available to the port now. */ 8907 phba->sli4_hba.next_rpi = 0; 8908 } 8909 8910 /** 8911 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 8912 * @pdev: pointer to pci device data structure. 8913 * 8914 * This routine is invoked to allocate the driver hba data structure for an 8915 * HBA device. If the allocation is successful, the phba reference to the 8916 * PCI device data structure is set. 
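 * The board number is obtained from lpfc_get_instance() and is released
 * again through the lpfc_hba_index IDR in lpfc_hba_free().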
8917 * 8918 * Return codes 8919 * pointer to @phba - successful 8920 * NULL - error 8921 **/ 8922 static struct lpfc_hba * 8923 lpfc_hba_alloc(struct pci_dev *pdev) 8924 { 8925 struct lpfc_hba *phba; 8926 8927 /* Allocate memory for HBA structure */ 8928 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 8929 if (!phba) { 8930 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 8931 return NULL; 8932 } 8933 8934 /* Set reference to PCI device in HBA structure */ 8935 phba->pcidev = pdev; 8936 8937 /* Assign an unused board number */ 8938 phba->brd_no = lpfc_get_instance(); 8939 if (phba->brd_no < 0) { 8940 kfree(phba); 8941 return NULL; 8942 } 8943 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 8944 8945 spin_lock_init(&phba->ct_ev_lock); 8946 INIT_LIST_HEAD(&phba->ct_ev_waiters); 8947 8948 return phba; 8949 } 8950 8951 /** 8952 * lpfc_hba_free - Free driver hba data structure with a device. 8953 * @phba: pointer to lpfc hba data structure. 8954 * 8955 * This routine is invoked to free the driver hba data structure with an 8956 * HBA device. 8957 **/ 8958 static void 8959 lpfc_hba_free(struct lpfc_hba *phba) 8960 { 8961 if (phba->sli_rev == LPFC_SLI_REV4) 8962 kfree(phba->sli4_hba.hdwq); 8963 8964 /* Release the driver assigned board number */ 8965 idr_remove(&lpfc_hba_index, phba->brd_no); 8966 8967 /* Free memory allocated with sli3 rings */ 8968 kfree(phba->sli.sli3_ring); 8969 phba->sli.sli3_ring = NULL; 8970 8971 kfree(phba); 8972 return; 8973 } 8974 8975 /** 8976 * lpfc_create_shost - Create hba physical port with associated scsi host. 8977 * @phba: pointer to lpfc hba data structure. 8978 * 8979 * This routine is invoked to create HBA physical port and associate a SCSI 8980 * host with it. 8981 * 8982 * Return codes 8983 * 0 - successful 8984 * other values - error 8985 **/ 8986 static int 8987 lpfc_create_shost(struct lpfc_hba *phba) 8988 { 8989 struct lpfc_vport *vport; 8990 struct Scsi_Host *shost; 8991 8992 /* Initialize HBA FC structure */ 8993 phba->fc_edtov = FF_DEF_EDTOV; 8994 phba->fc_ratov = FF_DEF_RATOV; 8995 phba->fc_altov = FF_DEF_ALTOV; 8996 phba->fc_arbtov = FF_DEF_ARBTOV; 8997 8998 atomic_set(&phba->sdev_cnt, 0); 8999 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 9000 if (!vport) 9001 return -ENODEV; 9002 9003 shost = lpfc_shost_from_vport(vport); 9004 phba->pport = vport; 9005 9006 if (phba->nvmet_support) { 9007 /* Only 1 vport (pport) will support NVME target */ 9008 phba->targetport = NULL; 9009 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 9010 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 9011 "6076 NVME Target Found\n"); 9012 } 9013 9014 lpfc_debugfs_initialize(vport); 9015 /* Put reference to SCSI host to driver's device private data */ 9016 pci_set_drvdata(phba->pcidev, shost); 9017 9018 /* 9019 * At this point we are fully registered with PSA. In addition, 9020 * any initial discovery should be completed. 9021 */ 9022 vport->load_flag |= FC_ALLOW_FDMI; 9023 if (phba->cfg_enable_SmartSAN || 9024 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 9025 9026 /* Setup appropriate attribute masks */ 9027 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 9028 if (phba->cfg_enable_SmartSAN) 9029 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 9030 else 9031 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 9032 } 9033 return 0; 9034 } 9035 9036 /** 9037 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 9038 * @phba: pointer to lpfc hba data structure. 
9039 * 9040 * This routine is invoked to destroy HBA physical port and the associated 9041 * SCSI host. 9042 **/ 9043 static void 9044 lpfc_destroy_shost(struct lpfc_hba *phba) 9045 { 9046 struct lpfc_vport *vport = phba->pport; 9047 9048 /* Destroy physical port that associated with the SCSI host */ 9049 destroy_port(vport); 9050 9051 return; 9052 } 9053 9054 /** 9055 * lpfc_setup_bg - Setup Block guard structures and debug areas. 9056 * @phba: pointer to lpfc hba data structure. 9057 * @shost: the shost to be used to detect Block guard settings. 9058 * 9059 * This routine sets up the local Block guard protocol settings for @shost. 9060 * This routine also allocates memory for debugging bg buffers. 9061 **/ 9062 static void 9063 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 9064 { 9065 uint32_t old_mask; 9066 uint32_t old_guard; 9067 9068 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9069 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9070 "1478 Registering BlockGuard with the " 9071 "SCSI layer\n"); 9072 9073 old_mask = phba->cfg_prot_mask; 9074 old_guard = phba->cfg_prot_guard; 9075 9076 /* Only allow supported values */ 9077 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 9078 SHOST_DIX_TYPE0_PROTECTION | 9079 SHOST_DIX_TYPE1_PROTECTION); 9080 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 9081 SHOST_DIX_GUARD_CRC); 9082 9083 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 9084 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 9085 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 9086 9087 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9088 if ((old_mask != phba->cfg_prot_mask) || 9089 (old_guard != phba->cfg_prot_guard)) 9090 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9091 "1475 Registering BlockGuard with the " 9092 "SCSI layer: mask %d guard %d\n", 9093 phba->cfg_prot_mask, 9094 phba->cfg_prot_guard); 9095 9096 scsi_host_set_prot(shost, phba->cfg_prot_mask); 9097 scsi_host_set_guard(shost, phba->cfg_prot_guard); 9098 } else 9099 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9100 "1479 Not Registering BlockGuard with the SCSI " 9101 "layer, Bad protection parameters: %d %d\n", 9102 old_mask, old_guard); 9103 } 9104 } 9105 9106 /** 9107 * lpfc_post_init_setup - Perform necessary device post initialization setup. 9108 * @phba: pointer to lpfc hba data structure. 9109 * 9110 * This routine is invoked to perform all the necessary post initialization 9111 * setup for the device. 9112 **/ 9113 static void 9114 lpfc_post_init_setup(struct lpfc_hba *phba) 9115 { 9116 struct Scsi_Host *shost; 9117 struct lpfc_adapter_event_header adapter_event; 9118 9119 /* Get the default values for Model Name and Description */ 9120 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9121 9122 /* 9123 * hba setup may have changed the hba_queue_depth so we need to 9124 * adjust the value of can_queue. 
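 * The adjustment below mirrors cfg_hba_queue_depth into the SCSI
 * midlayer's can_queue, minus a fixed reserve of 10 commands
 * (presumably kept back for driver-internal use).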
9125 */ 9126 shost = pci_get_drvdata(phba->pcidev); 9127 shost->can_queue = phba->cfg_hba_queue_depth - 10; 9128 9129 lpfc_host_attrib_init(shost); 9130 9131 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9132 spin_lock_irq(shost->host_lock); 9133 lpfc_poll_start_timer(phba); 9134 spin_unlock_irq(shost->host_lock); 9135 } 9136 9137 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9138 "0428 Perform SCSI scan\n"); 9139 /* Send board arrival event to upper layer */ 9140 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 9141 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 9142 fc_host_post_vendor_event(shost, fc_get_event_number(), 9143 sizeof(adapter_event), 9144 (char *) &adapter_event, 9145 LPFC_NL_VENDOR_ID); 9146 return; 9147 } 9148 9149 /** 9150 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 9151 * @phba: pointer to lpfc hba data structure. 9152 * 9153 * This routine is invoked to set up the PCI device memory space for device 9154 * with SLI-3 interface spec. 9155 * 9156 * Return codes 9157 * 0 - successful 9158 * other values - error 9159 **/ 9160 static int 9161 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 9162 { 9163 struct pci_dev *pdev = phba->pcidev; 9164 unsigned long bar0map_len, bar2map_len; 9165 int i, hbq_count; 9166 void *ptr; 9167 int error; 9168 9169 if (!pdev) 9170 return -ENODEV; 9171 9172 /* Set the device DMA mask size */ 9173 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9174 if (error) 9175 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 9176 if (error) 9177 return error; 9178 error = -ENODEV; 9179 9180 /* Get the bus address of Bar0 and Bar2 and the number of bytes 9181 * required by each mapping. 9182 */ 9183 phba->pci_bar0_map = pci_resource_start(pdev, 0); 9184 bar0map_len = pci_resource_len(pdev, 0); 9185 9186 phba->pci_bar2_map = pci_resource_start(pdev, 2); 9187 bar2map_len = pci_resource_len(pdev, 2); 9188 9189 /* Map HBA SLIM to a kernel virtual address. */ 9190 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 9191 if (!phba->slim_memmap_p) { 9192 dev_printk(KERN_ERR, &pdev->dev, 9193 "ioremap failed for SLIM memory.\n"); 9194 goto out; 9195 } 9196 9197 /* Map HBA Control Registers to a kernel virtual address. 
*/ 9198 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 9199 if (!phba->ctrl_regs_memmap_p) { 9200 dev_printk(KERN_ERR, &pdev->dev, 9201 "ioremap failed for HBA control registers.\n"); 9202 goto out_iounmap_slim; 9203 } 9204 9205 /* Allocate memory for SLI-2 structures */ 9206 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9207 &phba->slim2p.phys, GFP_KERNEL); 9208 if (!phba->slim2p.virt) 9209 goto out_iounmap; 9210 9211 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 9212 phba->mbox_ext = (phba->slim2p.virt + 9213 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 9214 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 9215 phba->IOCBs = (phba->slim2p.virt + 9216 offsetof(struct lpfc_sli2_slim, IOCBs)); 9217 9218 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 9219 lpfc_sli_hbq_size(), 9220 &phba->hbqslimp.phys, 9221 GFP_KERNEL); 9222 if (!phba->hbqslimp.virt) 9223 goto out_free_slim; 9224 9225 hbq_count = lpfc_sli_hbq_count(); 9226 ptr = phba->hbqslimp.virt; 9227 for (i = 0; i < hbq_count; ++i) { 9228 phba->hbqs[i].hbq_virt = ptr; 9229 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 9230 ptr += (lpfc_hbq_defs[i]->entry_count * 9231 sizeof(struct lpfc_hbq_entry)); 9232 } 9233 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 9234 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 9235 9236 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 9237 9238 phba->MBslimaddr = phba->slim_memmap_p; 9239 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 9240 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 9241 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 9242 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 9243 9244 return 0; 9245 9246 out_free_slim: 9247 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9248 phba->slim2p.virt, phba->slim2p.phys); 9249 out_iounmap: 9250 iounmap(phba->ctrl_regs_memmap_p); 9251 out_iounmap_slim: 9252 iounmap(phba->slim_memmap_p); 9253 out: 9254 return error; 9255 } 9256 9257 /** 9258 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 9259 * @phba: pointer to lpfc hba data structure. 9260 * 9261 * This routine is invoked to unset the PCI device memory space for device 9262 * with SLI-3 interface spec. 9263 **/ 9264 static void 9265 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 9266 { 9267 struct pci_dev *pdev; 9268 9269 /* Obtain PCI device reference */ 9270 if (!phba->pcidev) 9271 return; 9272 else 9273 pdev = phba->pcidev; 9274 9275 /* Free coherent DMA memory allocated */ 9276 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 9277 phba->hbqslimp.virt, phba->hbqslimp.phys); 9278 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9279 phba->slim2p.virt, phba->slim2p.phys); 9280 9281 /* I/O memory unmap */ 9282 iounmap(phba->ctrl_regs_memmap_p); 9283 iounmap(phba->slim_memmap_p); 9284 9285 return; 9286 } 9287 9288 /** 9289 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 9290 * @phba: pointer to lpfc hba data structure. 9291 * 9292 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 9293 * done and check status. 9294 * 9295 * Return 0 if successful, otherwise -ENODEV. 
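 *
 * The wait is a bounded poll of the port semaphore register: up to 3000
 * reads with an msleep(10) between them, i.e. a budget of roughly 30
 * seconds before POST is treated as failed.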
9296 **/
9297 int
9298 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9299 {
9300 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9301 struct lpfc_register reg_data;
9302 int i, port_error = 0;
9303 uint32_t if_type;
9304
9305 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9306 memset(&reg_data, 0, sizeof(reg_data));
9307 if (!phba->sli4_hba.PSMPHRregaddr)
9308 return -ENODEV;
9309
9310 /* Wait up to 30 seconds for the SLI Port POST done and ready */
9311 for (i = 0; i < 3000; i++) {
9312 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9313 &portsmphr_reg.word0) ||
9314 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9315 /* Port has a fatal POST error, break out */
9316 port_error = -ENODEV;
9317 break;
9318 }
9319 if (LPFC_POST_STAGE_PORT_READY ==
9320 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9321 break;
9322 msleep(10);
9323 }
9324
9325 /*
9326 * If there was a port error during POST, then don't proceed with
9327 * other register reads as the data may not be valid. Just exit.
9328 */
9329 if (port_error) {
9330 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9331 "1408 Port Failed POST - portsmphr=0x%x, "
9332 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9333 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9334 portsmphr_reg.word0,
9335 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9336 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9337 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9338 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9339 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9340 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9341 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9342 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9343 } else {
9344 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9345 "2534 Device Info: SLIFamily=0x%x, "
9346 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9347 "SLIHint_2=0x%x, FT=0x%x\n",
9348 bf_get(lpfc_sli_intf_sli_family,
9349 &phba->sli4_hba.sli_intf),
9350 bf_get(lpfc_sli_intf_slirev,
9351 &phba->sli4_hba.sli_intf),
9352 bf_get(lpfc_sli_intf_if_type,
9353 &phba->sli4_hba.sli_intf),
9354 bf_get(lpfc_sli_intf_sli_hint1,
9355 &phba->sli4_hba.sli_intf),
9356 bf_get(lpfc_sli_intf_sli_hint2,
9357 &phba->sli4_hba.sli_intf),
9358 bf_get(lpfc_sli_intf_func_type,
9359 &phba->sli4_hba.sli_intf));
9360 /*
9361 * Check for other Port errors during the initialization
9362 * process. Fail the load if the port did not come up
9363 * correctly.
9364 */
9365 if_type = bf_get(lpfc_sli_intf_if_type,
9366 &phba->sli4_hba.sli_intf);
9367 switch (if_type) {
9368 case LPFC_SLI_INTF_IF_TYPE_0:
9369 phba->sli4_hba.ue_mask_lo =
9370 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9371 phba->sli4_hba.ue_mask_hi =
9372 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9373 uerrlo_reg.word0 =
9374 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9375 uerrhi_reg.word0 =
9376 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9377 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9378 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9379 lpfc_printf_log(phba, KERN_ERR,
9380 LOG_TRACE_EVENT,
9381 "1422 Unrecoverable Error "
9382 "Detected during POST "
9383 "uerr_lo_reg=0x%x, "
9384 "uerr_hi_reg=0x%x, "
9385 "ue_mask_lo_reg=0x%x, "
9386 "ue_mask_hi_reg=0x%x\n",
9387 uerrlo_reg.word0,
9388 uerrhi_reg.word0,
9389 phba->sli4_hba.ue_mask_lo,
9390 phba->sli4_hba.ue_mask_hi);
9391 port_error = -ENODEV;
9392 }
9393 break;
9394 case LPFC_SLI_INTF_IF_TYPE_2:
9395 case LPFC_SLI_INTF_IF_TYPE_6:
9396 /* Final checks. The port status should be clean. */
9397 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9398 &reg_data.word0) ||
9399 (bf_get(lpfc_sliport_status_err, &reg_data) &&
9400 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9401 phba->work_status[0] =
9402 readl(phba->sli4_hba.u.if_type2.
9403 ERR1regaddr);
9404 phba->work_status[1] =
9405 readl(phba->sli4_hba.u.if_type2.
9406 ERR2regaddr);
9407 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9408 "2888 Unrecoverable port error "
9409 "following POST: port status reg "
9410 "0x%x, port_smphr reg 0x%x, "
9411 "error 1=0x%x, error 2=0x%x\n",
9412 reg_data.word0,
9413 portsmphr_reg.word0,
9414 phba->work_status[0],
9415 phba->work_status[1]);
9416 port_error = -ENODEV;
9417 break;
9418 }
9419
9420 if (lpfc_pldv_detect &&
9421 bf_get(lpfc_sli_intf_sli_family,
9422 &phba->sli4_hba.sli_intf) ==
9423 LPFC_SLI_INTF_FAMILY_G6)
9424 pci_write_config_byte(phba->pcidev,
9425 LPFC_SLI_INTF, CFG_PLD);
9426 break;
9427 case LPFC_SLI_INTF_IF_TYPE_1:
9428 default:
9429 break;
9430 }
9431 }
9432 return port_error;
9433 }
9434
9435 /**
9436 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9437 * @phba: pointer to lpfc hba data structure.
9438 * @if_type: The SLI4 interface type getting configured.
9439 *
9440 * This routine is invoked to set up SLI4 BAR0 PCI config space register
9441 * memory map.
9442 **/
9443 static void
9444 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9445 {
9446 switch (if_type) {
9447 case LPFC_SLI_INTF_IF_TYPE_0:
9448 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9449 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9450 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9451 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9452 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9453 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9454 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9455 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9456 phba->sli4_hba.SLIINTFregaddr =
9457 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9458 break;
9459 case LPFC_SLI_INTF_IF_TYPE_2:
9460 phba->sli4_hba.u.if_type2.EQDregaddr =
9461 phba->sli4_hba.conf_regs_memmap_p +
9462 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9463 phba->sli4_hba.u.if_type2.ERR1regaddr =
9464 phba->sli4_hba.conf_regs_memmap_p +
9465 LPFC_CTL_PORT_ER1_OFFSET;
9466 phba->sli4_hba.u.if_type2.ERR2regaddr =
9467 phba->sli4_hba.conf_regs_memmap_p +
9468 LPFC_CTL_PORT_ER2_OFFSET;
9469 phba->sli4_hba.u.if_type2.CTRLregaddr =
9470 phba->sli4_hba.conf_regs_memmap_p +
9471 LPFC_CTL_PORT_CTL_OFFSET;
9472 phba->sli4_hba.u.if_type2.STATUSregaddr =
9473 phba->sli4_hba.conf_regs_memmap_p +
9474 LPFC_CTL_PORT_STA_OFFSET;
9475 phba->sli4_hba.SLIINTFregaddr =
9476 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9477 phba->sli4_hba.PSMPHRregaddr =
9478 phba->sli4_hba.conf_regs_memmap_p +
9479 LPFC_CTL_PORT_SEM_OFFSET;
9480 phba->sli4_hba.RQDBregaddr =
9481 phba->sli4_hba.conf_regs_memmap_p +
9482 LPFC_ULP0_RQ_DOORBELL;
9483 phba->sli4_hba.WQDBregaddr =
9484 phba->sli4_hba.conf_regs_memmap_p +
9485 LPFC_ULP0_WQ_DOORBELL;
9486 phba->sli4_hba.CQDBregaddr =
9487 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9488 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9489 phba->sli4_hba.MQDBregaddr =
9490 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9491 phba->sli4_hba.BMBXregaddr =
9492 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9493 break;
9494 case LPFC_SLI_INTF_IF_TYPE_6:
9495 phba->sli4_hba.u.if_type2.EQDregaddr =
9496 phba->sli4_hba.conf_regs_memmap_p +
9497
LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9498 phba->sli4_hba.u.if_type2.ERR1regaddr = 9499 phba->sli4_hba.conf_regs_memmap_p + 9500 LPFC_CTL_PORT_ER1_OFFSET; 9501 phba->sli4_hba.u.if_type2.ERR2regaddr = 9502 phba->sli4_hba.conf_regs_memmap_p + 9503 LPFC_CTL_PORT_ER2_OFFSET; 9504 phba->sli4_hba.u.if_type2.CTRLregaddr = 9505 phba->sli4_hba.conf_regs_memmap_p + 9506 LPFC_CTL_PORT_CTL_OFFSET; 9507 phba->sli4_hba.u.if_type2.STATUSregaddr = 9508 phba->sli4_hba.conf_regs_memmap_p + 9509 LPFC_CTL_PORT_STA_OFFSET; 9510 phba->sli4_hba.PSMPHRregaddr = 9511 phba->sli4_hba.conf_regs_memmap_p + 9512 LPFC_CTL_PORT_SEM_OFFSET; 9513 phba->sli4_hba.BMBXregaddr = 9514 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9515 break; 9516 case LPFC_SLI_INTF_IF_TYPE_1: 9517 default: 9518 dev_printk(KERN_ERR, &phba->pcidev->dev, 9519 "FATAL - unsupported SLI4 interface type - %d\n", 9520 if_type); 9521 break; 9522 } 9523 } 9524 9525 /** 9526 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 9527 * @phba: pointer to lpfc hba data structure. 9528 * @if_type: sli if type to operate on. 9529 * 9530 * This routine is invoked to set up SLI4 BAR1 register memory map. 9531 **/ 9532 static void 9533 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 9534 { 9535 switch (if_type) { 9536 case LPFC_SLI_INTF_IF_TYPE_0: 9537 phba->sli4_hba.PSMPHRregaddr = 9538 phba->sli4_hba.ctrl_regs_memmap_p + 9539 LPFC_SLIPORT_IF0_SMPHR; 9540 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9541 LPFC_HST_ISR0; 9542 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9543 LPFC_HST_IMR0; 9544 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9545 LPFC_HST_ISCR0; 9546 break; 9547 case LPFC_SLI_INTF_IF_TYPE_6: 9548 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9549 LPFC_IF6_RQ_DOORBELL; 9550 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9551 LPFC_IF6_WQ_DOORBELL; 9552 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9553 LPFC_IF6_CQ_DOORBELL; 9554 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9555 LPFC_IF6_EQ_DOORBELL; 9556 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9557 LPFC_IF6_MQ_DOORBELL; 9558 break; 9559 case LPFC_SLI_INTF_IF_TYPE_2: 9560 case LPFC_SLI_INTF_IF_TYPE_1: 9561 default: 9562 dev_err(&phba->pcidev->dev, 9563 "FATAL - unsupported SLI4 interface type - %d\n", 9564 if_type); 9565 break; 9566 } 9567 } 9568 9569 /** 9570 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 9571 * @phba: pointer to lpfc hba data structure. 9572 * @vf: virtual function number 9573 * 9574 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 9575 * based on the given viftual function number, @vf. 9576 * 9577 * Return 0 if successful, otherwise -ENODEV. 
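 *
 * Each virtual function owns one LPFC_VFR_PAGE_SIZE page of doorbells in
 * BAR2, so, for example, the RQ doorbell for @vf lives at
 * drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL,
 * and the other doorbells follow at their usual offsets within that page.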
9578 **/
9579 static int
9580 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9581 {
9582 if (vf > LPFC_VIR_FUNC_MAX)
9583 return -ENODEV;
9584
9585 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9586 vf * LPFC_VFR_PAGE_SIZE +
9587 LPFC_ULP0_RQ_DOORBELL);
9588 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9589 vf * LPFC_VFR_PAGE_SIZE +
9590 LPFC_ULP0_WQ_DOORBELL);
9591 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9592 vf * LPFC_VFR_PAGE_SIZE +
9593 LPFC_EQCQ_DOORBELL);
9594 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9595 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9596 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9597 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9598 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9599 return 0;
9600 }
9601
9602 /**
9603 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9604 * @phba: pointer to lpfc hba data structure.
9605 *
9606 * This routine is invoked to create the bootstrap mailbox
9607 * region consistent with the SLI-4 interface spec. This
9608 * routine allocates all memory necessary to communicate
9609 * mailbox commands to the port and sets up all alignment
9610 * needs. No locks are expected to be held when calling
9611 * this routine.
9612 *
9613 * Return codes
9614 * 0 - successful
9615 * -ENOMEM - could not allocate memory.
9616 **/
9617 static int
9618 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9619 {
9620 uint32_t bmbx_size;
9621 struct lpfc_dmabuf *dmabuf;
9622 struct dma_address *dma_address;
9623 uint32_t pa_addr;
9624 uint64_t phys_addr;
9625
9626 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9627 if (!dmabuf)
9628 return -ENOMEM;
9629
9630 /*
9631 * The bootstrap mailbox region is comprised of 2 parts
9632 * plus an alignment restriction of 16 bytes.
9633 */
9634 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9635 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9636 &dmabuf->phys, GFP_KERNEL);
9637 if (!dmabuf->virt) {
9638 kfree(dmabuf);
9639 return -ENOMEM;
9640 }
9641
9642 /*
9643 * Initialize the bootstrap mailbox pointers now so that the register
9644 * operations are simple later. The mailbox dma address is required
9645 * to be 16-byte aligned. Also align the virtual memory as each
9646 * mailbox is copied into the bmbx mailbox region before issuing the
9647 * command to the port.
9648 */
9649 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9650 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9651
9652 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9653 LPFC_ALIGN_16_BYTE);
9654 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9655 LPFC_ALIGN_16_BYTE);
9656
9657 /*
9658 * Set the high and low physical addresses now. The SLI4 alignment
9659 * requirement is 16 bytes and the mailbox is posted to the port
9660 * as two 30-bit addresses. The other data is a bit marking whether
9661 * the 30-bit address is the high or low address.
9662 * Upcast bmbx aphys to 64bits so shift instruction compiles
9663 * clean on 32 bit machines.
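 *
 * As a concrete example (hypothetical, 16-byte aligned address): for
 * aphys = 0x0000001234567890, addr_lo carries physical address bits 33:4
 * ((aphys >> 4) & 0x3fffffff) shifted left by 2 with the "low" marker in
 * the bottom bits, and addr_hi carries bits 63:34 ((aphys >> 34) &
 * 0x3fffffff) shifted left by 2 with the "high" marker, so the two words
 * together reconstruct the full 16-byte aligned physical address.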
9664 */ 9665 dma_address = &phba->sli4_hba.bmbx.dma_address; 9666 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 9667 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 9668 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 9669 LPFC_BMBX_BIT1_ADDR_HI); 9670 9671 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 9672 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 9673 LPFC_BMBX_BIT1_ADDR_LO); 9674 return 0; 9675 } 9676 9677 /** 9678 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 9679 * @phba: pointer to lpfc hba data structure. 9680 * 9681 * This routine is invoked to teardown the bootstrap mailbox 9682 * region and release all host resources. This routine requires 9683 * the caller to ensure all mailbox commands recovered, no 9684 * additional mailbox comands are sent, and interrupts are disabled 9685 * before calling this routine. 9686 * 9687 **/ 9688 static void 9689 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 9690 { 9691 dma_free_coherent(&phba->pcidev->dev, 9692 phba->sli4_hba.bmbx.bmbx_size, 9693 phba->sli4_hba.bmbx.dmabuf->virt, 9694 phba->sli4_hba.bmbx.dmabuf->phys); 9695 9696 kfree(phba->sli4_hba.bmbx.dmabuf); 9697 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 9698 } 9699 9700 static const char * const lpfc_topo_to_str[] = { 9701 "Loop then P2P", 9702 "Loopback", 9703 "P2P Only", 9704 "Unsupported", 9705 "Loop Only", 9706 "Unsupported", 9707 "P2P then Loop", 9708 }; 9709 9710 #define LINK_FLAGS_DEF 0x0 9711 #define LINK_FLAGS_P2P 0x1 9712 #define LINK_FLAGS_LOOP 0x2 9713 /** 9714 * lpfc_map_topology - Map the topology read from READ_CONFIG 9715 * @phba: pointer to lpfc hba data structure. 9716 * @rd_config: pointer to read config data 9717 * 9718 * This routine is invoked to map the topology values as read 9719 * from the read config mailbox command. If the persistent 9720 * topology feature is supported, the firmware will provide the 9721 * saved topology information to be used in INIT_LINK 9722 **/ 9723 static void 9724 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 9725 { 9726 u8 ptv, tf, pt; 9727 9728 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 9729 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 9730 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 9731 9732 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9733 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 9734 ptv, tf, pt); 9735 if (!ptv) { 9736 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9737 "2019 FW does not support persistent topology " 9738 "Using driver parameter defined value [%s]", 9739 lpfc_topo_to_str[phba->cfg_topology]); 9740 return; 9741 } 9742 /* FW supports persistent topology - override module parameter value */ 9743 phba->hba_flag |= HBA_PERSISTENT_TOPO; 9744 9745 /* if ASIC_GEN_NUM >= 0xC) */ 9746 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9747 LPFC_SLI_INTF_IF_TYPE_6) || 9748 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 9749 LPFC_SLI_INTF_FAMILY_G6)) { 9750 if (!tf) { 9751 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 9752 ? FLAGS_TOPOLOGY_MODE_LOOP 9753 : FLAGS_TOPOLOGY_MODE_PT_PT); 9754 } else { 9755 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 9756 } 9757 } else { /* G5 */ 9758 if (tf) { 9759 /* If topology failover set - pt is '0' or '1' */ 9760 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 9761 FLAGS_TOPOLOGY_MODE_LOOP_PT); 9762 } else { 9763 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 9764 ? 
FLAGS_TOPOLOGY_MODE_PT_PT 9765 : FLAGS_TOPOLOGY_MODE_LOOP); 9766 } 9767 } 9768 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 9769 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9770 "2020 Using persistent topology value [%s]", 9771 lpfc_topo_to_str[phba->cfg_topology]); 9772 } else { 9773 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9774 "2021 Invalid topology values from FW " 9775 "Using driver parameter defined value [%s]", 9776 lpfc_topo_to_str[phba->cfg_topology]); 9777 } 9778 } 9779 9780 /** 9781 * lpfc_sli4_read_config - Get the config parameters. 9782 * @phba: pointer to lpfc hba data structure. 9783 * 9784 * This routine is invoked to read the configuration parameters from the HBA. 9785 * The configuration parameters are used to set the base and maximum values 9786 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 9787 * allocation for the port. 9788 * 9789 * Return codes 9790 * 0 - successful 9791 * -ENOMEM - No available memory 9792 * -EIO - The mailbox failed to complete successfully. 9793 **/ 9794 int 9795 lpfc_sli4_read_config(struct lpfc_hba *phba) 9796 { 9797 LPFC_MBOXQ_t *pmb; 9798 struct lpfc_mbx_read_config *rd_config; 9799 union lpfc_sli4_cfg_shdr *shdr; 9800 uint32_t shdr_status, shdr_add_status; 9801 struct lpfc_mbx_get_func_cfg *get_func_cfg; 9802 struct lpfc_rsrc_desc_fcfcoe *desc; 9803 char *pdesc_0; 9804 uint16_t forced_link_speed; 9805 uint32_t if_type, qmin; 9806 int length, i, rc = 0, rc2; 9807 9808 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9809 if (!pmb) { 9810 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9811 "2011 Unable to allocate memory for issuing " 9812 "SLI_CONFIG_SPECIAL mailbox command\n"); 9813 return -ENOMEM; 9814 } 9815 9816 lpfc_read_config(phba, pmb); 9817 9818 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 9819 if (rc != MBX_SUCCESS) { 9820 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9821 "2012 Mailbox failed , mbxCmd x%x " 9822 "READ_CONFIG, mbxStatus x%x\n", 9823 bf_get(lpfc_mqe_command, &pmb->u.mqe), 9824 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 9825 rc = -EIO; 9826 } else { 9827 rd_config = &pmb->u.mqe.un.rd_config; 9828 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 9829 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 9830 phba->sli4_hba.lnk_info.lnk_tp = 9831 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 9832 phba->sli4_hba.lnk_info.lnk_no = 9833 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 9834 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9835 "3081 lnk_type:%d, lnk_numb:%d\n", 9836 phba->sli4_hba.lnk_info.lnk_tp, 9837 phba->sli4_hba.lnk_info.lnk_no); 9838 } else 9839 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9840 "3082 Mailbox (x%x) returned ldv:x0\n", 9841 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 9842 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 9843 phba->bbcredit_support = 1; 9844 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 9845 } 9846 9847 phba->sli4_hba.conf_trunk = 9848 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 9849 phba->sli4_hba.extents_in_use = 9850 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 9851 phba->sli4_hba.max_cfg_param.max_xri = 9852 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 9853 /* Reduce resource usage in kdump environment */ 9854 if (is_kdump_kernel() && 9855 phba->sli4_hba.max_cfg_param.max_xri > 512) 9856 phba->sli4_hba.max_cfg_param.max_xri = 512; 9857 phba->sli4_hba.max_cfg_param.xri_base = 9858 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 9859 phba->sli4_hba.max_cfg_param.max_vpi = 9860 
bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 9861 /* Limit the max we support */ 9862 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 9863 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 9864 phba->sli4_hba.max_cfg_param.vpi_base = 9865 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 9866 phba->sli4_hba.max_cfg_param.max_rpi = 9867 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 9868 phba->sli4_hba.max_cfg_param.rpi_base = 9869 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 9870 phba->sli4_hba.max_cfg_param.max_vfi = 9871 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 9872 phba->sli4_hba.max_cfg_param.vfi_base = 9873 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 9874 phba->sli4_hba.max_cfg_param.max_fcfi = 9875 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 9876 phba->sli4_hba.max_cfg_param.max_eq = 9877 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 9878 phba->sli4_hba.max_cfg_param.max_rq = 9879 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 9880 phba->sli4_hba.max_cfg_param.max_wq = 9881 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 9882 phba->sli4_hba.max_cfg_param.max_cq = 9883 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 9884 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 9885 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 9886 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 9887 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 9888 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 9889 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 9890 phba->max_vports = phba->max_vpi; 9891 9892 /* Next decide on FPIN or Signal E2E CGN support 9893 * For congestion alarms and warnings valid combination are: 9894 * 1. FPIN alarms / FPIN warnings 9895 * 2. Signal alarms / Signal warnings 9896 * 3. FPIN alarms / Signal warnings 9897 * 4. Signal alarms / FPIN warnings 9898 * 9899 * Initialize the adapter frequency to 100 mSecs 9900 */ 9901 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 9902 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9903 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9904 9905 if (lpfc_use_cgn_signal) { 9906 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { 9907 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 9908 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 9909 } 9910 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { 9911 /* MUST support both alarm and warning 9912 * because EDC does not support alarm alone. 9913 */ 9914 if (phba->cgn_reg_signal != 9915 EDC_CG_SIG_WARN_ONLY) { 9916 /* Must support both or none */ 9917 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 9918 phba->cgn_reg_signal = 9919 EDC_CG_SIG_NOTSUPPORTED; 9920 } else { 9921 phba->cgn_reg_signal = 9922 EDC_CG_SIG_WARN_ALARM; 9923 phba->cgn_reg_fpin = 9924 LPFC_CGN_FPIN_NONE; 9925 } 9926 } 9927 } 9928 9929 /* Set the congestion initial signal and fpin values. */ 9930 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; 9931 phba->cgn_init_reg_signal = phba->cgn_reg_signal; 9932 9933 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 9934 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", 9935 phba->cgn_reg_signal, phba->cgn_reg_fpin); 9936 9937 lpfc_map_topology(phba, rd_config); 9938 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9939 "2003 cfg params Extents? 
%d " 9940 "XRI(B:%d M:%d), " 9941 "VPI(B:%d M:%d) " 9942 "VFI(B:%d M:%d) " 9943 "RPI(B:%d M:%d) " 9944 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", 9945 phba->sli4_hba.extents_in_use, 9946 phba->sli4_hba.max_cfg_param.xri_base, 9947 phba->sli4_hba.max_cfg_param.max_xri, 9948 phba->sli4_hba.max_cfg_param.vpi_base, 9949 phba->sli4_hba.max_cfg_param.max_vpi, 9950 phba->sli4_hba.max_cfg_param.vfi_base, 9951 phba->sli4_hba.max_cfg_param.max_vfi, 9952 phba->sli4_hba.max_cfg_param.rpi_base, 9953 phba->sli4_hba.max_cfg_param.max_rpi, 9954 phba->sli4_hba.max_cfg_param.max_fcfi, 9955 phba->sli4_hba.max_cfg_param.max_eq, 9956 phba->sli4_hba.max_cfg_param.max_cq, 9957 phba->sli4_hba.max_cfg_param.max_wq, 9958 phba->sli4_hba.max_cfg_param.max_rq, 9959 phba->lmt); 9960 9961 /* 9962 * Calculate queue resources based on how 9963 * many WQ/CQ/EQs are available. 9964 */ 9965 qmin = phba->sli4_hba.max_cfg_param.max_wq; 9966 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 9967 qmin = phba->sli4_hba.max_cfg_param.max_cq; 9968 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 9969 qmin = phba->sli4_hba.max_cfg_param.max_eq; 9970 /* 9971 * Whats left after this can go toward NVME / FCP. 9972 * The minus 4 accounts for ELS, NVME LS, MBOX 9973 * plus one extra. When configured for 9974 * NVMET, FCP io channel WQs are not created. 9975 */ 9976 qmin -= 4; 9977 9978 /* Check to see if there is enough for NVME */ 9979 if ((phba->cfg_irq_chann > qmin) || 9980 (phba->cfg_hdw_queue > qmin)) { 9981 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9982 "2005 Reducing Queues - " 9983 "FW resource limitation: " 9984 "WQ %d CQ %d EQ %d: min %d: " 9985 "IRQ %d HDWQ %d\n", 9986 phba->sli4_hba.max_cfg_param.max_wq, 9987 phba->sli4_hba.max_cfg_param.max_cq, 9988 phba->sli4_hba.max_cfg_param.max_eq, 9989 qmin, phba->cfg_irq_chann, 9990 phba->cfg_hdw_queue); 9991 9992 if (phba->cfg_irq_chann > qmin) 9993 phba->cfg_irq_chann = qmin; 9994 if (phba->cfg_hdw_queue > qmin) 9995 phba->cfg_hdw_queue = qmin; 9996 } 9997 } 9998 9999 if (rc) 10000 goto read_cfg_out; 10001 10002 /* Update link speed if forced link speed is supported */ 10003 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10004 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10005 forced_link_speed = 10006 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 10007 if (forced_link_speed) { 10008 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 10009 10010 switch (forced_link_speed) { 10011 case LINK_SPEED_1G: 10012 phba->cfg_link_speed = 10013 LPFC_USER_LINK_SPEED_1G; 10014 break; 10015 case LINK_SPEED_2G: 10016 phba->cfg_link_speed = 10017 LPFC_USER_LINK_SPEED_2G; 10018 break; 10019 case LINK_SPEED_4G: 10020 phba->cfg_link_speed = 10021 LPFC_USER_LINK_SPEED_4G; 10022 break; 10023 case LINK_SPEED_8G: 10024 phba->cfg_link_speed = 10025 LPFC_USER_LINK_SPEED_8G; 10026 break; 10027 case LINK_SPEED_10G: 10028 phba->cfg_link_speed = 10029 LPFC_USER_LINK_SPEED_10G; 10030 break; 10031 case LINK_SPEED_16G: 10032 phba->cfg_link_speed = 10033 LPFC_USER_LINK_SPEED_16G; 10034 break; 10035 case LINK_SPEED_32G: 10036 phba->cfg_link_speed = 10037 LPFC_USER_LINK_SPEED_32G; 10038 break; 10039 case LINK_SPEED_64G: 10040 phba->cfg_link_speed = 10041 LPFC_USER_LINK_SPEED_64G; 10042 break; 10043 case 0xffff: 10044 phba->cfg_link_speed = 10045 LPFC_USER_LINK_SPEED_AUTO; 10046 break; 10047 default: 10048 lpfc_printf_log(phba, KERN_ERR, 10049 LOG_TRACE_EVENT, 10050 "0047 Unrecognized link " 10051 "speed : %d\n", 10052 forced_link_speed); 10053 phba->cfg_link_speed = 10054 LPFC_USER_LINK_SPEED_AUTO; 
10055 } 10056 } 10057 } 10058 10059 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 10060 length = phba->sli4_hba.max_cfg_param.max_xri - 10061 lpfc_sli4_get_els_iocb_cnt(phba); 10062 if (phba->cfg_hba_queue_depth > length) { 10063 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10064 "3361 HBA queue depth changed from %d to %d\n", 10065 phba->cfg_hba_queue_depth, length); 10066 phba->cfg_hba_queue_depth = length; 10067 } 10068 10069 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 10070 LPFC_SLI_INTF_IF_TYPE_2) 10071 goto read_cfg_out; 10072 10073 /* get the pf# and vf# for SLI4 if_type 2 port */ 10074 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 10075 sizeof(struct lpfc_sli4_cfg_mhdr)); 10076 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 10077 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 10078 length, LPFC_SLI4_MBX_EMBED); 10079 10080 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10081 shdr = (union lpfc_sli4_cfg_shdr *) 10082 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 10083 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10084 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10085 if (rc2 || shdr_status || shdr_add_status) { 10086 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10087 "3026 Mailbox failed , mbxCmd x%x " 10088 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 10089 bf_get(lpfc_mqe_command, &pmb->u.mqe), 10090 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 10091 goto read_cfg_out; 10092 } 10093 10094 /* search for fc_fcoe resrouce descriptor */ 10095 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 10096 10097 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 10098 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 10099 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 10100 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 10101 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 10102 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 10103 goto read_cfg_out; 10104 10105 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 10106 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 10107 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 10108 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 10109 phba->sli4_hba.iov.pf_number = 10110 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 10111 phba->sli4_hba.iov.vf_number = 10112 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 10113 break; 10114 } 10115 } 10116 10117 if (i < LPFC_RSRC_DESC_MAX_NUM) 10118 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10119 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 10120 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 10121 phba->sli4_hba.iov.vf_number); 10122 else 10123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10124 "3028 GET_FUNCTION_CONFIG: failed to find " 10125 "Resource Descriptor:x%x\n", 10126 LPFC_RSRC_DESC_TYPE_FCFCOE); 10127 10128 read_cfg_out: 10129 mempool_free(pmb, phba->mbox_mem_pool); 10130 return rc; 10131 } 10132 10133 /** 10134 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 10135 * @phba: pointer to lpfc hba data structure. 10136 * 10137 * This routine is invoked to setup the port-side endian order when 10138 * the port if_type is 0. This routine has no function for other 10139 * if_types. 10140 * 10141 * Return codes 10142 * 0 - successful 10143 * -ENOMEM - No available memory 10144 * -EIO - The mailbox failed to complete successfully. 
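 *
 * The command body is minimal: the first two MQE words are loaded with
 * the HOST_ENDIAN_LOW_WORD0 / HOST_ENDIAN_HIGH_WORD1 signature values and
 * issued with MBX_POLL, letting the port deduce the host byte order from
 * how those well-known values arrive.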
10145 **/ 10146 static int 10147 lpfc_setup_endian_order(struct lpfc_hba *phba) 10148 { 10149 LPFC_MBOXQ_t *mboxq; 10150 uint32_t if_type, rc = 0; 10151 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 10152 HOST_ENDIAN_HIGH_WORD1}; 10153 10154 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10155 switch (if_type) { 10156 case LPFC_SLI_INTF_IF_TYPE_0: 10157 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10158 GFP_KERNEL); 10159 if (!mboxq) { 10160 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10161 "0492 Unable to allocate memory for " 10162 "issuing SLI_CONFIG_SPECIAL mailbox " 10163 "command\n"); 10164 return -ENOMEM; 10165 } 10166 10167 /* 10168 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 10169 * two words to contain special data values and no other data. 10170 */ 10171 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 10172 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 10173 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10174 if (rc != MBX_SUCCESS) { 10175 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10176 "0493 SLI_CONFIG_SPECIAL mailbox " 10177 "failed with status x%x\n", 10178 rc); 10179 rc = -EIO; 10180 } 10181 mempool_free(mboxq, phba->mbox_mem_pool); 10182 break; 10183 case LPFC_SLI_INTF_IF_TYPE_6: 10184 case LPFC_SLI_INTF_IF_TYPE_2: 10185 case LPFC_SLI_INTF_IF_TYPE_1: 10186 default: 10187 break; 10188 } 10189 return rc; 10190 } 10191 10192 /** 10193 * lpfc_sli4_queue_verify - Verify and update EQ counts 10194 * @phba: pointer to lpfc hba data structure. 10195 * 10196 * This routine is invoked to check the user settable queue counts for EQs. 10197 * After this routine is called the counts will be set to valid values that 10198 * adhere to the constraints of the system's interrupt vectors and the port's 10199 * queue resources. 
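 *
 * For example, when NVMET is enabled the configured MRQ count is clamped
 * to the smaller of the hardware queue count and LPFC_NVMET_MRQ_MAX; the
 * EQ and CQ entry sizes and counts are then seeded with their
 * compile-time defaults.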
10200 * 10201 * Return codes 10202 * 0 - successful 10203 * -ENOMEM - No available memory 10204 **/ 10205 static int 10206 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 10207 { 10208 /* 10209 * Sanity check for configured queue parameters against the run-time 10210 * device parameters 10211 */ 10212 10213 if (phba->nvmet_support) { 10214 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 10215 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 10216 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 10217 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 10218 } 10219 10220 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10221 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 10222 phba->cfg_hdw_queue, phba->cfg_irq_chann, 10223 phba->cfg_nvmet_mrq); 10224 10225 /* Get EQ depth from module parameter, fake the default for now */ 10226 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10227 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10228 10229 /* Get CQ depth from module parameter, fake the default for now */ 10230 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10231 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10232 return 0; 10233 } 10234 10235 static int 10236 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 10237 { 10238 struct lpfc_queue *qdesc; 10239 u32 wqesize; 10240 int cpu; 10241 10242 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 10243 /* Create Fast Path IO CQs */ 10244 if (phba->enab_exp_wqcq_pages) 10245 /* Increase the CQ size when WQEs contain an embedded cdb */ 10246 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10247 phba->sli4_hba.cq_esize, 10248 LPFC_CQE_EXP_COUNT, cpu); 10249 10250 else 10251 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10252 phba->sli4_hba.cq_esize, 10253 phba->sli4_hba.cq_ecount, cpu); 10254 if (!qdesc) { 10255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10256 "0499 Failed allocate fast-path IO CQ (%d)\n", 10257 idx); 10258 return 1; 10259 } 10260 qdesc->qe_valid = 1; 10261 qdesc->hdwq = idx; 10262 qdesc->chann = cpu; 10263 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 10264 10265 /* Create Fast Path IO WQs */ 10266 if (phba->enab_exp_wqcq_pages) { 10267 /* Increase the WQ size when WQEs contain an embedded cdb */ 10268 wqesize = (phba->fcp_embed_io) ? 10269 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 10270 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10271 wqesize, 10272 LPFC_WQE_EXP_COUNT, cpu); 10273 } else 10274 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10275 phba->sli4_hba.wq_esize, 10276 phba->sli4_hba.wq_ecount, cpu); 10277 10278 if (!qdesc) { 10279 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10280 "0503 Failed allocate fast-path IO WQ (%d)\n", 10281 idx); 10282 return 1; 10283 } 10284 qdesc->hdwq = idx; 10285 qdesc->chann = cpu; 10286 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 10287 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10288 return 0; 10289 } 10290 10291 /** 10292 * lpfc_sli4_queue_create - Create all the SLI4 queues 10293 * @phba: pointer to lpfc hba data structure. 10294 * 10295 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 10296 * operation. For each SLI4 queue type, the parameters such as queue entry 10297 * count (queue depth) shall be taken from the module parameter. For now, 10298 * we just use some constant number as place holder. 10299 * 10300 * Return codes 10301 * 0 - successful 10302 * -ENOMEM - No availble memory 10303 * -EIO - The mailbox failed to complete successfully. 
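 *
 * In outline, allocation proceeds as: the hardware queue array, one EQ
 * per IRQ vector (allocated on the LPFC_CPU_FIRST_IRQ CPUs and shared by
 * the other CPUs on that vector), a fast-path CQ/WQ pair per hardware
 * queue, the slow-path mailbox and ELS CQ/WQ pairs, the NVME LS CQ/WQ
 * when NVME is enabled, the header/data receive queue pair, and the
 * NVMET CQ/MRQ sets when target mode is configured.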
10304 **/ 10305 int 10306 lpfc_sli4_queue_create(struct lpfc_hba *phba) 10307 { 10308 struct lpfc_queue *qdesc; 10309 int idx, cpu, eqcpu; 10310 struct lpfc_sli4_hdw_queue *qp; 10311 struct lpfc_vector_map_info *cpup; 10312 struct lpfc_vector_map_info *eqcpup; 10313 struct lpfc_eq_intr_info *eqi; 10314 10315 /* 10316 * Create HBA Record arrays. 10317 * Both NVME and FCP will share that same vectors / EQs 10318 */ 10319 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 10320 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 10321 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 10322 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 10323 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 10324 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 10325 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10326 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10327 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10328 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10329 10330 if (!phba->sli4_hba.hdwq) { 10331 phba->sli4_hba.hdwq = kcalloc( 10332 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 10333 GFP_KERNEL); 10334 if (!phba->sli4_hba.hdwq) { 10335 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10336 "6427 Failed allocate memory for " 10337 "fast-path Hardware Queue array\n"); 10338 goto out_error; 10339 } 10340 /* Prepare hardware queues to take IO buffers */ 10341 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10342 qp = &phba->sli4_hba.hdwq[idx]; 10343 spin_lock_init(&qp->io_buf_list_get_lock); 10344 spin_lock_init(&qp->io_buf_list_put_lock); 10345 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 10346 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 10347 qp->get_io_bufs = 0; 10348 qp->put_io_bufs = 0; 10349 qp->total_io_bufs = 0; 10350 spin_lock_init(&qp->abts_io_buf_list_lock); 10351 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 10352 qp->abts_scsi_io_bufs = 0; 10353 qp->abts_nvme_io_bufs = 0; 10354 INIT_LIST_HEAD(&qp->sgl_list); 10355 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 10356 spin_lock_init(&qp->hdwq_lock); 10357 } 10358 } 10359 10360 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10361 if (phba->nvmet_support) { 10362 phba->sli4_hba.nvmet_cqset = kcalloc( 10363 phba->cfg_nvmet_mrq, 10364 sizeof(struct lpfc_queue *), 10365 GFP_KERNEL); 10366 if (!phba->sli4_hba.nvmet_cqset) { 10367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10368 "3121 Fail allocate memory for " 10369 "fast-path CQ set array\n"); 10370 goto out_error; 10371 } 10372 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 10373 phba->cfg_nvmet_mrq, 10374 sizeof(struct lpfc_queue *), 10375 GFP_KERNEL); 10376 if (!phba->sli4_hba.nvmet_mrq_hdr) { 10377 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10378 "3122 Fail allocate memory for " 10379 "fast-path RQ set hdr array\n"); 10380 goto out_error; 10381 } 10382 phba->sli4_hba.nvmet_mrq_data = kcalloc( 10383 phba->cfg_nvmet_mrq, 10384 sizeof(struct lpfc_queue *), 10385 GFP_KERNEL); 10386 if (!phba->sli4_hba.nvmet_mrq_data) { 10387 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10388 "3124 Fail allocate memory for " 10389 "fast-path RQ set data array\n"); 10390 goto out_error; 10391 } 10392 } 10393 } 10394 10395 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10396 10397 /* Create HBA Event Queues (EQs) */ 10398 for_each_present_cpu(cpu) { 10399 /* We only want to create 1 EQ per vector, even though 10400 * multiple CPUs might be using that vector. so only 10401 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 
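 * For example, if CPUs 0 and 8 share one IRQ vector, only the CPU
 * flagged LPFC_CPU_FIRST_IRQ allocates the EQ here; the second loop
 * below then points the other CPUs' hardware queues at that shared EQ.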
10402 */ 10403 cpup = &phba->sli4_hba.cpu_map[cpu]; 10404 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10405 continue; 10406 10407 /* Get a ptr to the Hardware Queue associated with this CPU */ 10408 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10409 10410 /* Allocate an EQ */ 10411 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10412 phba->sli4_hba.eq_esize, 10413 phba->sli4_hba.eq_ecount, cpu); 10414 if (!qdesc) { 10415 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10416 "0497 Failed allocate EQ (%d)\n", 10417 cpup->hdwq); 10418 goto out_error; 10419 } 10420 qdesc->qe_valid = 1; 10421 qdesc->hdwq = cpup->hdwq; 10422 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 10423 qdesc->last_cpu = qdesc->chann; 10424 10425 /* Save the allocated EQ in the Hardware Queue */ 10426 qp->hba_eq = qdesc; 10427 10428 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 10429 list_add(&qdesc->cpu_list, &eqi->list); 10430 } 10431 10432 /* Now we need to populate the other Hardware Queues, that share 10433 * an IRQ vector, with the associated EQ ptr. 10434 */ 10435 for_each_present_cpu(cpu) { 10436 cpup = &phba->sli4_hba.cpu_map[cpu]; 10437 10438 /* Check for EQ already allocated in previous loop */ 10439 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 10440 continue; 10441 10442 /* Check for multiple CPUs per hdwq */ 10443 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10444 if (qp->hba_eq) 10445 continue; 10446 10447 /* We need to share an EQ for this hdwq */ 10448 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 10449 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 10450 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 10451 } 10452 10453 /* Allocate IO Path SLI4 CQ/WQs */ 10454 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10455 if (lpfc_alloc_io_wq_cq(phba, idx)) 10456 goto out_error; 10457 } 10458 10459 if (phba->nvmet_support) { 10460 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10461 cpu = lpfc_find_cpu_handle(phba, idx, 10462 LPFC_FIND_BY_HDWQ); 10463 qdesc = lpfc_sli4_queue_alloc(phba, 10464 LPFC_DEFAULT_PAGE_SIZE, 10465 phba->sli4_hba.cq_esize, 10466 phba->sli4_hba.cq_ecount, 10467 cpu); 10468 if (!qdesc) { 10469 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10470 "3142 Failed allocate NVME " 10471 "CQ Set (%d)\n", idx); 10472 goto out_error; 10473 } 10474 qdesc->qe_valid = 1; 10475 qdesc->hdwq = idx; 10476 qdesc->chann = cpu; 10477 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 10478 } 10479 } 10480 10481 /* 10482 * Create Slow Path Completion Queues (CQs) 10483 */ 10484 10485 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 10486 /* Create slow-path Mailbox Command Complete Queue */ 10487 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10488 phba->sli4_hba.cq_esize, 10489 phba->sli4_hba.cq_ecount, cpu); 10490 if (!qdesc) { 10491 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10492 "0500 Failed allocate slow-path mailbox CQ\n"); 10493 goto out_error; 10494 } 10495 qdesc->qe_valid = 1; 10496 phba->sli4_hba.mbx_cq = qdesc; 10497 10498 /* Create slow-path ELS Complete Queue */ 10499 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10500 phba->sli4_hba.cq_esize, 10501 phba->sli4_hba.cq_ecount, cpu); 10502 if (!qdesc) { 10503 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10504 "0501 Failed allocate slow-path ELS CQ\n"); 10505 goto out_error; 10506 } 10507 qdesc->qe_valid = 1; 10508 qdesc->chann = cpu; 10509 phba->sli4_hba.els_cq = qdesc; 10510 10511 10512 /* 10513 * Create Slow Path Work Queues (WQs) 10514 */ 10515 10516 /* Create Mailbox 
Command Queue */ 10517 10518 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10519 phba->sli4_hba.mq_esize, 10520 phba->sli4_hba.mq_ecount, cpu); 10521 if (!qdesc) { 10522 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10523 "0505 Failed allocate slow-path MQ\n"); 10524 goto out_error; 10525 } 10526 qdesc->chann = cpu; 10527 phba->sli4_hba.mbx_wq = qdesc; 10528 10529 /* 10530 * Create ELS Work Queues 10531 */ 10532 10533 /* Create slow-path ELS Work Queue */ 10534 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10535 phba->sli4_hba.wq_esize, 10536 phba->sli4_hba.wq_ecount, cpu); 10537 if (!qdesc) { 10538 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10539 "0504 Failed allocate slow-path ELS WQ\n"); 10540 goto out_error; 10541 } 10542 qdesc->chann = cpu; 10543 phba->sli4_hba.els_wq = qdesc; 10544 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10545 10546 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10547 /* Create NVME LS Complete Queue */ 10548 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10549 phba->sli4_hba.cq_esize, 10550 phba->sli4_hba.cq_ecount, cpu); 10551 if (!qdesc) { 10552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10553 "6079 Failed allocate NVME LS CQ\n"); 10554 goto out_error; 10555 } 10556 qdesc->chann = cpu; 10557 qdesc->qe_valid = 1; 10558 phba->sli4_hba.nvmels_cq = qdesc; 10559 10560 /* Create NVME LS Work Queue */ 10561 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10562 phba->sli4_hba.wq_esize, 10563 phba->sli4_hba.wq_ecount, cpu); 10564 if (!qdesc) { 10565 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10566 "6080 Failed allocate NVME LS WQ\n"); 10567 goto out_error; 10568 } 10569 qdesc->chann = cpu; 10570 phba->sli4_hba.nvmels_wq = qdesc; 10571 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10572 } 10573 10574 /* 10575 * Create Receive Queue (RQ) 10576 */ 10577 10578 /* Create Receive Queue for header */ 10579 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10580 phba->sli4_hba.rq_esize, 10581 phba->sli4_hba.rq_ecount, cpu); 10582 if (!qdesc) { 10583 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10584 "0506 Failed allocate receive HRQ\n"); 10585 goto out_error; 10586 } 10587 phba->sli4_hba.hdr_rq = qdesc; 10588 10589 /* Create Receive Queue for data */ 10590 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10591 phba->sli4_hba.rq_esize, 10592 phba->sli4_hba.rq_ecount, cpu); 10593 if (!qdesc) { 10594 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10595 "0507 Failed allocate receive DRQ\n"); 10596 goto out_error; 10597 } 10598 phba->sli4_hba.dat_rq = qdesc; 10599 10600 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 10601 phba->nvmet_support) { 10602 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10603 cpu = lpfc_find_cpu_handle(phba, idx, 10604 LPFC_FIND_BY_HDWQ); 10605 /* Create NVMET Receive Queue for header */ 10606 qdesc = lpfc_sli4_queue_alloc(phba, 10607 LPFC_DEFAULT_PAGE_SIZE, 10608 phba->sli4_hba.rq_esize, 10609 LPFC_NVMET_RQE_DEF_COUNT, 10610 cpu); 10611 if (!qdesc) { 10612 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10613 "3146 Failed allocate " 10614 "receive HRQ\n"); 10615 goto out_error; 10616 } 10617 qdesc->hdwq = idx; 10618 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 10619 10620 /* Only needed for header of RQ pair */ 10621 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 10622 GFP_KERNEL, 10623 cpu_to_node(cpu)); 10624 if (qdesc->rqbp == NULL) { 10625 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10626 "6131 
Failed allocate " 10627 "Header RQBP\n"); 10628 goto out_error; 10629 } 10630 10631 /* Put list in known state in case driver load fails. */ 10632 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 10633 10634 /* Create NVMET Receive Queue for data */ 10635 qdesc = lpfc_sli4_queue_alloc(phba, 10636 LPFC_DEFAULT_PAGE_SIZE, 10637 phba->sli4_hba.rq_esize, 10638 LPFC_NVMET_RQE_DEF_COUNT, 10639 cpu); 10640 if (!qdesc) { 10641 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10642 "3156 Failed allocate " 10643 "receive DRQ\n"); 10644 goto out_error; 10645 } 10646 qdesc->hdwq = idx; 10647 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 10648 } 10649 } 10650 10651 /* Clear NVME stats */ 10652 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10653 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10654 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 10655 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 10656 } 10657 } 10658 10659 /* Clear SCSI stats */ 10660 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 10661 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10662 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 10663 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 10664 } 10665 } 10666 10667 return 0; 10668 10669 out_error: 10670 lpfc_sli4_queue_destroy(phba); 10671 return -ENOMEM; 10672 } 10673 10674 static inline void 10675 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 10676 { 10677 if (*qp != NULL) { 10678 lpfc_sli4_queue_free(*qp); 10679 *qp = NULL; 10680 } 10681 } 10682 10683 static inline void 10684 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 10685 { 10686 int idx; 10687 10688 if (*qs == NULL) 10689 return; 10690 10691 for (idx = 0; idx < max; idx++) 10692 __lpfc_sli4_release_queue(&(*qs)[idx]); 10693 10694 kfree(*qs); 10695 *qs = NULL; 10696 } 10697 10698 static inline void 10699 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 10700 { 10701 struct lpfc_sli4_hdw_queue *hdwq; 10702 struct lpfc_queue *eq; 10703 uint32_t idx; 10704 10705 hdwq = phba->sli4_hba.hdwq; 10706 10707 /* Loop thru all Hardware Queues */ 10708 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10709 /* Free the CQ/WQ corresponding to the Hardware Queue */ 10710 lpfc_sli4_queue_free(hdwq[idx].io_cq); 10711 lpfc_sli4_queue_free(hdwq[idx].io_wq); 10712 hdwq[idx].hba_eq = NULL; 10713 hdwq[idx].io_cq = NULL; 10714 hdwq[idx].io_wq = NULL; 10715 if (phba->cfg_xpsgl && !phba->nvmet_support) 10716 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 10717 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 10718 } 10719 /* Loop thru all IRQ vectors */ 10720 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10721 /* Free the EQ corresponding to the IRQ vector */ 10722 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 10723 lpfc_sli4_queue_free(eq); 10724 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 10725 } 10726 } 10727 10728 /** 10729 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 10730 * @phba: pointer to lpfc hba data structure. 10731 * 10732 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 10733 * operation. 10734 * 10735 * Return codes 10736 * 0 - successful 10737 * -ENOMEM - No available memory 10738 * -EIO - The mailbox failed to complete successfully. 10739 **/ 10740 void 10741 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 10742 { 10743 /* 10744 * Set FREE_INIT before beginning to free the queues. 10745 * Wait until the users of queues to acknowledge to 10746 * release queues by clearing FREE_WAIT. 
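 *
 * Illustrative sketch of the expected handshake on the queue-user side
 * (an assumption based on the FREE_INIT/FREE_WAIT flags, not a fixed
 * contract); a path that is about to walk the EQs/CQs does roughly:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT) {
 *		spin_unlock_irq(&phba->hbalock);
 *		return;			// teardown already in progress
 *	}
 *	phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);
 *	... access the queues ...
 *	spin_lock_irq(&phba->hbalock);
 *	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);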
10747 */ 10748 spin_lock_irq(&phba->hbalock); 10749 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 10750 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 10751 spin_unlock_irq(&phba->hbalock); 10752 msleep(20); 10753 spin_lock_irq(&phba->hbalock); 10754 } 10755 spin_unlock_irq(&phba->hbalock); 10756 10757 lpfc_sli4_cleanup_poll_list(phba); 10758 10759 /* Release HBA eqs */ 10760 if (phba->sli4_hba.hdwq) 10761 lpfc_sli4_release_hdwq(phba); 10762 10763 if (phba->nvmet_support) { 10764 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 10765 phba->cfg_nvmet_mrq); 10766 10767 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 10768 phba->cfg_nvmet_mrq); 10769 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 10770 phba->cfg_nvmet_mrq); 10771 } 10772 10773 /* Release mailbox command work queue */ 10774 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 10775 10776 /* Release ELS work queue */ 10777 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 10778 10779 /* Release NVME LS work queue */ 10780 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 10781 10782 /* Release unsolicited receive queue */ 10783 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 10784 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 10785 10786 /* Release ELS complete queue */ 10787 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 10788 10789 /* Release NVME LS complete queue */ 10790 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 10791 10792 /* Release mailbox command complete queue */ 10793 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 10794 10795 /* Everything on this list has been freed */ 10796 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10797 10798 /* Done with freeing the queues */ 10799 spin_lock_irq(&phba->hbalock); 10800 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 10801 spin_unlock_irq(&phba->hbalock); 10802 } 10803 10804 int 10805 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 10806 { 10807 struct lpfc_rqb *rqbp; 10808 struct lpfc_dmabuf *h_buf; 10809 struct rqb_dmabuf *rqb_buffer; 10810 10811 rqbp = rq->rqbp; 10812 while (!list_empty(&rqbp->rqb_buffer_list)) { 10813 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 10814 struct lpfc_dmabuf, list); 10815 10816 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 10817 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 10818 rqbp->buffer_count--; 10819 } 10820 return 1; 10821 } 10822 10823 static int 10824 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 10825 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 10826 int qidx, uint32_t qtype) 10827 { 10828 struct lpfc_sli_ring *pring; 10829 int rc; 10830 10831 if (!eq || !cq || !wq) { 10832 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10833 "6085 Fast-path %s (%d) not allocated\n", 10834 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 10835 return -ENOMEM; 10836 } 10837 10838 /* Create the CQ first */ 10839 rc = lpfc_cq_create(phba, cq, eq, 10840 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 10841 if (rc) { 10842 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10843 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 10844 qidx, (uint32_t)rc); 10845 return rc; 10846 } 10847 10848 if (qtype != LPFC_MBOX) { 10849 /* Setup cq_map for fast lookup */ 10850 if (cq_map) 10851 *cq_map = cq->queue_id; 10852 10853 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10854 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 10855 qidx, cq->queue_id, qidx, eq->queue_id); 10856 10857 /* create the wq */ 10858 rc = lpfc_wq_create(phba, wq, cq, qtype); 10859 if (rc) { 10860 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10861 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 10862 qidx, (uint32_t)rc); 10863 /* no need to tear down cq - caller will do so */ 10864 return rc; 10865 } 10866 10867 /* Bind this CQ/WQ to the NVME ring */ 10868 pring = wq->pring; 10869 pring->sli.sli4.wqp = (void *)wq; 10870 cq->pring = pring; 10871 10872 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10873 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 10874 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 10875 } else { 10876 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 10877 if (rc) { 10878 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10879 "0539 Failed setup of slow-path MQ: " 10880 "rc = 0x%x\n", rc); 10881 /* no need to tear down cq - caller will do so */ 10882 return rc; 10883 } 10884 10885 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10886 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 10887 phba->sli4_hba.mbx_wq->queue_id, 10888 phba->sli4_hba.mbx_cq->queue_id); 10889 } 10890 10891 return 0; 10892 } 10893 10894 /** 10895 * lpfc_setup_cq_lookup - Setup the CQ lookup table 10896 * @phba: pointer to lpfc hba data structure. 10897 * 10898 * This routine will populate the cq_lookup table by all 10899 * available CQ queue_id's. 10900 **/ 10901 static void 10902 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 10903 { 10904 struct lpfc_queue *eq, *childq; 10905 int qidx; 10906 10907 memset(phba->sli4_hba.cq_lookup, 0, 10908 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 10909 /* Loop thru all IRQ vectors */ 10910 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 10911 /* Get the EQ corresponding to the IRQ vector */ 10912 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 10913 if (!eq) 10914 continue; 10915 /* Loop through all CQs associated with that EQ */ 10916 list_for_each_entry(childq, &eq->child_list, list) { 10917 if (childq->queue_id > phba->sli4_hba.cq_max) 10918 continue; 10919 if (childq->subtype == LPFC_IO) 10920 phba->sli4_hba.cq_lookup[childq->queue_id] = 10921 childq; 10922 } 10923 } 10924 } 10925 10926 /** 10927 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 10928 * @phba: pointer to lpfc hba data structure. 10929 * 10930 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 10931 * operation. 10932 * 10933 * Return codes 10934 * 0 - successful 10935 * -ENOMEM - No available memory 10936 * -EIO - The mailbox failed to complete successfully. 
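 *
 * Note: this assumes lpfc_sli4_queue_create() has already allocated the
 * queue structures. Caller-side sketch (illustrative only, not a fixed
 * contract):
 *
 *	rc = lpfc_sli4_queue_create(phba);	// allocate queue memory
 *	...
 *	rc = lpfc_sli4_queue_setup(phba);	// post queues to the port
 *	if (rc)
 *		lpfc_sli4_queue_destroy(phba);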
10937 **/ 10938 int 10939 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 10940 { 10941 uint32_t shdr_status, shdr_add_status; 10942 union lpfc_sli4_cfg_shdr *shdr; 10943 struct lpfc_vector_map_info *cpup; 10944 struct lpfc_sli4_hdw_queue *qp; 10945 LPFC_MBOXQ_t *mboxq; 10946 int qidx, cpu; 10947 uint32_t length, usdelay; 10948 int rc = -ENOMEM; 10949 10950 /* Check for dual-ULP support */ 10951 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10952 if (!mboxq) { 10953 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10954 "3249 Unable to allocate memory for " 10955 "QUERY_FW_CFG mailbox command\n"); 10956 return -ENOMEM; 10957 } 10958 length = (sizeof(struct lpfc_mbx_query_fw_config) - 10959 sizeof(struct lpfc_sli4_cfg_mhdr)); 10960 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10961 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 10962 length, LPFC_SLI4_MBX_EMBED); 10963 10964 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10965 10966 shdr = (union lpfc_sli4_cfg_shdr *) 10967 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 10968 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10969 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10970 if (shdr_status || shdr_add_status || rc) { 10971 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10972 "3250 QUERY_FW_CFG mailbox failed with status " 10973 "x%x add_status x%x, mbx status x%x\n", 10974 shdr_status, shdr_add_status, rc); 10975 mempool_free(mboxq, phba->mbox_mem_pool); 10976 rc = -ENXIO; 10977 goto out_error; 10978 } 10979 10980 phba->sli4_hba.fw_func_mode = 10981 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 10982 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 10983 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 10984 phba->sli4_hba.physical_port = 10985 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 10986 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10987 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 10988 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 10989 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 10990 10991 mempool_free(mboxq, phba->mbox_mem_pool); 10992 10993 /* 10994 * Set up HBA Event Queues (EQs) 10995 */ 10996 qp = phba->sli4_hba.hdwq; 10997 10998 /* Set up HBA event queue */ 10999 if (!qp) { 11000 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11001 "3147 Fast-path EQs not allocated\n"); 11002 rc = -ENOMEM; 11003 goto out_error; 11004 } 11005 11006 /* Loop thru all IRQ vectors */ 11007 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11008 /* Create HBA Event Queues (EQs) in order */ 11009 for_each_present_cpu(cpu) { 11010 cpup = &phba->sli4_hba.cpu_map[cpu]; 11011 11012 /* Look for the CPU thats using that vector with 11013 * LPFC_CPU_FIRST_IRQ set. 
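 * (Only the first CPU bound to each IRQ vector creates the EQ; the
 * remaining CPUs that share the vector reuse that EQ.)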
11014 */ 11015 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11016 continue; 11017 if (qidx != cpup->eq) 11018 continue; 11019 11020 /* Create an EQ for that vector */ 11021 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 11022 phba->cfg_fcp_imax); 11023 if (rc) { 11024 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11025 "0523 Failed setup of fast-path" 11026 " EQ (%d), rc = 0x%x\n", 11027 cpup->eq, (uint32_t)rc); 11028 goto out_destroy; 11029 } 11030 11031 /* Save the EQ for that vector in the hba_eq_hdl */ 11032 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 11033 qp[cpup->hdwq].hba_eq; 11034 11035 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11036 "2584 HBA EQ setup: queue[%d]-id=%d\n", 11037 cpup->eq, 11038 qp[cpup->hdwq].hba_eq->queue_id); 11039 } 11040 } 11041 11042 /* Loop thru all Hardware Queues */ 11043 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11044 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 11045 cpup = &phba->sli4_hba.cpu_map[cpu]; 11046 11047 /* Create the CQ/WQ corresponding to the Hardware Queue */ 11048 rc = lpfc_create_wq_cq(phba, 11049 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 11050 qp[qidx].io_cq, 11051 qp[qidx].io_wq, 11052 &phba->sli4_hba.hdwq[qidx].io_cq_map, 11053 qidx, 11054 LPFC_IO); 11055 if (rc) { 11056 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11057 "0535 Failed to setup fastpath " 11058 "IO WQ/CQ (%d), rc = 0x%x\n", 11059 qidx, (uint32_t)rc); 11060 goto out_destroy; 11061 } 11062 } 11063 11064 /* 11065 * Set up Slow Path Complete Queues (CQs) 11066 */ 11067 11068 /* Set up slow-path MBOX CQ/MQ */ 11069 11070 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 11071 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11072 "0528 %s not allocated\n", 11073 phba->sli4_hba.mbx_cq ? 11074 "Mailbox WQ" : "Mailbox CQ"); 11075 rc = -ENOMEM; 11076 goto out_destroy; 11077 } 11078 11079 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11080 phba->sli4_hba.mbx_cq, 11081 phba->sli4_hba.mbx_wq, 11082 NULL, 0, LPFC_MBOX); 11083 if (rc) { 11084 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11085 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 11086 (uint32_t)rc); 11087 goto out_destroy; 11088 } 11089 if (phba->nvmet_support) { 11090 if (!phba->sli4_hba.nvmet_cqset) { 11091 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11092 "3165 Fast-path NVME CQ Set " 11093 "array not allocated\n"); 11094 rc = -ENOMEM; 11095 goto out_destroy; 11096 } 11097 if (phba->cfg_nvmet_mrq > 1) { 11098 rc = lpfc_cq_create_set(phba, 11099 phba->sli4_hba.nvmet_cqset, 11100 qp, 11101 LPFC_WCQ, LPFC_NVMET); 11102 if (rc) { 11103 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11104 "3164 Failed setup of NVME CQ " 11105 "Set, rc = 0x%x\n", 11106 (uint32_t)rc); 11107 goto out_destroy; 11108 } 11109 } else { 11110 /* Set up NVMET Receive Complete Queue */ 11111 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 11112 qp[0].hba_eq, 11113 LPFC_WCQ, LPFC_NVMET); 11114 if (rc) { 11115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11116 "6089 Failed setup NVMET CQ: " 11117 "rc = 0x%x\n", (uint32_t)rc); 11118 goto out_destroy; 11119 } 11120 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 11121 11122 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11123 "6090 NVMET CQ setup: cq-id=%d, " 11124 "parent eq-id=%d\n", 11125 phba->sli4_hba.nvmet_cqset[0]->queue_id, 11126 qp[0].hba_eq->queue_id); 11127 } 11128 } 11129 11130 /* Set up slow-path ELS WQ/CQ */ 11131 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 11132 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11133 "0530 ELS %s not 
allocated\n", 11134 phba->sli4_hba.els_cq ? "WQ" : "CQ"); 11135 rc = -ENOMEM; 11136 goto out_destroy; 11137 } 11138 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11139 phba->sli4_hba.els_cq, 11140 phba->sli4_hba.els_wq, 11141 NULL, 0, LPFC_ELS); 11142 if (rc) { 11143 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11144 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 11145 (uint32_t)rc); 11146 goto out_destroy; 11147 } 11148 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11149 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 11150 phba->sli4_hba.els_wq->queue_id, 11151 phba->sli4_hba.els_cq->queue_id); 11152 11153 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11154 /* Set up NVME LS Complete Queue */ 11155 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 11156 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11157 "6091 LS %s not allocated\n", 11158 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 11159 rc = -ENOMEM; 11160 goto out_destroy; 11161 } 11162 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11163 phba->sli4_hba.nvmels_cq, 11164 phba->sli4_hba.nvmels_wq, 11165 NULL, 0, LPFC_NVME_LS); 11166 if (rc) { 11167 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11168 "0526 Failed setup of NVVME LS WQ/CQ: " 11169 "rc = 0x%x\n", (uint32_t)rc); 11170 goto out_destroy; 11171 } 11172 11173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11174 "6096 ELS WQ setup: wq-id=%d, " 11175 "parent cq-id=%d\n", 11176 phba->sli4_hba.nvmels_wq->queue_id, 11177 phba->sli4_hba.nvmels_cq->queue_id); 11178 } 11179 11180 /* 11181 * Create NVMET Receive Queue (RQ) 11182 */ 11183 if (phba->nvmet_support) { 11184 if ((!phba->sli4_hba.nvmet_cqset) || 11185 (!phba->sli4_hba.nvmet_mrq_hdr) || 11186 (!phba->sli4_hba.nvmet_mrq_data)) { 11187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11188 "6130 MRQ CQ Queues not " 11189 "allocated\n"); 11190 rc = -ENOMEM; 11191 goto out_destroy; 11192 } 11193 if (phba->cfg_nvmet_mrq > 1) { 11194 rc = lpfc_mrq_create(phba, 11195 phba->sli4_hba.nvmet_mrq_hdr, 11196 phba->sli4_hba.nvmet_mrq_data, 11197 phba->sli4_hba.nvmet_cqset, 11198 LPFC_NVMET); 11199 if (rc) { 11200 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11201 "6098 Failed setup of NVMET " 11202 "MRQ: rc = 0x%x\n", 11203 (uint32_t)rc); 11204 goto out_destroy; 11205 } 11206 11207 } else { 11208 rc = lpfc_rq_create(phba, 11209 phba->sli4_hba.nvmet_mrq_hdr[0], 11210 phba->sli4_hba.nvmet_mrq_data[0], 11211 phba->sli4_hba.nvmet_cqset[0], 11212 LPFC_NVMET); 11213 if (rc) { 11214 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11215 "6057 Failed setup of NVMET " 11216 "Receive Queue: rc = 0x%x\n", 11217 (uint32_t)rc); 11218 goto out_destroy; 11219 } 11220 11221 lpfc_printf_log( 11222 phba, KERN_INFO, LOG_INIT, 11223 "6099 NVMET RQ setup: hdr-rq-id=%d, " 11224 "dat-rq-id=%d parent cq-id=%d\n", 11225 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 11226 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 11227 phba->sli4_hba.nvmet_cqset[0]->queue_id); 11228 11229 } 11230 } 11231 11232 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 11233 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11234 "0540 Receive Queue not allocated\n"); 11235 rc = -ENOMEM; 11236 goto out_destroy; 11237 } 11238 11239 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 11240 phba->sli4_hba.els_cq, LPFC_USOL); 11241 if (rc) { 11242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11243 "0541 Failed setup of Receive Queue: " 11244 "rc = 0x%x\n", (uint32_t)rc); 11245 goto out_destroy; 11246 } 11247 11248 lpfc_printf_log(phba, KERN_INFO, 
LOG_INIT, 11249 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 11250 "parent cq-id=%d\n", 11251 phba->sli4_hba.hdr_rq->queue_id, 11252 phba->sli4_hba.dat_rq->queue_id, 11253 phba->sli4_hba.els_cq->queue_id); 11254 11255 if (phba->cfg_fcp_imax) 11256 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 11257 else 11258 usdelay = 0; 11259 11260 for (qidx = 0; qidx < phba->cfg_irq_chann; 11261 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 11262 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 11263 usdelay); 11264 11265 if (phba->sli4_hba.cq_max) { 11266 kfree(phba->sli4_hba.cq_lookup); 11267 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 11268 sizeof(struct lpfc_queue *), GFP_KERNEL); 11269 if (!phba->sli4_hba.cq_lookup) { 11270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11271 "0549 Failed setup of CQ Lookup table: " 11272 "size 0x%x\n", phba->sli4_hba.cq_max); 11273 rc = -ENOMEM; 11274 goto out_destroy; 11275 } 11276 lpfc_setup_cq_lookup(phba); 11277 } 11278 return 0; 11279 11280 out_destroy: 11281 lpfc_sli4_queue_unset(phba); 11282 out_error: 11283 return rc; 11284 } 11285 11286 /** 11287 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 11288 * @phba: pointer to lpfc hba data structure. 11289 * 11290 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 11291 * operation. 11292 * 11293 * Return codes 11294 * 0 - successful 11295 * -ENOMEM - No available memory 11296 * -EIO - The mailbox failed to complete successfully. 11297 **/ 11298 void 11299 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 11300 { 11301 struct lpfc_sli4_hdw_queue *qp; 11302 struct lpfc_queue *eq; 11303 int qidx; 11304 11305 /* Unset mailbox command work queue */ 11306 if (phba->sli4_hba.mbx_wq) 11307 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 11308 11309 /* Unset NVME LS work queue */ 11310 if (phba->sli4_hba.nvmels_wq) 11311 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 11312 11313 /* Unset ELS work queue */ 11314 if (phba->sli4_hba.els_wq) 11315 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 11316 11317 /* Unset unsolicited receive queue */ 11318 if (phba->sli4_hba.hdr_rq) 11319 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 11320 phba->sli4_hba.dat_rq); 11321 11322 /* Unset mailbox command complete queue */ 11323 if (phba->sli4_hba.mbx_cq) 11324 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 11325 11326 /* Unset ELS complete queue */ 11327 if (phba->sli4_hba.els_cq) 11328 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 11329 11330 /* Unset NVME LS complete queue */ 11331 if (phba->sli4_hba.nvmels_cq) 11332 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 11333 11334 if (phba->nvmet_support) { 11335 /* Unset NVMET MRQ queue */ 11336 if (phba->sli4_hba.nvmet_mrq_hdr) { 11337 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11338 lpfc_rq_destroy( 11339 phba, 11340 phba->sli4_hba.nvmet_mrq_hdr[qidx], 11341 phba->sli4_hba.nvmet_mrq_data[qidx]); 11342 } 11343 11344 /* Unset NVMET CQ Set complete queue */ 11345 if (phba->sli4_hba.nvmet_cqset) { 11346 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11347 lpfc_cq_destroy( 11348 phba, phba->sli4_hba.nvmet_cqset[qidx]); 11349 } 11350 } 11351 11352 /* Unset fast-path SLI4 queues */ 11353 if (phba->sli4_hba.hdwq) { 11354 /* Loop thru all Hardware Queues */ 11355 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11356 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 11357 qp = &phba->sli4_hba.hdwq[qidx]; 11358 lpfc_wq_destroy(phba, qp->io_wq); 11359 lpfc_cq_destroy(phba, qp->io_cq); 11360 } 11361 /* Loop thru all IRQ 
vectors */ 11362 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11363 /* Destroy the EQ corresponding to the IRQ vector */ 11364 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 11365 lpfc_eq_destroy(phba, eq); 11366 } 11367 } 11368 11369 kfree(phba->sli4_hba.cq_lookup); 11370 phba->sli4_hba.cq_lookup = NULL; 11371 phba->sli4_hba.cq_max = 0; 11372 } 11373 11374 /** 11375 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 11376 * @phba: pointer to lpfc hba data structure. 11377 * 11378 * This routine is invoked to allocate and set up a pool of completion queue 11379 * events. The body of the completion queue event is a completion queue entry 11380 * CQE. For now, this pool is used for the interrupt service routine to queue 11381 * the following HBA completion queue events for the worker thread to process: 11382 * - Mailbox asynchronous events 11383 * - Receive queue completion unsolicited events 11384 * Later, this can be used for all the slow-path events. 11385 * 11386 * Return codes 11387 * 0 - successful 11388 * -ENOMEM - No available memory 11389 **/ 11390 static int 11391 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 11392 { 11393 struct lpfc_cq_event *cq_event; 11394 int i; 11395 11396 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 11397 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 11398 if (!cq_event) 11399 goto out_pool_create_fail; 11400 list_add_tail(&cq_event->list, 11401 &phba->sli4_hba.sp_cqe_event_pool); 11402 } 11403 return 0; 11404 11405 out_pool_create_fail: 11406 lpfc_sli4_cq_event_pool_destroy(phba); 11407 return -ENOMEM; 11408 } 11409 11410 /** 11411 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 11412 * @phba: pointer to lpfc hba data structure. 11413 * 11414 * This routine is invoked to free the pool of completion queue events at 11415 * driver unload time. Note that, it is the responsibility of the driver 11416 * cleanup routine to free all the outstanding completion-queue events 11417 * allocated from this pool back into the pool before invoking this routine 11418 * to destroy the pool. 11419 **/ 11420 static void 11421 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 11422 { 11423 struct lpfc_cq_event *cq_event, *next_cq_event; 11424 11425 list_for_each_entry_safe(cq_event, next_cq_event, 11426 &phba->sli4_hba.sp_cqe_event_pool, list) { 11427 list_del(&cq_event->list); 11428 kfree(cq_event); 11429 } 11430 } 11431 11432 /** 11433 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11434 * @phba: pointer to lpfc hba data structure. 11435 * 11436 * This routine is the lock free version of the API invoked to allocate a 11437 * completion-queue event from the free pool. 11438 * 11439 * Return: Pointer to the newly allocated completion-queue event if successful 11440 * NULL otherwise. 11441 **/ 11442 struct lpfc_cq_event * 11443 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11444 { 11445 struct lpfc_cq_event *cq_event = NULL; 11446 11447 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 11448 struct lpfc_cq_event, list); 11449 return cq_event; 11450 } 11451 11452 /** 11453 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11454 * @phba: pointer to lpfc hba data structure. 11455 * 11456 * This routine is the lock version of the API invoked to allocate a 11457 * completion-queue event from the free pool. 
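 *
 * A typical caller (illustrative sketch only, assuming the usual
 * copy-and-queue pattern for slow-path events) pairs this with
 * lpfc_sli4_cq_event_release():
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (!cq_event)
 *		return false;
 *	memcpy(&cq_event->cqe, entry, size);
 *	... queue cq_event for the worker thread; if that is not
 *	    possible, return it with
 *	    lpfc_sli4_cq_event_release(phba, cq_event); ...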
11458 * 11459 * Return: Pointer to the newly allocated completion-queue event if successful 11460 * NULL otherwise. 11461 **/ 11462 struct lpfc_cq_event * 11463 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11464 { 11465 struct lpfc_cq_event *cq_event; 11466 unsigned long iflags; 11467 11468 spin_lock_irqsave(&phba->hbalock, iflags); 11469 cq_event = __lpfc_sli4_cq_event_alloc(phba); 11470 spin_unlock_irqrestore(&phba->hbalock, iflags); 11471 return cq_event; 11472 } 11473 11474 /** 11475 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 11476 * @phba: pointer to lpfc hba data structure. 11477 * @cq_event: pointer to the completion queue event to be freed. 11478 * 11479 * This routine is the lock free version of the API invoked to release a 11480 * completion-queue event back into the free pool. 11481 **/ 11482 void 11483 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 11484 struct lpfc_cq_event *cq_event) 11485 { 11486 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 11487 } 11488 11489 /** 11490 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 11491 * @phba: pointer to lpfc hba data structure. 11492 * @cq_event: pointer to the completion queue event to be freed. 11493 * 11494 * This routine is the lock version of the API invoked to release a 11495 * completion-queue event back into the free pool. 11496 **/ 11497 void 11498 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 11499 struct lpfc_cq_event *cq_event) 11500 { 11501 unsigned long iflags; 11502 spin_lock_irqsave(&phba->hbalock, iflags); 11503 __lpfc_sli4_cq_event_release(phba, cq_event); 11504 spin_unlock_irqrestore(&phba->hbalock, iflags); 11505 } 11506 11507 /** 11508 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 11509 * @phba: pointer to lpfc hba data structure. 11510 * 11511 * This routine is invoked to free all the pending completion-queue events 11512 * back into the free pool for a device reset. 11513 **/ 11514 static void 11515 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 11516 { 11517 LIST_HEAD(cq_event_list); 11518 struct lpfc_cq_event *cq_event; 11519 unsigned long iflags; 11520 11521 /* Retrieve all the pending WCQEs from pending WCQE lists */ 11522 11523 /* Pending ELS XRI abort events */ 11524 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 11525 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 11526 &cq_event_list); 11527 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 11528 11529 /* Pending async events */ 11530 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 11531 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 11532 &cq_event_list); 11533 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 11534 11535 while (!list_empty(&cq_event_list)) { 11536 list_remove_head(&cq_event_list, cq_event, 11537 struct lpfc_cq_event, list); 11538 lpfc_sli4_cq_event_release(phba, cq_event); 11539 } 11540 } 11541 11542 /** 11543 * lpfc_pci_function_reset - Reset pci function. 11544 * @phba: pointer to lpfc hba data structure. 11545 * 11546 * This routine is invoked to request a PCI function reset. It will destroy 11547 * all resources assigned to the PCI function that originates this request. 11548 * 11549 * Return codes 11550 * 0 - successful 11551 * -ENOMEM - No available memory 11552 * -EIO - The mailbox failed to complete successfully. 
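 *
 * For if_type 0 the reset is requested with a SLI_FUNCTION_RESET mailbox
 * command; for if_type 2/6 the routine polls the port status register for
 * RDY (for up to roughly 30 seconds), writes INIT_PORT to the port control
 * register once, and polls again. Caller-side sketch (an assumption, not a
 * fixed contract):
 *
 *	rc = lpfc_pci_function_reset(phba);
 *	if (unlikely(rc))
 *		return -ENODEV;		// port never became ready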
11553 **/ 11554 int 11555 lpfc_pci_function_reset(struct lpfc_hba *phba) 11556 { 11557 LPFC_MBOXQ_t *mboxq; 11558 uint32_t rc = 0, if_type; 11559 uint32_t shdr_status, shdr_add_status; 11560 uint32_t rdy_chk; 11561 uint32_t port_reset = 0; 11562 union lpfc_sli4_cfg_shdr *shdr; 11563 struct lpfc_register reg_data; 11564 uint16_t devid; 11565 11566 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11567 switch (if_type) { 11568 case LPFC_SLI_INTF_IF_TYPE_0: 11569 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 11570 GFP_KERNEL); 11571 if (!mboxq) { 11572 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11573 "0494 Unable to allocate memory for " 11574 "issuing SLI_FUNCTION_RESET mailbox " 11575 "command\n"); 11576 return -ENOMEM; 11577 } 11578 11579 /* Setup PCI function reset mailbox-ioctl command */ 11580 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11581 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 11582 LPFC_SLI4_MBX_EMBED); 11583 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11584 shdr = (union lpfc_sli4_cfg_shdr *) 11585 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 11586 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11587 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 11588 &shdr->response); 11589 mempool_free(mboxq, phba->mbox_mem_pool); 11590 if (shdr_status || shdr_add_status || rc) { 11591 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11592 "0495 SLI_FUNCTION_RESET mailbox " 11593 "failed with status x%x add_status x%x," 11594 " mbx status x%x\n", 11595 shdr_status, shdr_add_status, rc); 11596 rc = -ENXIO; 11597 } 11598 break; 11599 case LPFC_SLI_INTF_IF_TYPE_2: 11600 case LPFC_SLI_INTF_IF_TYPE_6: 11601 wait: 11602 /* 11603 * Poll the Port Status Register and wait for RDY for 11604 * up to 30 seconds. If the port doesn't respond, treat 11605 * it as an error. 11606 */ 11607 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 11608 if (lpfc_readl(phba->sli4_hba.u.if_type2. 11609 STATUSregaddr, ®_data.word0)) { 11610 rc = -ENODEV; 11611 goto out; 11612 } 11613 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 11614 break; 11615 msleep(20); 11616 } 11617 11618 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 11619 phba->work_status[0] = readl( 11620 phba->sli4_hba.u.if_type2.ERR1regaddr); 11621 phba->work_status[1] = readl( 11622 phba->sli4_hba.u.if_type2.ERR2regaddr); 11623 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11624 "2890 Port not ready, port status reg " 11625 "0x%x error 1=0x%x, error 2=0x%x\n", 11626 reg_data.word0, 11627 phba->work_status[0], 11628 phba->work_status[1]); 11629 rc = -ENODEV; 11630 goto out; 11631 } 11632 11633 if (bf_get(lpfc_sliport_status_pldv, ®_data)) 11634 lpfc_pldv_detect = true; 11635 11636 if (!port_reset) { 11637 /* 11638 * Reset the port now 11639 */ 11640 reg_data.word0 = 0; 11641 bf_set(lpfc_sliport_ctrl_end, ®_data, 11642 LPFC_SLIPORT_LITTLE_ENDIAN); 11643 bf_set(lpfc_sliport_ctrl_ip, ®_data, 11644 LPFC_SLIPORT_INIT_PORT); 11645 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 11646 CTRLregaddr); 11647 /* flush */ 11648 pci_read_config_word(phba->pcidev, 11649 PCI_DEVICE_ID, &devid); 11650 11651 port_reset = 1; 11652 msleep(20); 11653 goto wait; 11654 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 11655 rc = -ENODEV; 11656 goto out; 11657 } 11658 break; 11659 11660 case LPFC_SLI_INTF_IF_TYPE_1: 11661 default: 11662 break; 11663 } 11664 11665 out: 11666 /* Catch the not-ready port failure after a port reset. 
*/ 11667 if (rc) { 11668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11669 "3317 HBA not functional: IP Reset Failed " 11670 "try: echo fw_reset > board_mode\n"); 11671 rc = -ENODEV; 11672 } 11673 11674 return rc; 11675 } 11676 11677 /** 11678 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 11679 * @phba: pointer to lpfc hba data structure. 11680 * 11681 * This routine is invoked to set up the PCI device memory space for device 11682 * with SLI-4 interface spec. 11683 * 11684 * Return codes 11685 * 0 - successful 11686 * other values - error 11687 **/ 11688 static int 11689 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 11690 { 11691 struct pci_dev *pdev = phba->pcidev; 11692 unsigned long bar0map_len, bar1map_len, bar2map_len; 11693 int error; 11694 uint32_t if_type; 11695 11696 if (!pdev) 11697 return -ENODEV; 11698 11699 /* Set the device DMA mask size */ 11700 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11701 if (error) 11702 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11703 if (error) 11704 return error; 11705 11706 /* 11707 * The BARs and register set definitions and offset locations are 11708 * dependent on the if_type. 11709 */ 11710 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 11711 &phba->sli4_hba.sli_intf.word0)) { 11712 return -ENODEV; 11713 } 11714 11715 /* There is no SLI3 failback for SLI4 devices. */ 11716 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 11717 LPFC_SLI_INTF_VALID) { 11718 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11719 "2894 SLI_INTF reg contents invalid " 11720 "sli_intf reg 0x%x\n", 11721 phba->sli4_hba.sli_intf.word0); 11722 return -ENODEV; 11723 } 11724 11725 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11726 /* 11727 * Get the bus address of SLI4 device Bar regions and the 11728 * number of bytes required by each mapping. The mapping of the 11729 * particular PCI BARs regions is dependent on the type of 11730 * SLI4 device. 
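 *
 * Summary of the mappings set up below (as implemented in this function):
 *   - PCI_64BIT_BAR0: SLI4 config registers (all if_types).
 *   - if_type 0: PCI_64BIT_BAR2 -> control registers,
 *                PCI_64BIT_BAR4 -> doorbell registers.
 *   - if_type 6: PCI_64BIT_BAR2 -> doorbell registers,
 *                PCI_64BIT_BAR4 -> DPP registers.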
11731 */ 11732 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 11733 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 11734 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 11735 11736 /* 11737 * Map SLI4 PCI Config Space Register base to a kernel virtual 11738 * addr 11739 */ 11740 phba->sli4_hba.conf_regs_memmap_p = 11741 ioremap(phba->pci_bar0_map, bar0map_len); 11742 if (!phba->sli4_hba.conf_regs_memmap_p) { 11743 dev_printk(KERN_ERR, &pdev->dev, 11744 "ioremap failed for SLI4 PCI config " 11745 "registers.\n"); 11746 return -ENODEV; 11747 } 11748 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 11749 /* Set up BAR0 PCI config space register memory map */ 11750 lpfc_sli4_bar0_register_memmap(phba, if_type); 11751 } else { 11752 phba->pci_bar0_map = pci_resource_start(pdev, 1); 11753 bar0map_len = pci_resource_len(pdev, 1); 11754 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 11755 dev_printk(KERN_ERR, &pdev->dev, 11756 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 11757 return -ENODEV; 11758 } 11759 phba->sli4_hba.conf_regs_memmap_p = 11760 ioremap(phba->pci_bar0_map, bar0map_len); 11761 if (!phba->sli4_hba.conf_regs_memmap_p) { 11762 dev_printk(KERN_ERR, &pdev->dev, 11763 "ioremap failed for SLI4 PCI config " 11764 "registers.\n"); 11765 return -ENODEV; 11766 } 11767 lpfc_sli4_bar0_register_memmap(phba, if_type); 11768 } 11769 11770 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11771 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 11772 /* 11773 * Map SLI4 if type 0 HBA Control Register base to a 11774 * kernel virtual address and setup the registers. 11775 */ 11776 phba->pci_bar1_map = pci_resource_start(pdev, 11777 PCI_64BIT_BAR2); 11778 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11779 phba->sli4_hba.ctrl_regs_memmap_p = 11780 ioremap(phba->pci_bar1_map, 11781 bar1map_len); 11782 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 11783 dev_err(&pdev->dev, 11784 "ioremap failed for SLI4 HBA " 11785 "control registers.\n"); 11786 error = -ENOMEM; 11787 goto out_iounmap_conf; 11788 } 11789 phba->pci_bar2_memmap_p = 11790 phba->sli4_hba.ctrl_regs_memmap_p; 11791 lpfc_sli4_bar1_register_memmap(phba, if_type); 11792 } else { 11793 error = -ENOMEM; 11794 goto out_iounmap_conf; 11795 } 11796 } 11797 11798 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 11799 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 11800 /* 11801 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 11802 * virtual address and setup the registers. 11803 */ 11804 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 11805 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11806 phba->sli4_hba.drbl_regs_memmap_p = 11807 ioremap(phba->pci_bar1_map, bar1map_len); 11808 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11809 dev_err(&pdev->dev, 11810 "ioremap failed for SLI4 HBA doorbell registers.\n"); 11811 error = -ENOMEM; 11812 goto out_iounmap_conf; 11813 } 11814 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 11815 lpfc_sli4_bar1_register_memmap(phba, if_type); 11816 } 11817 11818 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11819 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11820 /* 11821 * Map SLI4 if type 0 HBA Doorbell Register base to 11822 * a kernel virtual address and setup the registers. 
11823 */ 11824 phba->pci_bar2_map = pci_resource_start(pdev, 11825 PCI_64BIT_BAR4); 11826 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11827 phba->sli4_hba.drbl_regs_memmap_p = 11828 ioremap(phba->pci_bar2_map, 11829 bar2map_len); 11830 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11831 dev_err(&pdev->dev, 11832 "ioremap failed for SLI4 HBA" 11833 " doorbell registers.\n"); 11834 error = -ENOMEM; 11835 goto out_iounmap_ctrl; 11836 } 11837 phba->pci_bar4_memmap_p = 11838 phba->sli4_hba.drbl_regs_memmap_p; 11839 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 11840 if (error) 11841 goto out_iounmap_all; 11842 } else { 11843 error = -ENOMEM; 11844 goto out_iounmap_all; 11845 } 11846 } 11847 11848 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 11849 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11850 /* 11851 * Map SLI4 if type 6 HBA DPP Register base to a kernel 11852 * virtual address and setup the registers. 11853 */ 11854 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 11855 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11856 phba->sli4_hba.dpp_regs_memmap_p = 11857 ioremap(phba->pci_bar2_map, bar2map_len); 11858 if (!phba->sli4_hba.dpp_regs_memmap_p) { 11859 dev_err(&pdev->dev, 11860 "ioremap failed for SLI4 HBA dpp registers.\n"); 11861 error = -ENOMEM; 11862 goto out_iounmap_ctrl; 11863 } 11864 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 11865 } 11866 11867 /* Set up the EQ/CQ register handeling functions now */ 11868 switch (if_type) { 11869 case LPFC_SLI_INTF_IF_TYPE_0: 11870 case LPFC_SLI_INTF_IF_TYPE_2: 11871 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 11872 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 11873 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 11874 break; 11875 case LPFC_SLI_INTF_IF_TYPE_6: 11876 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 11877 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 11878 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 11879 break; 11880 default: 11881 break; 11882 } 11883 11884 return 0; 11885 11886 out_iounmap_all: 11887 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11888 out_iounmap_ctrl: 11889 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 11890 out_iounmap_conf: 11891 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11892 11893 return error; 11894 } 11895 11896 /** 11897 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 11898 * @phba: pointer to lpfc hba data structure. 11899 * 11900 * This routine is invoked to unset the PCI device memory space for device 11901 * with SLI-4 interface spec. 
11902 **/ 11903 static void 11904 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 11905 { 11906 uint32_t if_type; 11907 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11908 11909 switch (if_type) { 11910 case LPFC_SLI_INTF_IF_TYPE_0: 11911 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11912 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 11913 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11914 break; 11915 case LPFC_SLI_INTF_IF_TYPE_2: 11916 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11917 break; 11918 case LPFC_SLI_INTF_IF_TYPE_6: 11919 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11920 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11921 if (phba->sli4_hba.dpp_regs_memmap_p) 11922 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 11923 break; 11924 case LPFC_SLI_INTF_IF_TYPE_1: 11925 default: 11926 dev_printk(KERN_ERR, &phba->pcidev->dev, 11927 "FATAL - unsupported SLI4 interface type - %d\n", 11928 if_type); 11929 break; 11930 } 11931 } 11932 11933 /** 11934 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 11935 * @phba: pointer to lpfc hba data structure. 11936 * 11937 * This routine is invoked to enable the MSI-X interrupt vectors to device 11938 * with SLI-3 interface specs. 11939 * 11940 * Return codes 11941 * 0 - successful 11942 * other values - error 11943 **/ 11944 static int 11945 lpfc_sli_enable_msix(struct lpfc_hba *phba) 11946 { 11947 int rc; 11948 LPFC_MBOXQ_t *pmb; 11949 11950 /* Set up MSI-X multi-message vectors */ 11951 rc = pci_alloc_irq_vectors(phba->pcidev, 11952 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 11953 if (rc < 0) { 11954 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11955 "0420 PCI enable MSI-X failed (%d)\n", rc); 11956 goto vec_fail_out; 11957 } 11958 11959 /* 11960 * Assign MSI-X vectors to interrupt handlers 11961 */ 11962 11963 /* vector-0 is associated to slow-path handler */ 11964 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 11965 &lpfc_sli_sp_intr_handler, 0, 11966 LPFC_SP_DRIVER_HANDLER_NAME, phba); 11967 if (rc) { 11968 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11969 "0421 MSI-X slow-path request_irq failed " 11970 "(%d)\n", rc); 11971 goto msi_fail_out; 11972 } 11973 11974 /* vector-1 is associated to fast-path handler */ 11975 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 11976 &lpfc_sli_fp_intr_handler, 0, 11977 LPFC_FP_DRIVER_HANDLER_NAME, phba); 11978 11979 if (rc) { 11980 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11981 "0429 MSI-X fast-path request_irq failed " 11982 "(%d)\n", rc); 11983 goto irq_fail_out; 11984 } 11985 11986 /* 11987 * Configure HBA MSI-X attention conditions to messages 11988 */ 11989 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11990 11991 if (!pmb) { 11992 rc = -ENOMEM; 11993 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11994 "0474 Unable to allocate memory for issuing " 11995 "MBOX_CONFIG_MSI command\n"); 11996 goto mem_fail_out; 11997 } 11998 rc = lpfc_config_msi(phba, pmb); 11999 if (rc) 12000 goto mbx_fail_out; 12001 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 12002 if (rc != MBX_SUCCESS) { 12003 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 12004 "0351 Config MSI mailbox command failed, " 12005 "mbxCmd x%x, mbxStatus x%x\n", 12006 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 12007 goto mbx_fail_out; 12008 } 12009 12010 /* Free memory allocated for mailbox command */ 12011 mempool_free(pmb, phba->mbox_mem_pool); 12012 return rc; 12013 12014 mbx_fail_out: 12015 /* Free memory allocated for mailbox command */ 12016 mempool_free(pmb, 
phba->mbox_mem_pool); 12017 12018 mem_fail_out: 12019 /* free the irq already requested */ 12020 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 12021 12022 irq_fail_out: 12023 /* free the irq already requested */ 12024 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 12025 12026 msi_fail_out: 12027 /* Unconfigure MSI-X capability structure */ 12028 pci_free_irq_vectors(phba->pcidev); 12029 12030 vec_fail_out: 12031 return rc; 12032 } 12033 12034 /** 12035 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 12036 * @phba: pointer to lpfc hba data structure. 12037 * 12038 * This routine is invoked to enable the MSI interrupt mode on a device with 12039 * the SLI-3 interface spec. The kernel function pci_enable_msi() is called to 12040 * enable the MSI vector. The device driver is responsible for calling 12041 * request_irq() to register the MSI vector with an interrupt handler, which 12042 * is done in this function. 12043 * 12044 * Return codes 12045 * 0 - successful 12046 * other values - error 12047 */ 12048 static int 12049 lpfc_sli_enable_msi(struct lpfc_hba *phba) 12050 { 12051 int rc; 12052 12053 rc = pci_enable_msi(phba->pcidev); 12054 if (!rc) 12055 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12056 "0462 PCI enable MSI mode success.\n"); 12057 else { 12058 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12059 "0471 PCI enable MSI mode failed (%d)\n", rc); 12060 return rc; 12061 } 12062 12063 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12064 0, LPFC_DRIVER_NAME, phba); 12065 if (rc) { 12066 pci_disable_msi(phba->pcidev); 12067 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12068 "0478 MSI request_irq failed (%d)\n", rc); 12069 } 12070 return rc; 12071 } 12072 12073 /** 12074 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 12075 * @phba: pointer to lpfc hba data structure. 12076 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 12077 * 12078 * This routine is invoked to enable the device interrupt and associate the 12079 * driver's interrupt handler(s) with interrupt vector(s) on a device with the 12080 * SLI-3 interface spec. Depending on the interrupt mode configured for the 12081 * driver, the driver will try to fall back from the configured interrupt mode 12082 * to an interrupt mode supported by the platform, kernel, and device, in the 12083 * order: 12084 * MSI-X -> MSI -> IRQ. 
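 *
 * Caller-side sketch (illustrative only, e.g. from the SLI-3 probe path):
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR) {
 *		// no interrupt mode could be enabled
 *		...
 *	}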
12085 * 12086 * Return codes 12087 * 0 - successful 12088 * other values - error 12089 **/ 12090 static uint32_t 12091 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 12092 { 12093 uint32_t intr_mode = LPFC_INTR_ERROR; 12094 int retval; 12095 12096 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 12097 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 12098 if (retval) 12099 return intr_mode; 12100 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; 12101 12102 if (cfg_mode == 2) { 12103 /* Now, try to enable MSI-X interrupt mode */ 12104 retval = lpfc_sli_enable_msix(phba); 12105 if (!retval) { 12106 /* Indicate initialization to MSI-X mode */ 12107 phba->intr_type = MSIX; 12108 intr_mode = 2; 12109 } 12110 } 12111 12112 /* Fallback to MSI if MSI-X initialization failed */ 12113 if (cfg_mode >= 1 && phba->intr_type == NONE) { 12114 retval = lpfc_sli_enable_msi(phba); 12115 if (!retval) { 12116 /* Indicate initialization to MSI mode */ 12117 phba->intr_type = MSI; 12118 intr_mode = 1; 12119 } 12120 } 12121 12122 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 12123 if (phba->intr_type == NONE) { 12124 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12125 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 12126 if (!retval) { 12127 /* Indicate initialization to INTx mode */ 12128 phba->intr_type = INTx; 12129 intr_mode = 0; 12130 } 12131 } 12132 return intr_mode; 12133 } 12134 12135 /** 12136 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 12137 * @phba: pointer to lpfc hba data structure. 12138 * 12139 * This routine is invoked to disable device interrupt and disassociate the 12140 * driver's interrupt handler(s) from interrupt vector(s) to device with 12141 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 12142 * release the interrupt vector(s) for the message signaled interrupt. 12143 **/ 12144 static void 12145 lpfc_sli_disable_intr(struct lpfc_hba *phba) 12146 { 12147 int nr_irqs, i; 12148 12149 if (phba->intr_type == MSIX) 12150 nr_irqs = LPFC_MSIX_VECTORS; 12151 else 12152 nr_irqs = 1; 12153 12154 for (i = 0; i < nr_irqs; i++) 12155 free_irq(pci_irq_vector(phba->pcidev, i), phba); 12156 pci_free_irq_vectors(phba->pcidev); 12157 12158 /* Reset interrupt management states */ 12159 phba->intr_type = NONE; 12160 phba->sli.slistat.sli_intr = 0; 12161 } 12162 12163 /** 12164 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 12165 * @phba: pointer to lpfc hba data structure. 12166 * @id: EQ vector index or Hardware Queue index 12167 * @match: LPFC_FIND_BY_EQ = match by EQ 12168 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 12169 * Return the CPU that matches the selection criteria 12170 */ 12171 static uint16_t 12172 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 12173 { 12174 struct lpfc_vector_map_info *cpup; 12175 int cpu; 12176 12177 /* Loop through all CPUs */ 12178 for_each_present_cpu(cpu) { 12179 cpup = &phba->sli4_hba.cpu_map[cpu]; 12180 12181 /* If we are matching by EQ, there may be multiple CPUs using 12182 * using the same vector, so select the one with 12183 * LPFC_CPU_FIRST_IRQ set. 
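 * E.g. lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ) returns the first
 * CPU whose cpu_map entry has eq == 0 and LPFC_CPU_FIRST_IRQ set.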
12184 */ 12185 if ((match == LPFC_FIND_BY_EQ) && 12186 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 12187 (cpup->eq == id)) 12188 return cpu; 12189 12190 /* If matching by HDWQ, select the first CPU that matches */ 12191 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 12192 return cpu; 12193 } 12194 return 0; 12195 } 12196 12197 #ifdef CONFIG_X86 12198 /** 12199 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 12200 * @phba: pointer to lpfc hba data structure. 12201 * @cpu: CPU map index 12202 * @phys_id: CPU package physical id 12203 * @core_id: CPU core id 12204 */ 12205 static int 12206 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 12207 uint16_t phys_id, uint16_t core_id) 12208 { 12209 struct lpfc_vector_map_info *cpup; 12210 int idx; 12211 12212 for_each_present_cpu(idx) { 12213 cpup = &phba->sli4_hba.cpu_map[idx]; 12214 /* Does the cpup match the one we are looking for */ 12215 if ((cpup->phys_id == phys_id) && 12216 (cpup->core_id == core_id) && 12217 (cpu != idx)) 12218 return 1; 12219 } 12220 return 0; 12221 } 12222 #endif 12223 12224 /* 12225 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 12226 * @phba: pointer to lpfc hba data structure. 12227 * @eqidx: index for eq and irq vector 12228 * @flag: flags to set for vector_map structure 12229 * @cpu: cpu used to index vector_map structure 12230 * 12231 * The routine assigns eq info into vector_map structure 12232 */ 12233 static inline void 12234 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 12235 unsigned int cpu) 12236 { 12237 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 12238 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 12239 12240 cpup->eq = eqidx; 12241 cpup->flag |= flag; 12242 12243 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12244 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 12245 cpu, eqhdl->irq, cpup->eq, cpup->flag); 12246 } 12247 12248 /** 12249 * lpfc_cpu_map_array_init - Initialize cpu_map structure 12250 * @phba: pointer to lpfc hba data structure. 12251 * 12252 * The routine initializes the cpu_map array structure 12253 */ 12254 static void 12255 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 12256 { 12257 struct lpfc_vector_map_info *cpup; 12258 struct lpfc_eq_intr_info *eqi; 12259 int cpu; 12260 12261 for_each_possible_cpu(cpu) { 12262 cpup = &phba->sli4_hba.cpu_map[cpu]; 12263 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 12264 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 12265 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 12266 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 12267 cpup->flag = 0; 12268 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 12269 INIT_LIST_HEAD(&eqi->list); 12270 eqi->icnt = 0; 12271 } 12272 } 12273 12274 /** 12275 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 12276 * @phba: pointer to lpfc hba data structure. 12277 * 12278 * The routine initializes the hba_eq_hdl array structure 12279 */ 12280 static void 12281 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 12282 { 12283 struct lpfc_hba_eq_hdl *eqhdl; 12284 int i; 12285 12286 for (i = 0; i < phba->cfg_irq_chann; i++) { 12287 eqhdl = lpfc_get_eq_hdl(i); 12288 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; 12289 eqhdl->phba = phba; 12290 } 12291 } 12292 12293 /** 12294 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 12295 * @phba: pointer to lpfc hba data structure. 12296 * @vectors: number of msix vectors allocated. 12297 * 12298 * The routine will figure out the CPU affinity assignment for every 12299 * MSI-X vector allocated for the HBA. 
12300 * In addition, the CPU to IO channel mapping will be calculated 12301 * and the phba->sli4_hba.cpu_map array will reflect this. 12302 */ 12303 static void 12304 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 12305 { 12306 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; 12307 int max_phys_id, min_phys_id; 12308 int max_core_id, min_core_id; 12309 struct lpfc_vector_map_info *cpup; 12310 struct lpfc_vector_map_info *new_cpup; 12311 #ifdef CONFIG_X86 12312 struct cpuinfo_x86 *cpuinfo; 12313 #endif 12314 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12315 struct lpfc_hdwq_stat *c_stat; 12316 #endif 12317 12318 max_phys_id = 0; 12319 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 12320 max_core_id = 0; 12321 min_core_id = LPFC_VECTOR_MAP_EMPTY; 12322 12323 /* Update CPU map with physical id and core id of each CPU */ 12324 for_each_present_cpu(cpu) { 12325 cpup = &phba->sli4_hba.cpu_map[cpu]; 12326 #ifdef CONFIG_X86 12327 cpuinfo = &cpu_data(cpu); 12328 cpup->phys_id = cpuinfo->phys_proc_id; 12329 cpup->core_id = cpuinfo->cpu_core_id; 12330 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 12331 cpup->flag |= LPFC_CPU_MAP_HYPER; 12332 #else 12333 /* No distinction between CPUs for other platforms */ 12334 cpup->phys_id = 0; 12335 cpup->core_id = cpu; 12336 #endif 12337 12338 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12339 "3328 CPU %d physid %d coreid %d flag x%x\n", 12340 cpu, cpup->phys_id, cpup->core_id, cpup->flag); 12341 12342 if (cpup->phys_id > max_phys_id) 12343 max_phys_id = cpup->phys_id; 12344 if (cpup->phys_id < min_phys_id) 12345 min_phys_id = cpup->phys_id; 12346 12347 if (cpup->core_id > max_core_id) 12348 max_core_id = cpup->core_id; 12349 if (cpup->core_id < min_core_id) 12350 min_core_id = cpup->core_id; 12351 } 12352 12353 /* After looking at each irq vector assigned to this pcidev, its 12354 * possible to see that not ALL CPUs have been accounted for. 12355 * Next we will set any unassigned (unaffinitized) cpu map 12356 * entries to a IRQ on the same phys_id. 12357 */ 12358 first_cpu = cpumask_first(cpu_present_mask); 12359 start_cpu = first_cpu; 12360 12361 for_each_present_cpu(cpu) { 12362 cpup = &phba->sli4_hba.cpu_map[cpu]; 12363 12364 /* Is this CPU entry unassigned */ 12365 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 12366 /* Mark CPU as IRQ not assigned by the kernel */ 12367 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 12368 12369 /* If so, find a new_cpup thats on the the SAME 12370 * phys_id as cpup. start_cpu will start where we 12371 * left off so all unassigned entries don't get assgined 12372 * the IRQ of the first entry. 12373 */ 12374 new_cpu = start_cpu; 12375 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12376 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12377 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 12378 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && 12379 (new_cpup->phys_id == cpup->phys_id)) 12380 goto found_same; 12381 new_cpu = cpumask_next( 12382 new_cpu, cpu_present_mask); 12383 if (new_cpu == nr_cpumask_bits) 12384 new_cpu = first_cpu; 12385 } 12386 /* At this point, we leave the CPU as unassigned */ 12387 continue; 12388 found_same: 12389 /* We found a matching phys_id, so copy the IRQ info */ 12390 cpup->eq = new_cpup->eq; 12391 12392 /* Bump start_cpu to the next slot to minmize the 12393 * chance of having multiple unassigned CPU entries 12394 * selecting the same IRQ. 
12395 */ 12396 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12397 if (start_cpu == nr_cpumask_bits) 12398 start_cpu = first_cpu; 12399 12400 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12401 "3337 Set Affinity: CPU %d " 12402 "eq %d from peer cpu %d same " 12403 "phys_id (%d)\n", 12404 cpu, cpup->eq, new_cpu, 12405 cpup->phys_id); 12406 } 12407 } 12408 12409 /* Set any unassigned cpu map entries to an IRQ on any phys_id */ 12410 start_cpu = first_cpu; 12411 12412 for_each_present_cpu(cpu) { 12413 cpup = &phba->sli4_hba.cpu_map[cpu]; 12414 12415 /* Is this entry unassigned */ 12416 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 12417 /* Mark it as IRQ not assigned by the kernel */ 12418 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 12419 12420 /* If so, find a new_cpup that's on ANY phys_id, 12421 * not just cpup's. start_cpu will start where we 12422 * left off so all unassigned entries don't get 12423 * assigned the IRQ of the first entry. 12424 */ 12425 new_cpu = start_cpu; 12426 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12427 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12428 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 12429 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) 12430 goto found_any; 12431 new_cpu = cpumask_next( 12432 new_cpu, cpu_present_mask); 12433 if (new_cpu == nr_cpumask_bits) 12434 new_cpu = first_cpu; 12435 } 12436 /* We should never leave an entry unassigned */ 12437 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12438 "3339 Set Affinity: CPU %d " 12439 "eq %d UNASSIGNED\n", 12440 cpu, cpup->eq); 12441 continue; 12442 found_any: 12443 /* We found an available entry, copy the IRQ info */ 12444 cpup->eq = new_cpup->eq; 12445 12446 /* Bump start_cpu to the next slot to minimize the 12447 * chance of having multiple unassigned CPU entries 12448 * selecting the same IRQ. 12449 */ 12450 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12451 if (start_cpu == nr_cpumask_bits) 12452 start_cpu = first_cpu; 12453 12454 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12455 "3338 Set Affinity: CPU %d " 12456 "eq %d from peer cpu %d (%d/%d)\n", 12457 cpu, cpup->eq, new_cpu, 12458 new_cpup->phys_id, new_cpup->core_id); 12459 } 12460 } 12461 12462 /* Assign hdwq indices that are unique across all cpus in the map 12463 * that are also FIRST_CPUs. 12464 */ 12465 idx = 0; 12466 for_each_present_cpu(cpu) { 12467 cpup = &phba->sli4_hba.cpu_map[cpu]; 12468 12469 /* Only FIRST IRQs get a hdwq index assignment. */ 12470 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12471 continue; 12472 12473 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ 12474 cpup->hdwq = idx; 12475 idx++; 12476 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12477 "3333 Set Affinity: CPU %d (phys %d core %d): " 12478 "hdwq %d eq %d flg x%x\n", 12479 cpu, cpup->phys_id, cpup->core_id, 12480 cpup->hdwq, cpup->eq, cpup->flag); 12481 } 12482 /* Associate a hdwq with each cpu_map entry 12483 * This will be 1 to 1 - hdwq to cpu, unless there are fewer 12484 * hardware queues than CPUs. For that case we will just round-robin 12485 * the available hardware queues as they get assigned to CPUs. 12486 * The next_idx is the idx from the FIRST_CPU loop above to account 12487 * for irq_chann < hdwq. The idx is used for round-robin assignments 12488 * and needs to start at 0. 12489 */ 12490 next_idx = idx; 12491 start_cpu = 0; 12492 idx = 0; 12493 for_each_present_cpu(cpu) { 12494 cpup = &phba->sli4_hba.cpu_map[cpu]; 12495 12496 /* FIRST cpus are already mapped.
*/ 12497 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 12498 continue; 12499 12500 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 12501 * of the unassigned cpus to the next idx so that all 12502 * hdw queues are fully utilized. 12503 */ 12504 if (next_idx < phba->cfg_hdw_queue) { 12505 cpup->hdwq = next_idx; 12506 next_idx++; 12507 continue; 12508 } 12509 12510 /* Not a First CPU and all hdw_queues are used. Reuse a 12511 * Hardware Queue for another CPU, so be smart about it 12512 * and pick one that has its IRQ/EQ mapped to the same phys_id 12513 * (CPU package) and core_id. 12514 */ 12515 new_cpu = start_cpu; 12516 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12517 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12518 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12519 new_cpup->phys_id == cpup->phys_id && 12520 new_cpup->core_id == cpup->core_id) { 12521 goto found_hdwq; 12522 } 12523 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12524 if (new_cpu == nr_cpumask_bits) 12525 new_cpu = first_cpu; 12526 } 12527 12528 /* If we can't match both phys_id and core_id, 12529 * settle for just a phys_id match. 12530 */ 12531 new_cpu = start_cpu; 12532 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12533 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12534 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12535 new_cpup->phys_id == cpup->phys_id) 12536 goto found_hdwq; 12537 12538 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12539 if (new_cpu == nr_cpumask_bits) 12540 new_cpu = first_cpu; 12541 } 12542 12543 /* Otherwise just round robin on cfg_hdw_queue */ 12544 cpup->hdwq = idx % phba->cfg_hdw_queue; 12545 idx++; 12546 goto logit; 12547 found_hdwq: 12548 /* We found an available entry, copy the IRQ info */ 12549 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12550 if (start_cpu == nr_cpumask_bits) 12551 start_cpu = first_cpu; 12552 cpup->hdwq = new_cpup->hdwq; 12553 logit: 12554 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12555 "3335 Set Affinity: CPU %d (phys %d core %d): " 12556 "hdwq %d eq %d flg x%x\n", 12557 cpu, cpup->phys_id, cpup->core_id, 12558 cpup->hdwq, cpup->eq, cpup->flag); 12559 } 12560 12561 /* 12562 * Initialize the cpu_map slots for not-present cpus in case 12563 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 12564 */ 12565 idx = 0; 12566 for_each_possible_cpu(cpu) { 12567 cpup = &phba->sli4_hba.cpu_map[cpu]; 12568 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12569 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 12570 c_stat->hdwq_no = cpup->hdwq; 12571 #endif 12572 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 12573 continue; 12574 12575 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 12576 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12577 c_stat->hdwq_no = cpup->hdwq; 12578 #endif 12579 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12580 "3340 Set Affinity: not present " 12581 "CPU %d hdwq %d\n", 12582 cpu, cpup->hdwq); 12583 } 12584 12585 /* The cpu_map array will be used later during initialization 12586 * when EQ / CQ / WQs are allocated and configured. 12587 */ 12588 return; 12589 } 12590 12591 /** 12592 * lpfc_cpuhp_get_eq 12593 * 12594 * @phba: pointer to lpfc hba data structure. 
12595 * @cpu: cpu going offline 12596 * @eqlist: eq list to append to 12597 */ 12598 static int 12599 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, 12600 struct list_head *eqlist) 12601 { 12602 const struct cpumask *maskp; 12603 struct lpfc_queue *eq; 12604 struct cpumask *tmp; 12605 u16 idx; 12606 12607 tmp = kzalloc(cpumask_size(), GFP_KERNEL); 12608 if (!tmp) 12609 return -ENOMEM; 12610 12611 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12612 maskp = pci_irq_get_affinity(phba->pcidev, idx); 12613 if (!maskp) 12614 continue; 12615 /* 12616 * If the irq is not affinitized to the cpu going 12617 * offline, then we don't need to poll the eq attached 12618 * to it. 12619 */ 12620 if (!cpumask_and(tmp, maskp, cpumask_of(cpu))) 12621 continue; 12622 /* Get the cpus that are online and are 12623 * affinitized to this irq vector. If the count is 12624 * more than 1 then cpuhp is not going to shut 12625 * down this vector. Since this cpu has not 12626 * gone offline yet, we need >1. 12627 */ 12628 cpumask_and(tmp, maskp, cpu_online_mask); 12629 if (cpumask_weight(tmp) > 1) 12630 continue; 12631 12632 /* Now that we have an irq to shutdown, get the eq 12633 * mapped to this irq. Note: multiple hdwq's in 12634 * the software can share an eq, but eventually 12635 * only one eq will be mapped to this vector. 12636 */ 12637 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 12638 list_add(&eq->_poll_list, eqlist); 12639 } 12640 kfree(tmp); 12641 return 0; 12642 } 12643 12644 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) 12645 { 12646 if (phba->sli_rev != LPFC_SLI_REV4) 12647 return; 12648 12649 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, 12650 &phba->cpuhp); 12651 /* 12652 * unregistering the instance doesn't stop the polling 12653 * timer. Wait for the poll timer to retire.
12654 */ 12655 synchronize_rcu(); 12656 del_timer_sync(&phba->cpuhp_poll_timer); 12657 } 12658 12659 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 12660 { 12661 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 12662 return; 12663 12664 __lpfc_cpuhp_remove(phba); 12665 } 12666 12667 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 12668 { 12669 if (phba->sli_rev != LPFC_SLI_REV4) 12670 return; 12671 12672 rcu_read_lock(); 12673 12674 if (!list_empty(&phba->poll_list)) 12675 mod_timer(&phba->cpuhp_poll_timer, 12676 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 12677 12678 rcu_read_unlock(); 12679 12680 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 12681 &phba->cpuhp); 12682 } 12683 12684 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 12685 { 12686 if (phba->pport->load_flag & FC_UNLOADING) { 12687 *retval = -EAGAIN; 12688 return true; 12689 } 12690 12691 if (phba->sli_rev != LPFC_SLI_REV4) { 12692 *retval = 0; 12693 return true; 12694 } 12695 12696 /* proceed with the hotplug */ 12697 return false; 12698 } 12699 12700 /** 12701 * lpfc_irq_set_aff - set IRQ affinity 12702 * @eqhdl: EQ handle 12703 * @cpu: cpu to set affinity 12704 * 12705 **/ 12706 static inline void 12707 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 12708 { 12709 cpumask_clear(&eqhdl->aff_mask); 12710 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 12711 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12712 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); 12713 } 12714 12715 /** 12716 * lpfc_irq_clear_aff - clear IRQ affinity 12717 * @eqhdl: EQ handle 12718 * 12719 **/ 12720 static inline void 12721 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 12722 { 12723 cpumask_clear(&eqhdl->aff_mask); 12724 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12725 } 12726 12727 /** 12728 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 12729 * @phba: pointer to HBA context object. 12730 * @cpu: cpu going offline/online 12731 * @offline: true, cpu is going offline. false, cpu is coming online. 12732 * 12733 * If cpu is going offline, we'll try our best effort to find the next 12734 * online cpu on the phba's original_mask and migrate all offlining IRQ 12735 * affinities. 12736 * 12737 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. 12738 * 12739 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on 12740 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
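 * (The routine returns early when phba->irq_chann_mode == NORMAL_MODE.)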
12741 * 12742 **/ 12743 static void 12744 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 12745 { 12746 struct lpfc_vector_map_info *cpup; 12747 struct cpumask *aff_mask; 12748 unsigned int cpu_select, cpu_next, idx; 12749 const struct cpumask *orig_mask; 12750 12751 if (phba->irq_chann_mode == NORMAL_MODE) 12752 return; 12753 12754 orig_mask = &phba->sli4_hba.irq_aff_mask; 12755 12756 if (!cpumask_test_cpu(cpu, orig_mask)) 12757 return; 12758 12759 cpup = &phba->sli4_hba.cpu_map[cpu]; 12760 12761 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12762 return; 12763 12764 if (offline) { 12765 /* Find next online CPU on original mask */ 12766 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); 12767 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); 12768 12769 /* Found a valid CPU */ 12770 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 12771 /* Go through each eqhdl and ensure offlining 12772 * cpu aff_mask is migrated 12773 */ 12774 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12775 aff_mask = lpfc_get_aff_mask(idx); 12776 12777 /* Migrate affinity */ 12778 if (cpumask_test_cpu(cpu, aff_mask)) 12779 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 12780 cpu_select); 12781 } 12782 } else { 12783 /* Rely on irqbalance if no online CPUs left on NUMA */ 12784 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 12785 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 12786 } 12787 } else { 12788 /* Migrate affinity back to this CPU */ 12789 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 12790 } 12791 } 12792 12793 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 12794 { 12795 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12796 struct lpfc_queue *eq, *next; 12797 LIST_HEAD(eqlist); 12798 int retval; 12799 12800 if (!phba) { 12801 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12802 return 0; 12803 } 12804 12805 if (__lpfc_cpuhp_checks(phba, &retval)) 12806 return retval; 12807 12808 lpfc_irq_rebalance(phba, cpu, true); 12809 12810 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 12811 if (retval) 12812 return retval; 12813 12814 /* start polling on these eq's */ 12815 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 12816 list_del_init(&eq->_poll_list); 12817 lpfc_sli4_start_polling(eq); 12818 } 12819 12820 return 0; 12821 } 12822 12823 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 12824 { 12825 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12826 struct lpfc_queue *eq, *next; 12827 unsigned int n; 12828 int retval; 12829 12830 if (!phba) { 12831 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12832 return 0; 12833 } 12834 12835 if (__lpfc_cpuhp_checks(phba, &retval)) 12836 return retval; 12837 12838 lpfc_irq_rebalance(phba, cpu, false); 12839 12840 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 12841 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 12842 if (n == cpu) 12843 lpfc_sli4_stop_polling(eq); 12844 } 12845 12846 return 0; 12847 } 12848 12849 /** 12850 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 12851 * @phba: pointer to lpfc hba data structure. 12852 * 12853 * This routine is invoked to enable the MSI-X interrupt vectors to device 12854 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 12855 * to cpus on the system. 
12856 * 12857 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 12858 * the number of cpus on the same numa node as this adapter. The vectors are 12859 * allocated without requesting OS affinity mapping. A vector will be 12860 * allocated and assigned to each online and offline cpu. If the cpu is 12861 * online, then affinity will be set to that cpu. If the cpu is offline, then 12862 * affinity will be set to the nearest peer cpu within the numa node that is 12863 * online. If there are no online cpus within the numa node, affinity is not 12864 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 12865 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 12866 * configured. 12867 * 12868 * If numa mode is not enabled and there is more than 1 vector allocated, then 12869 * the driver relies on the managed irq interface where the OS assigns vector to 12870 * cpu affinity. The driver will then use that affinity mapping to setup its 12871 * cpu mapping table. 12872 * 12873 * Return codes 12874 * 0 - successful 12875 * other values - error 12876 **/ 12877 static int 12878 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 12879 { 12880 int vectors, rc, index; 12881 char *name; 12882 const struct cpumask *aff_mask = NULL; 12883 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 12884 struct lpfc_vector_map_info *cpup; 12885 struct lpfc_hba_eq_hdl *eqhdl; 12886 const struct cpumask *maskp; 12887 unsigned int flags = PCI_IRQ_MSIX; 12888 12889 /* Set up MSI-X multi-message vectors */ 12890 vectors = phba->cfg_irq_chann; 12891 12892 if (phba->irq_chann_mode != NORMAL_MODE) 12893 aff_mask = &phba->sli4_hba.irq_aff_mask; 12894 12895 if (aff_mask) { 12896 cpu_cnt = cpumask_weight(aff_mask); 12897 vectors = min(phba->cfg_irq_chann, cpu_cnt); 12898 12899 /* cpu: iterates over aff_mask including offline or online 12900 * cpu_select: iterates over online aff_mask to set affinity 12901 */ 12902 cpu = cpumask_first(aff_mask); 12903 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 12904 } else { 12905 flags |= PCI_IRQ_AFFINITY; 12906 } 12907 12908 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 12909 if (rc < 0) { 12910 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12911 "0484 PCI enable MSI-X failed (%d)\n", rc); 12912 goto vec_fail_out; 12913 } 12914 vectors = rc; 12915 12916 /* Assign MSI-X vectors to interrupt handlers */ 12917 for (index = 0; index < vectors; index++) { 12918 eqhdl = lpfc_get_eq_hdl(index); 12919 name = eqhdl->handler_name; 12920 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 12921 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 12922 LPFC_DRIVER_HANDLER_NAME"%d", index); 12923 12924 eqhdl->idx = index; 12925 rc = request_irq(pci_irq_vector(phba->pcidev, index), 12926 &lpfc_sli4_hba_intr_handler, 0, 12927 name, eqhdl); 12928 if (rc) { 12929 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12930 "0486 MSI-X fast-path (%d) " 12931 "request_irq failed (%d)\n", index, rc); 12932 goto cfg_fail_out; 12933 } 12934 12935 eqhdl->irq = pci_irq_vector(phba->pcidev, index); 12936 12937 if (aff_mask) { 12938 /* If found a neighboring online cpu, set affinity */ 12939 if (cpu_select < nr_cpu_ids) 12940 lpfc_irq_set_aff(eqhdl, cpu_select); 12941 12942 /* Assign EQ to cpu_map */ 12943 lpfc_assign_eq_map_info(phba, index, 12944 LPFC_CPU_FIRST_IRQ, 12945 cpu); 12946 12947 /* Iterate to next offline or online cpu in aff_mask */ 12948 cpu = cpumask_next(cpu, aff_mask); 12949 12950 /* Find next online cpu in aff_mask to set 
affinity */ 12951 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 12952 } else if (vectors == 1) { 12953 cpu = cpumask_first(cpu_present_mask); 12954 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, 12955 cpu); 12956 } else { 12957 maskp = pci_irq_get_affinity(phba->pcidev, index); 12958 12959 /* Loop through all CPUs associated with vector index */ 12960 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 12961 cpup = &phba->sli4_hba.cpu_map[cpu]; 12962 12963 /* If this is the first CPU thats assigned to 12964 * this vector, set LPFC_CPU_FIRST_IRQ. 12965 * 12966 * With certain platforms its possible that irq 12967 * vectors are affinitized to all the cpu's. 12968 * This can result in each cpu_map.eq to be set 12969 * to the last vector, resulting in overwrite 12970 * of all the previous cpu_map.eq. Ensure that 12971 * each vector receives a place in cpu_map. 12972 * Later call to lpfc_cpu_affinity_check will 12973 * ensure we are nicely balanced out. 12974 */ 12975 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) 12976 continue; 12977 lpfc_assign_eq_map_info(phba, index, 12978 LPFC_CPU_FIRST_IRQ, 12979 cpu); 12980 break; 12981 } 12982 } 12983 } 12984 12985 if (vectors != phba->cfg_irq_chann) { 12986 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12987 "3238 Reducing IO channels to match number of " 12988 "MSI-X vectors, requested %d got %d\n", 12989 phba->cfg_irq_chann, vectors); 12990 if (phba->cfg_irq_chann > vectors) 12991 phba->cfg_irq_chann = vectors; 12992 } 12993 12994 return rc; 12995 12996 cfg_fail_out: 12997 /* free the irq already requested */ 12998 for (--index; index >= 0; index--) { 12999 eqhdl = lpfc_get_eq_hdl(index); 13000 lpfc_irq_clear_aff(eqhdl); 13001 irq_set_affinity_hint(eqhdl->irq, NULL); 13002 free_irq(eqhdl->irq, eqhdl); 13003 } 13004 13005 /* Unconfigure MSI-X capability structure */ 13006 pci_free_irq_vectors(phba->pcidev); 13007 13008 vec_fail_out: 13009 return rc; 13010 } 13011 13012 /** 13013 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 13014 * @phba: pointer to lpfc hba data structure. 13015 * 13016 * This routine is invoked to enable the MSI interrupt mode to device with 13017 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 13018 * called to enable the MSI vector. The device driver is responsible for 13019 * calling the request_irq() to register MSI vector with a interrupt the 13020 * handler, which is done in this function. 13021 * 13022 * Return codes 13023 * 0 - successful 13024 * other values - error 13025 **/ 13026 static int 13027 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 13028 { 13029 int rc, index; 13030 unsigned int cpu; 13031 struct lpfc_hba_eq_hdl *eqhdl; 13032 13033 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 13034 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 13035 if (rc > 0) 13036 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13037 "0487 PCI enable MSI mode success.\n"); 13038 else { 13039 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13040 "0488 PCI enable MSI mode failed (%d)\n", rc); 13041 return rc ? 
rc : -1; 13042 } 13043 13044 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13045 0, LPFC_DRIVER_NAME, phba); 13046 if (rc) { 13047 pci_free_irq_vectors(phba->pcidev); 13048 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13049 "0490 MSI request_irq failed (%d)\n", rc); 13050 return rc; 13051 } 13052 13053 eqhdl = lpfc_get_eq_hdl(0); 13054 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 13055 13056 cpu = cpumask_first(cpu_present_mask); 13057 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 13058 13059 for (index = 0; index < phba->cfg_irq_chann; index++) { 13060 eqhdl = lpfc_get_eq_hdl(index); 13061 eqhdl->idx = index; 13062 } 13063 13064 return 0; 13065 } 13066 13067 /** 13068 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 13069 * @phba: pointer to lpfc hba data structure. 13070 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 13071 * 13072 * This routine is invoked to enable device interrupt and associate driver's 13073 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 13074 * interface spec. Depends on the interrupt mode configured to the driver, 13075 * the driver will try to fallback from the configured interrupt mode to an 13076 * interrupt mode which is supported by the platform, kernel, and device in 13077 * the order of: 13078 * MSI-X -> MSI -> IRQ. 13079 * 13080 * Return codes 13081 * 0 - successful 13082 * other values - error 13083 **/ 13084 static uint32_t 13085 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 13086 { 13087 uint32_t intr_mode = LPFC_INTR_ERROR; 13088 int retval, idx; 13089 13090 if (cfg_mode == 2) { 13091 /* Preparation before conf_msi mbox cmd */ 13092 retval = 0; 13093 if (!retval) { 13094 /* Now, try to enable MSI-X interrupt mode */ 13095 retval = lpfc_sli4_enable_msix(phba); 13096 if (!retval) { 13097 /* Indicate initialization to MSI-X mode */ 13098 phba->intr_type = MSIX; 13099 intr_mode = 2; 13100 } 13101 } 13102 } 13103 13104 /* Fallback to MSI if MSI-X initialization failed */ 13105 if (cfg_mode >= 1 && phba->intr_type == NONE) { 13106 retval = lpfc_sli4_enable_msi(phba); 13107 if (!retval) { 13108 /* Indicate initialization to MSI mode */ 13109 phba->intr_type = MSI; 13110 intr_mode = 1; 13111 } 13112 } 13113 13114 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 13115 if (phba->intr_type == NONE) { 13116 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13117 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 13118 if (!retval) { 13119 struct lpfc_hba_eq_hdl *eqhdl; 13120 unsigned int cpu; 13121 13122 /* Indicate initialization to INTx mode */ 13123 phba->intr_type = INTx; 13124 intr_mode = 0; 13125 13126 eqhdl = lpfc_get_eq_hdl(0); 13127 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 13128 13129 cpu = cpumask_first(cpu_present_mask); 13130 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 13131 cpu); 13132 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 13133 eqhdl = lpfc_get_eq_hdl(idx); 13134 eqhdl->idx = idx; 13135 } 13136 } 13137 } 13138 return intr_mode; 13139 } 13140 13141 /** 13142 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 13143 * @phba: pointer to lpfc hba data structure. 13144 * 13145 * This routine is invoked to disable device interrupt and disassociate 13146 * the driver's interrupt handler(s) from interrupt vector(s) to device 13147 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 13148 * will release the interrupt vector(s) for the message signaled interrupt. 
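 * For MSI-X, the affinity hint of each vector is cleared and its IRQ is
 * freed individually before pci_free_irq_vectors() releases the vectors.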
13149 **/ 13150 static void 13151 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 13152 { 13153 /* Disable the currently initialized interrupt mode */ 13154 if (phba->intr_type == MSIX) { 13155 int index; 13156 struct lpfc_hba_eq_hdl *eqhdl; 13157 13158 /* Free up MSI-X multi-message vectors */ 13159 for (index = 0; index < phba->cfg_irq_chann; index++) { 13160 eqhdl = lpfc_get_eq_hdl(index); 13161 lpfc_irq_clear_aff(eqhdl); 13162 irq_set_affinity_hint(eqhdl->irq, NULL); 13163 free_irq(eqhdl->irq, eqhdl); 13164 } 13165 } else { 13166 free_irq(phba->pcidev->irq, phba); 13167 } 13168 13169 pci_free_irq_vectors(phba->pcidev); 13170 13171 /* Reset interrupt management states */ 13172 phba->intr_type = NONE; 13173 phba->sli.slistat.sli_intr = 0; 13174 } 13175 13176 /** 13177 * lpfc_unset_hba - Unset SLI3 hba device initialization 13178 * @phba: pointer to lpfc hba data structure. 13179 * 13180 * This routine is invoked to unset the HBA device initialization steps to 13181 * a device with SLI-3 interface spec. 13182 **/ 13183 static void 13184 lpfc_unset_hba(struct lpfc_hba *phba) 13185 { 13186 struct lpfc_vport *vport = phba->pport; 13187 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 13188 13189 spin_lock_irq(shost->host_lock); 13190 vport->load_flag |= FC_UNLOADING; 13191 spin_unlock_irq(shost->host_lock); 13192 13193 kfree(phba->vpi_bmask); 13194 kfree(phba->vpi_ids); 13195 13196 lpfc_stop_hba_timers(phba); 13197 13198 phba->pport->work_port_events = 0; 13199 13200 lpfc_sli_hba_down(phba); 13201 13202 lpfc_sli_brdrestart(phba); 13203 13204 lpfc_sli_disable_intr(phba); 13205 13206 return; 13207 } 13208 13209 /** 13210 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 13211 * @phba: Pointer to HBA context object. 13212 * 13213 * This function is called in the SLI4 code path to wait for completion 13214 * of device's XRIs exchange busy. It will check the XRI exchange busy 13215 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 13216 * that, it will check the XRI exchange busy on outstanding FCP and ELS 13217 * I/Os every 30 seconds, log error message, and wait forever. Only when 13218 * all XRI exchange busy complete, the driver unload shall proceed with 13219 * invoking the function reset ioctl mailbox command to the CNA and the 13220 * the rest of the driver unload resource release. 13221 **/ 13222 static void 13223 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 13224 { 13225 struct lpfc_sli4_hdw_queue *qp; 13226 int idx, ccnt; 13227 int wait_time = 0; 13228 int io_xri_cmpl = 1; 13229 int nvmet_xri_cmpl = 1; 13230 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13231 13232 /* Driver just aborted IOs during the hba_unset process. Pause 13233 * here to give the HBA time to complete the IO and get entries 13234 * into the abts lists. 13235 */ 13236 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 13237 13238 /* Wait for NVME pending IO to flush back to transport. 
*/ 13239 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13240 lpfc_nvme_wait_for_io_drain(phba); 13241 13242 ccnt = 0; 13243 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13244 qp = &phba->sli4_hba.hdwq[idx]; 13245 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 13246 if (!io_xri_cmpl) /* if list is NOT empty */ 13247 ccnt++; 13248 } 13249 if (ccnt) 13250 io_xri_cmpl = 0; 13251 13252 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13253 nvmet_xri_cmpl = 13254 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13255 } 13256 13257 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 13258 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 13259 if (!nvmet_xri_cmpl) 13260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13261 "6424 NVMET XRI exchange busy " 13262 "wait time: %d seconds.\n", 13263 wait_time/1000); 13264 if (!io_xri_cmpl) 13265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13266 "6100 IO XRI exchange busy " 13267 "wait time: %d seconds.\n", 13268 wait_time/1000); 13269 if (!els_xri_cmpl) 13270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13271 "2878 ELS XRI exchange busy " 13272 "wait time: %d seconds.\n", 13273 wait_time/1000); 13274 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 13275 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 13276 } else { 13277 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 13278 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 13279 } 13280 13281 ccnt = 0; 13282 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13283 qp = &phba->sli4_hba.hdwq[idx]; 13284 io_xri_cmpl = list_empty( 13285 &qp->lpfc_abts_io_buf_list); 13286 if (!io_xri_cmpl) /* if list is NOT empty */ 13287 ccnt++; 13288 } 13289 if (ccnt) 13290 io_xri_cmpl = 0; 13291 13292 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13293 nvmet_xri_cmpl = list_empty( 13294 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13295 } 13296 els_xri_cmpl = 13297 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13298 13299 } 13300 } 13301 13302 /** 13303 * lpfc_sli4_hba_unset - Unset the fcoe hba 13304 * @phba: Pointer to HBA context object. 13305 * 13306 * This function is called in the SLI4 code path to reset the HBA's FCoE 13307 * function. The caller is not required to hold any lock. This routine 13308 * issues PCI function reset mailbox command to reset the FCoE function. 13309 * At the end of the function, it calls lpfc_hba_down_post function to 13310 * free any pending commands. 13311 **/ 13312 static void 13313 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 13314 { 13315 int wait_cnt = 0; 13316 LPFC_MBOXQ_t *mboxq; 13317 struct pci_dev *pdev = phba->pcidev; 13318 13319 lpfc_stop_hba_timers(phba); 13320 hrtimer_cancel(&phba->cmf_timer); 13321 13322 if (phba->pport) 13323 phba->sli4_hba.intr_enable = 0; 13324 13325 /* 13326 * Gracefully wait out the potential current outstanding asynchronous 13327 * mailbox command. 
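 * The sequence below first blocks new asynchronous mailbox commands from
 * being posted, then polls for the active command to complete, and
 * finally forces completion of that command if the wait times out.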
13328 */ 13329 13330 /* First, block any pending async mailbox command from posted */ 13331 spin_lock_irq(&phba->hbalock); 13332 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 13333 spin_unlock_irq(&phba->hbalock); 13334 /* Now, trying to wait it out if we can */ 13335 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13336 msleep(10); 13337 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 13338 break; 13339 } 13340 /* Forcefully release the outstanding mailbox command if timed out */ 13341 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13342 spin_lock_irq(&phba->hbalock); 13343 mboxq = phba->sli.mbox_active; 13344 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 13345 __lpfc_mbox_cmpl_put(phba, mboxq); 13346 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13347 phba->sli.mbox_active = NULL; 13348 spin_unlock_irq(&phba->hbalock); 13349 } 13350 13351 /* Abort all iocbs associated with the hba */ 13352 lpfc_sli_hba_iocb_abort(phba); 13353 13354 /* Wait for completion of device XRI exchange busy */ 13355 lpfc_sli4_xri_exchange_busy_wait(phba); 13356 13357 /* per-phba callback de-registration for hotplug event */ 13358 if (phba->pport) 13359 lpfc_cpuhp_remove(phba); 13360 13361 /* Disable PCI subsystem interrupt */ 13362 lpfc_sli4_disable_intr(phba); 13363 13364 /* Disable SR-IOV if enabled */ 13365 if (phba->cfg_sriov_nr_virtfn) 13366 pci_disable_sriov(pdev); 13367 13368 /* Stop kthread signal shall trigger work_done one more time */ 13369 kthread_stop(phba->worker_thread); 13370 13371 /* Disable FW logging to host memory */ 13372 lpfc_ras_stop_fwlog(phba); 13373 13374 /* Unset the queues shared with the hardware then release all 13375 * allocated resources. 13376 */ 13377 lpfc_sli4_queue_unset(phba); 13378 lpfc_sli4_queue_destroy(phba); 13379 13380 /* Reset SLI4 HBA FCoE function */ 13381 lpfc_pci_function_reset(phba); 13382 13383 /* Free RAS DMA memory */ 13384 if (phba->ras_fwlog.ras_enabled) 13385 lpfc_sli4_ras_dma_free(phba); 13386 13387 /* Stop the SLI4 device port */ 13388 if (phba->pport) 13389 phba->pport->work_port_events = 0; 13390 } 13391 13392 static uint32_t 13393 lpfc_cgn_crc32(uint32_t crc, u8 byte) 13394 { 13395 uint32_t msb = 0; 13396 uint32_t bit; 13397 13398 for (bit = 0; bit < 8; bit++) { 13399 msb = (crc >> 31) & 1; 13400 crc <<= 1; 13401 13402 if (msb ^ (byte & 1)) { 13403 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; 13404 crc |= 1; 13405 } 13406 byte >>= 1; 13407 } 13408 return crc; 13409 } 13410 13411 static uint32_t 13412 lpfc_cgn_reverse_bits(uint32_t wd) 13413 { 13414 uint32_t result = 0; 13415 uint32_t i; 13416 13417 for (i = 0; i < 32; i++) { 13418 result <<= 1; 13419 result |= (1 & (wd >> i)); 13420 } 13421 return result; 13422 } 13423 13424 /* 13425 * The routine corresponds with the algorithm the HBA firmware 13426 * uses to validate the data integrity. 
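 *
 * Typical usage, as in lpfc_init_congestion_buf() below, seeds the CRC
 * with LPFC_CGN_CRC32_SEED and runs it over the whole congestion buffer:
 *
 *   crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *   cp->cgn_info_crc = cpu_to_le32(crc);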
13427 */ 13428 uint32_t 13429 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) 13430 { 13431 uint32_t i; 13432 uint32_t result; 13433 uint8_t *data = (uint8_t *)ptr; 13434 13435 for (i = 0; i < byteLen; ++i) 13436 crc = lpfc_cgn_crc32(crc, data[i]); 13437 13438 result = ~lpfc_cgn_reverse_bits(crc); 13439 return result; 13440 } 13441 13442 void 13443 lpfc_init_congestion_buf(struct lpfc_hba *phba) 13444 { 13445 struct lpfc_cgn_info *cp; 13446 struct timespec64 cmpl_time; 13447 struct tm broken; 13448 uint16_t size; 13449 uint32_t crc; 13450 13451 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13452 "6235 INIT Congestion Buffer %p\n", phba->cgn_i); 13453 13454 if (!phba->cgn_i) 13455 return; 13456 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13457 13458 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 13459 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 13460 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 13461 atomic_set(&phba->cgn_sync_warn_cnt, 0); 13462 13463 atomic_set(&phba->cgn_driver_evt_cnt, 0); 13464 atomic_set(&phba->cgn_latency_evt_cnt, 0); 13465 atomic64_set(&phba->cgn_latency_evt, 0); 13466 phba->cgn_evt_minute = 0; 13467 phba->hba_flag &= ~HBA_CGN_DAY_WRAP; 13468 13469 memset(cp, 0xff, LPFC_CGN_DATA_SIZE); 13470 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); 13471 cp->cgn_info_version = LPFC_CGN_INFO_V3; 13472 13473 /* cgn parameters */ 13474 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 13475 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 13476 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 13477 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 13478 13479 ktime_get_real_ts64(&cmpl_time); 13480 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13481 13482 cp->cgn_info_month = broken.tm_mon + 1; 13483 cp->cgn_info_day = broken.tm_mday; 13484 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ 13485 cp->cgn_info_hour = broken.tm_hour; 13486 cp->cgn_info_minute = broken.tm_min; 13487 cp->cgn_info_second = broken.tm_sec; 13488 13489 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13490 "2643 CGNInfo Init: Start Time " 13491 "%d/%d/%d %d:%d:%d\n", 13492 cp->cgn_info_day, cp->cgn_info_month, 13493 cp->cgn_info_year, cp->cgn_info_hour, 13494 cp->cgn_info_minute, cp->cgn_info_second); 13495 13496 /* Fill in default LUN qdepth */ 13497 if (phba->pport) { 13498 size = (uint16_t)(phba->pport->cfg_lun_queue_depth); 13499 cp->cgn_lunq = cpu_to_le16(size); 13500 } 13501 13502 /* last used Index initialized to 0xff already */ 13503 13504 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13505 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13506 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13507 cp->cgn_info_crc = cpu_to_le32(crc); 13508 13509 phba->cgn_evt_timestamp = jiffies + 13510 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 13511 } 13512 13513 void 13514 lpfc_init_congestion_stat(struct lpfc_hba *phba) 13515 { 13516 struct lpfc_cgn_info *cp; 13517 struct timespec64 cmpl_time; 13518 struct tm broken; 13519 uint32_t crc; 13520 13521 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13522 "6236 INIT Congestion Stat %p\n", phba->cgn_i); 13523 13524 if (!phba->cgn_i) 13525 return; 13526 13527 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13528 memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE); 13529 13530 ktime_get_real_ts64(&cmpl_time); 13531 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13532 13533 cp->cgn_stat_month = broken.tm_mon + 1; 13534 cp->cgn_stat_day = broken.tm_mday; 13535 cp->cgn_stat_year = broken.tm_year - 
100; /* relative to 2000 */ 13536 cp->cgn_stat_hour = broken.tm_hour; 13537 cp->cgn_stat_minute = broken.tm_min; 13538 13539 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13540 "2647 CGNstat Init: Start Time " 13541 "%d/%d/%d %d:%d\n", 13542 cp->cgn_stat_day, cp->cgn_stat_month, 13543 cp->cgn_stat_year, cp->cgn_stat_hour, 13544 cp->cgn_stat_minute); 13545 13546 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13547 cp->cgn_info_crc = cpu_to_le32(crc); 13548 } 13549 13550 /** 13551 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA 13552 * @phba: Pointer to hba context object. 13553 * @reg: flag to determine register or unregister. 13554 */ 13555 static int 13556 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) 13557 { 13558 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; 13559 union lpfc_sli4_cfg_shdr *shdr; 13560 uint32_t shdr_status, shdr_add_status; 13561 LPFC_MBOXQ_t *mboxq; 13562 int length, rc; 13563 13564 if (!phba->cgn_i) 13565 return -ENXIO; 13566 13567 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13568 if (!mboxq) { 13569 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 13570 "2641 REG_CONGESTION_BUF mbox allocation fail: " 13571 "HBA state x%x reg %d\n", 13572 phba->pport->port_state, reg); 13573 return -ENOMEM; 13574 } 13575 13576 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - 13577 sizeof(struct lpfc_sli4_cfg_mhdr)); 13578 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13579 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, 13580 LPFC_SLI4_MBX_EMBED); 13581 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; 13582 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); 13583 if (reg > 0) 13584 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); 13585 else 13586 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); 13587 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); 13588 reg_congestion_buf->addr_lo = 13589 putPaddrLow(phba->cgn_i->phys); 13590 reg_congestion_buf->addr_hi = 13591 putPaddrHigh(phba->cgn_i->phys); 13592 13593 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13594 shdr = (union lpfc_sli4_cfg_shdr *) 13595 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 13596 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13597 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13598 &shdr->response); 13599 mempool_free(mboxq, phba->mbox_mem_pool); 13600 if (shdr_status || shdr_add_status || rc) { 13601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13602 "2642 REG_CONGESTION_BUF mailbox " 13603 "failed with status x%x add_status x%x," 13604 " mbx status x%x reg %d\n", 13605 shdr_status, shdr_add_status, rc, reg); 13606 return -ENXIO; 13607 } 13608 return 0; 13609 } 13610 13611 int 13612 lpfc_unreg_congestion_buf(struct lpfc_hba *phba) 13613 { 13614 lpfc_cmf_stop(phba); 13615 return __lpfc_reg_congestion_buf(phba, 0); 13616 } 13617 13618 int 13619 lpfc_reg_congestion_buf(struct lpfc_hba *phba) 13620 { 13621 return __lpfc_reg_congestion_buf(phba, 1); 13622 } 13623 13624 /** 13625 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 13626 * @phba: Pointer to HBA context object. 13627 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 13628 * 13629 * This function is called in the SLI4 code path to read the port's 13630 * sli4 capabilities. 13631 * 13632 * This function may be be called from any context that can block-wait 13633 * for the completion. 
The expectation is that this routine is called 13634 * typically from probe_one or from the online routine. 13635 **/ 13636 int 13637 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 13638 { 13639 int rc; 13640 struct lpfc_mqe *mqe = &mboxq->u.mqe; 13641 struct lpfc_pc_sli4_params *sli4_params; 13642 uint32_t mbox_tmo; 13643 int length; 13644 bool exp_wqcq_pages = true; 13645 struct lpfc_sli4_parameters *mbx_sli4_parameters; 13646 13647 /* 13648 * By default, the driver assumes the SLI4 port requires RPI 13649 * header postings. The SLI4_PARAM response will correct this 13650 * assumption. 13651 */ 13652 phba->sli4_hba.rpi_hdrs_in_use = 1; 13653 13654 /* Read the port's SLI4 Config Parameters */ 13655 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 13656 sizeof(struct lpfc_sli4_cfg_mhdr)); 13657 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13658 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 13659 length, LPFC_SLI4_MBX_EMBED); 13660 if (!phba->sli4_hba.intr_enable) 13661 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13662 else { 13663 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 13664 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 13665 } 13666 if (unlikely(rc)) 13667 return rc; 13668 sli4_params = &phba->sli4_hba.pc_sli4_params; 13669 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 13670 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 13671 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 13672 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 13673 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 13674 mbx_sli4_parameters); 13675 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 13676 mbx_sli4_parameters); 13677 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 13678 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 13679 else 13680 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 13681 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 13682 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, 13683 mbx_sli4_parameters); 13684 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 13685 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 13686 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 13687 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 13688 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 13689 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 13690 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 13691 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 13692 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 13693 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 13694 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 13695 mbx_sli4_parameters); 13696 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 13697 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 13698 mbx_sli4_parameters); 13699 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 13700 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 13701 13702 /* Check for Extended Pre-Registered SGL support */ 13703 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 13704 13705 /* Check for firmware nvme support */ 13706 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 13707 bf_get(cfg_xib, mbx_sli4_parameters)); 13708 13709 if (rc) { 13710 /* Save this to indicate the Firmware supports NVME */ 13711 sli4_params->nvme = 1; 13712 13713 /* Firmware NVME support, 
check driver FC4 NVME support */ 13714 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 13715 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13716 "6133 Disabling NVME support: " 13717 "FC4 type not supported: x%x\n", 13718 phba->cfg_enable_fc4_type); 13719 goto fcponly; 13720 } 13721 } else { 13722 /* No firmware NVME support, check driver FC4 NVME support */ 13723 sli4_params->nvme = 0; 13724 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 13726 "6101 Disabling NVME support: Not " 13727 "supported by firmware (%d %d) x%x\n", 13728 bf_get(cfg_nvme, mbx_sli4_parameters), 13729 bf_get(cfg_xib, mbx_sli4_parameters), 13730 phba->cfg_enable_fc4_type); 13731 fcponly: 13732 phba->nvmet_support = 0; 13733 phba->cfg_nvmet_mrq = 0; 13734 phba->cfg_nvme_seg_cnt = 0; 13735 13736 /* If no FC4 type support, move to just SCSI support */ 13737 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 13738 return -ENODEV; 13739 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 13740 } 13741 } 13742 13743 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 13744 * accommodate 512K and 1M IOs in a single nvme buf. 13745 */ 13746 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13747 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 13748 13749 /* Enable embedded Payload BDE if support is indicated */ 13750 if (bf_get(cfg_pbde, mbx_sli4_parameters)) 13751 phba->cfg_enable_pbde = 1; 13752 else 13753 phba->cfg_enable_pbde = 0; 13754 13755 /* 13756 * To support Suppress Response feature we must satisfy 3 conditions. 13757 * lpfc_suppress_rsp module parameter must be set (default). 13758 * In SLI4-Parameters Descriptor: 13759 * Extended Inline Buffers (XIB) must be supported. 13760 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 13761 * (double negative). 13762 */ 13763 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 13764 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 13765 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 13766 else 13767 phba->cfg_suppress_rsp = 0; 13768 13769 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 13770 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 13771 13772 /* Make sure that sge_supp_len can be handled by the driver */ 13773 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 13774 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 13775 13776 /* 13777 * Check whether the adapter supports an embedded copy of the 13778 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 13779 * to use this option, 128-byte WQEs must be used. 
13780 */ 13781 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 13782 phba->fcp_embed_io = 1; 13783 else 13784 phba->fcp_embed_io = 0; 13785 13786 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13787 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 13788 bf_get(cfg_xib, mbx_sli4_parameters), 13789 phba->cfg_enable_pbde, 13790 phba->fcp_embed_io, sli4_params->nvme, 13791 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 13792 13793 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 13794 LPFC_SLI_INTF_IF_TYPE_2) && 13795 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 13796 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 13797 exp_wqcq_pages = false; 13798 13799 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 13800 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 13801 exp_wqcq_pages && 13802 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 13803 phba->enab_exp_wqcq_pages = 1; 13804 else 13805 phba->enab_exp_wqcq_pages = 0; 13806 /* 13807 * Check if the SLI port supports MDS Diagnostics 13808 */ 13809 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 13810 phba->mds_diags_support = 1; 13811 else 13812 phba->mds_diags_support = 0; 13813 13814 /* 13815 * Check if the SLI port supports NSLER 13816 */ 13817 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 13818 phba->nsler = 1; 13819 else 13820 phba->nsler = 0; 13821 13822 return 0; 13823 } 13824 13825 /** 13826 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 13827 * @pdev: pointer to PCI device 13828 * @pid: pointer to PCI device identifier 13829 * 13830 * This routine is to be called to attach a device with SLI-3 interface spec 13831 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 13832 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 13833 * information of the device and driver to see if the driver state that it can 13834 * support this kind of device. If the match is successful, the driver core 13835 * invokes this routine. If this routine determines it can claim the HBA, it 13836 * does all the initialization that it needs to do to handle the HBA properly. 
13837 * 13838 * Return code 13839 * 0 - driver can claim the device 13840 * negative value - driver can not claim the device 13841 **/ 13842 static int 13843 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 13844 { 13845 struct lpfc_hba *phba; 13846 struct lpfc_vport *vport = NULL; 13847 struct Scsi_Host *shost = NULL; 13848 int error; 13849 uint32_t cfg_mode, intr_mode; 13850 13851 /* Allocate memory for HBA structure */ 13852 phba = lpfc_hba_alloc(pdev); 13853 if (!phba) 13854 return -ENOMEM; 13855 13856 /* Perform generic PCI device enabling operation */ 13857 error = lpfc_enable_pci_dev(phba); 13858 if (error) 13859 goto out_free_phba; 13860 13861 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 13862 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 13863 if (error) 13864 goto out_disable_pci_dev; 13865 13866 /* Set up SLI-3 specific device PCI memory space */ 13867 error = lpfc_sli_pci_mem_setup(phba); 13868 if (error) { 13869 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13870 "1402 Failed to set up pci memory space.\n"); 13871 goto out_disable_pci_dev; 13872 } 13873 13874 /* Set up SLI-3 specific device driver resources */ 13875 error = lpfc_sli_driver_resource_setup(phba); 13876 if (error) { 13877 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13878 "1404 Failed to set up driver resource.\n"); 13879 goto out_unset_pci_mem_s3; 13880 } 13881 13882 /* Initialize and populate the iocb list per host */ 13883 13884 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 13885 if (error) { 13886 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13887 "1405 Failed to initialize iocb list.\n"); 13888 goto out_unset_driver_resource_s3; 13889 } 13890 13891 /* Set up common device driver resources */ 13892 error = lpfc_setup_driver_resource_phase2(phba); 13893 if (error) { 13894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13895 "1406 Failed to set up driver resource.\n"); 13896 goto out_free_iocb_list; 13897 } 13898 13899 /* Get the default values for Model Name and Description */ 13900 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 13901 13902 /* Create SCSI host to the physical port */ 13903 error = lpfc_create_shost(phba); 13904 if (error) { 13905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13906 "1407 Failed to create scsi host.\n"); 13907 goto out_unset_driver_resource; 13908 } 13909 13910 /* Configure sysfs attributes */ 13911 vport = phba->pport; 13912 error = lpfc_alloc_sysfs_attr(vport); 13913 if (error) { 13914 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13915 "1476 Failed to allocate sysfs attr\n"); 13916 goto out_destroy_shost; 13917 } 13918 13919 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 13920 /* Now, trying to enable interrupt and bring up the device */ 13921 cfg_mode = phba->cfg_use_msi; 13922 while (true) { 13923 /* Put device to a known state before enabling interrupt */ 13924 lpfc_stop_port(phba); 13925 /* Configure and enable interrupt */ 13926 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 13927 if (intr_mode == LPFC_INTR_ERROR) { 13928 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13929 "0431 Failed to enable interrupt.\n"); 13930 error = -ENODEV; 13931 goto out_free_sysfs_attr; 13932 } 13933 /* SLI-3 HBA setup */ 13934 if (lpfc_sli_hba_setup(phba)) { 13935 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13936 "1477 Failed to set up hba\n"); 13937 error = -ENODEV; 13938 goto out_remove_device; 13939 } 13940 13941 /* Wait 50ms for the interrupts of previous mailbox commands */ 13942 
msleep(50); 13943 /* Check active interrupts on message signaled interrupts */ 13944 if (intr_mode == 0 || 13945 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 13946 /* Log the current active interrupt mode */ 13947 phba->intr_mode = intr_mode; 13948 lpfc_log_intr_mode(phba, intr_mode); 13949 break; 13950 } else { 13951 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13952 "0447 Configure interrupt mode (%d) " 13953 "failed active interrupt test.\n", 13954 intr_mode); 13955 /* Disable the current interrupt mode */ 13956 lpfc_sli_disable_intr(phba); 13957 /* Try next level of interrupt mode */ 13958 cfg_mode = --intr_mode; 13959 } 13960 } 13961 13962 /* Perform post initialization setup */ 13963 lpfc_post_init_setup(phba); 13964 13965 /* Check if there are static vports to be created. */ 13966 lpfc_create_static_vport(phba); 13967 13968 return 0; 13969 13970 out_remove_device: 13971 lpfc_unset_hba(phba); 13972 out_free_sysfs_attr: 13973 lpfc_free_sysfs_attr(vport); 13974 out_destroy_shost: 13975 lpfc_destroy_shost(phba); 13976 out_unset_driver_resource: 13977 lpfc_unset_driver_resource_phase2(phba); 13978 out_free_iocb_list: 13979 lpfc_free_iocb_list(phba); 13980 out_unset_driver_resource_s3: 13981 lpfc_sli_driver_resource_unset(phba); 13982 out_unset_pci_mem_s3: 13983 lpfc_sli_pci_mem_unset(phba); 13984 out_disable_pci_dev: 13985 lpfc_disable_pci_dev(phba); 13986 if (shost) 13987 scsi_host_put(shost); 13988 out_free_phba: 13989 lpfc_hba_free(phba); 13990 return error; 13991 } 13992 13993 /** 13994 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 13995 * @pdev: pointer to PCI device 13996 * 13997 * This routine is to be called to disattach a device with SLI-3 interface 13998 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 13999 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14000 * device to be removed from the PCI subsystem properly. 14001 **/ 14002 static void 14003 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 14004 { 14005 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14006 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14007 struct lpfc_vport **vports; 14008 struct lpfc_hba *phba = vport->phba; 14009 int i; 14010 14011 spin_lock_irq(&phba->hbalock); 14012 vport->load_flag |= FC_UNLOADING; 14013 spin_unlock_irq(&phba->hbalock); 14014 14015 lpfc_free_sysfs_attr(vport); 14016 14017 /* Release all the vports against this physical port */ 14018 vports = lpfc_create_vport_work_array(phba); 14019 if (vports != NULL) 14020 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14021 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14022 continue; 14023 fc_vport_terminate(vports[i]->fc_vport); 14024 } 14025 lpfc_destroy_vport_work_array(phba, vports); 14026 14027 /* Remove FC host with the physical port */ 14028 fc_remove_host(shost); 14029 scsi_remove_host(shost); 14030 14031 /* Clean up all nodes, mailboxes and IOs. */ 14032 lpfc_cleanup(vport); 14033 14034 /* 14035 * Bring down the SLI Layer. This step disable all interrupts, 14036 * clears the rings, discards all mailbox commands, and resets 14037 * the HBA. 
14038 */ 14039 14040 /* HBA interrupt will be disabled after this call */ 14041 lpfc_sli_hba_down(phba); 14042 /* Stop kthread signal shall trigger work_done one more time */ 14043 kthread_stop(phba->worker_thread); 14044 /* Final cleanup of txcmplq and reset the HBA */ 14045 lpfc_sli_brdrestart(phba); 14046 14047 kfree(phba->vpi_bmask); 14048 kfree(phba->vpi_ids); 14049 14050 lpfc_stop_hba_timers(phba); 14051 spin_lock_irq(&phba->port_list_lock); 14052 list_del_init(&vport->listentry); 14053 spin_unlock_irq(&phba->port_list_lock); 14054 14055 lpfc_debugfs_terminate(vport); 14056 14057 /* Disable SR-IOV if enabled */ 14058 if (phba->cfg_sriov_nr_virtfn) 14059 pci_disable_sriov(pdev); 14060 14061 /* Disable interrupt */ 14062 lpfc_sli_disable_intr(phba); 14063 14064 scsi_host_put(shost); 14065 14066 /* 14067 * Call scsi_free before mem_free since scsi bufs are released to their 14068 * corresponding pools here. 14069 */ 14070 lpfc_scsi_free(phba); 14071 lpfc_free_iocb_list(phba); 14072 14073 lpfc_mem_free_all(phba); 14074 14075 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 14076 phba->hbqslimp.virt, phba->hbqslimp.phys); 14077 14078 /* Free resources associated with SLI2 interface */ 14079 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 14080 phba->slim2p.virt, phba->slim2p.phys); 14081 14082 /* unmap adapter SLIM and Control Registers */ 14083 iounmap(phba->ctrl_regs_memmap_p); 14084 iounmap(phba->slim_memmap_p); 14085 14086 lpfc_hba_free(phba); 14087 14088 pci_release_mem_regions(pdev); 14089 pci_disable_device(pdev); 14090 } 14091 14092 /** 14093 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 14094 * @dev_d: pointer to device 14095 * 14096 * This routine is to be called from the kernel's PCI subsystem to support 14097 * system Power Management (PM) to device with SLI-3 interface spec. When 14098 * PM invokes this method, it quiesces the device by stopping the driver's 14099 * worker thread for the device, turning off device's interrupt and DMA, 14100 * and bring the device offline. Note that as the driver implements the 14101 * minimum PM requirements to a power-aware driver's PM support for the 14102 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 14103 * to the suspend() method call will be treated as SUSPEND and the driver will 14104 * fully reinitialize its device during resume() method call, the driver will 14105 * set device to PCI_D3hot state in PCI config space instead of setting it 14106 * according to the @msg provided by the PM. 14107 * 14108 * Return code 14109 * 0 - driver suspended the device 14110 * Error otherwise 14111 **/ 14112 static int __maybe_unused 14113 lpfc_pci_suspend_one_s3(struct device *dev_d) 14114 { 14115 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14116 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14117 14118 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14119 "0473 PCI device Power Management suspend.\n"); 14120 14121 /* Bring down the device */ 14122 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14123 lpfc_offline(phba); 14124 kthread_stop(phba->worker_thread); 14125 14126 /* Disable interrupt from device */ 14127 lpfc_sli_disable_intr(phba); 14128 14129 return 0; 14130 } 14131 14132 /** 14133 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 14134 * @dev_d: pointer to device 14135 * 14136 * This routine is to be called from the kernel's PCI subsystem to support 14137 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 14138 * invokes this method, it restores the device's PCI config space state and 14139 * fully reinitializes the device and brings it online. Note that as the 14140 * driver implements the minimum PM requirements to a power-aware driver's 14141 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 14142 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 14143 * driver will fully reinitialize its device during resume() method call, 14144 * the device will be set to PCI_D0 directly in PCI config space before 14145 * restoring the state. 14146 * 14147 * Return code 14148 * 0 - driver suspended the device 14149 * Error otherwise 14150 **/ 14151 static int __maybe_unused 14152 lpfc_pci_resume_one_s3(struct device *dev_d) 14153 { 14154 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14155 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14156 uint32_t intr_mode; 14157 int error; 14158 14159 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14160 "0452 PCI device Power Management resume.\n"); 14161 14162 /* Startup the kernel thread for this host adapter. */ 14163 phba->worker_thread = kthread_run(lpfc_do_work, phba, 14164 "lpfc_worker_%d", phba->brd_no); 14165 if (IS_ERR(phba->worker_thread)) { 14166 error = PTR_ERR(phba->worker_thread); 14167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14168 "0434 PM resume failed to start worker " 14169 "thread: error=x%x.\n", error); 14170 return error; 14171 } 14172 14173 /* Init cpu_map array */ 14174 lpfc_cpu_map_array_init(phba); 14175 /* Init hba_eq_hdl array */ 14176 lpfc_hba_eq_hdl_array_init(phba); 14177 /* Configure and enable interrupt */ 14178 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14179 if (intr_mode == LPFC_INTR_ERROR) { 14180 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14181 "0430 PM resume Failed to enable interrupt\n"); 14182 return -EIO; 14183 } else 14184 phba->intr_mode = intr_mode; 14185 14186 /* Restart HBA and bring it online */ 14187 lpfc_sli_brdrestart(phba); 14188 lpfc_online(phba); 14189 14190 /* Log the current active interrupt mode */ 14191 lpfc_log_intr_mode(phba, phba->intr_mode); 14192 14193 return 0; 14194 } 14195 14196 /** 14197 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 14198 * @phba: pointer to lpfc hba data structure. 14199 * 14200 * This routine is called to prepare the SLI3 device for PCI slot recover. It 14201 * aborts all the outstanding SCSI I/Os to the pci device. 14202 **/ 14203 static void 14204 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 14205 { 14206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14207 "2723 PCI channel I/O abort preparing for recovery\n"); 14208 14209 /* 14210 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 14211 * and let the SCSI mid-layer to retry them to recover. 14212 */ 14213 lpfc_sli_abort_fcp_rings(phba); 14214 } 14215 14216 /** 14217 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 14218 * @phba: pointer to lpfc hba data structure. 14219 * 14220 * This routine is called to prepare the SLI3 device for PCI slot reset. It 14221 * disables the device interrupt and pci device, and aborts the internal FCP 14222 * pending I/Os. 
14223 **/ 14224 static void 14225 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 14226 { 14227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14228 "2710 PCI channel disable preparing for reset\n"); 14229 14230 /* Block any management I/Os to the device */ 14231 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 14232 14233 /* Block all SCSI devices' I/Os on the host */ 14234 lpfc_scsi_dev_block(phba); 14235 14236 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 14237 lpfc_sli_flush_io_rings(phba); 14238 14239 /* stop all timers */ 14240 lpfc_stop_hba_timers(phba); 14241 14242 /* Disable interrupt and pci device */ 14243 lpfc_sli_disable_intr(phba); 14244 pci_disable_device(phba->pcidev); 14245 } 14246 14247 /** 14248 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 14249 * @phba: pointer to lpfc hba data structure. 14250 * 14251 * This routine is called to prepare the SLI3 device for PCI slot permanently 14252 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 14253 * pending I/Os. 14254 **/ 14255 static void 14256 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 14257 { 14258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14259 "2711 PCI channel permanent disable for failure\n"); 14260 /* Block all SCSI devices' I/Os on the host */ 14261 lpfc_scsi_dev_block(phba); 14262 14263 /* stop all timers */ 14264 lpfc_stop_hba_timers(phba); 14265 14266 /* Clean up all driver's outstanding SCSI I/Os */ 14267 lpfc_sli_flush_io_rings(phba); 14268 } 14269 14270 /** 14271 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 14272 * @pdev: pointer to PCI device. 14273 * @state: the current PCI connection state. 14274 * 14275 * This routine is called from the PCI subsystem for I/O error handling to 14276 * device with SLI-3 interface spec. This function is called by the PCI 14277 * subsystem after a PCI bus error affecting this device has been detected. 14278 * When this function is invoked, it will need to stop all the I/Os and 14279 * interrupt(s) to the device. Once that is done, it will return 14280 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 14281 * as desired. 
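 *
 * Illustrative note (added, describing generic PCI error recovery rather
 * than anything new in this driver): when this callback asks for a reset,
 * the PCI core is expected to drive the registered pci_error_handlers in
 * roughly this order, each generic entry point dispatching to the SLI-3
 * variant for this device:
 *
 *	.error_detected	-> lpfc_io_error_detected_s3()	(quiesce only)
 *	.slot_reset	-> lpfc_io_slot_reset_s3()	(re-init, stay offline)
 *	.resume		-> lpfc_io_resume_s3()		(bring the port online)
 *
 * so this routine only stops traffic and interrupts; it never attempts the
 * recovery itself.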
14282 * 14283 * Return codes 14284 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 14285 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 14286 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14287 **/ 14288 static pci_ers_result_t 14289 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 14290 { 14291 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14292 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14293 14294 switch (state) { 14295 case pci_channel_io_normal: 14296 /* Non-fatal error, prepare for recovery */ 14297 lpfc_sli_prep_dev_for_recover(phba); 14298 return PCI_ERS_RESULT_CAN_RECOVER; 14299 case pci_channel_io_frozen: 14300 /* Fatal error, prepare for slot reset */ 14301 lpfc_sli_prep_dev_for_reset(phba); 14302 return PCI_ERS_RESULT_NEED_RESET; 14303 case pci_channel_io_perm_failure: 14304 /* Permanent failure, prepare for device down */ 14305 lpfc_sli_prep_dev_for_perm_failure(phba); 14306 return PCI_ERS_RESULT_DISCONNECT; 14307 default: 14308 /* Unknown state, prepare and request slot reset */ 14309 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14310 "0472 Unknown PCI error state: x%x\n", state); 14311 lpfc_sli_prep_dev_for_reset(phba); 14312 return PCI_ERS_RESULT_NEED_RESET; 14313 } 14314 } 14315 14316 /** 14317 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 14318 * @pdev: pointer to PCI device. 14319 * 14320 * This routine is called from the PCI subsystem for error handling to 14321 * device with SLI-3 interface spec. This is called after PCI bus has been 14322 * reset to restart the PCI card from scratch, as if from a cold-boot. 14323 * During the PCI subsystem error recovery, after driver returns 14324 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 14325 * recovery and then call this routine before calling the .resume method 14326 * to recover the device. This function will initialize the HBA device, 14327 * enable the interrupt, but it will just put the HBA to offline state 14328 * without passing any I/O traffic. 14329 * 14330 * Return codes 14331 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 14332 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14333 */ 14334 static pci_ers_result_t 14335 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 14336 { 14337 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14338 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14339 struct lpfc_sli *psli = &phba->sli; 14340 uint32_t intr_mode; 14341 14342 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 14343 if (pci_enable_device_mem(pdev)) { 14344 printk(KERN_ERR "lpfc: Cannot re-enable " 14345 "PCI device after reset.\n"); 14346 return PCI_ERS_RESULT_DISCONNECT; 14347 } 14348 14349 pci_restore_state(pdev); 14350 14351 /* 14352 * As the new kernel behavior of pci_restore_state() API call clears 14353 * device saved_state flag, need to save the restored state again. 
14354 */ 14355 pci_save_state(pdev); 14356 14357 if (pdev->is_busmaster) 14358 pci_set_master(pdev); 14359 14360 spin_lock_irq(&phba->hbalock); 14361 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 14362 spin_unlock_irq(&phba->hbalock); 14363 14364 /* Configure and enable interrupt */ 14365 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14366 if (intr_mode == LPFC_INTR_ERROR) { 14367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14368 "0427 Cannot re-enable interrupt after " 14369 "slot reset.\n"); 14370 return PCI_ERS_RESULT_DISCONNECT; 14371 } else 14372 phba->intr_mode = intr_mode; 14373 14374 /* Take device offline, it will perform cleanup */ 14375 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14376 lpfc_offline(phba); 14377 lpfc_sli_brdrestart(phba); 14378 14379 /* Log the current active interrupt mode */ 14380 lpfc_log_intr_mode(phba, phba->intr_mode); 14381 14382 return PCI_ERS_RESULT_RECOVERED; 14383 } 14384 14385 /** 14386 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 14387 * @pdev: pointer to PCI device 14388 * 14389 * This routine is called from the PCI subsystem for error handling to device 14390 * with SLI-3 interface spec. It is called when kernel error recovery tells 14391 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 14392 * error recovery. After this call, traffic can start to flow from this device 14393 * again. 14394 */ 14395 static void 14396 lpfc_io_resume_s3(struct pci_dev *pdev) 14397 { 14398 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14399 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14400 14401 /* Bring device online, it will be no-op for non-fatal error resume */ 14402 lpfc_online(phba); 14403 } 14404 14405 /** 14406 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 14407 * @phba: pointer to lpfc hba data structure. 14408 * 14409 * returns the number of ELS/CT IOCBs to reserve 14410 **/ 14411 int 14412 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 14413 { 14414 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 14415 14416 if (phba->sli_rev == LPFC_SLI_REV4) { 14417 if (max_xri <= 100) 14418 return 10; 14419 else if (max_xri <= 256) 14420 return 25; 14421 else if (max_xri <= 512) 14422 return 50; 14423 else if (max_xri <= 1024) 14424 return 100; 14425 else if (max_xri <= 1536) 14426 return 150; 14427 else if (max_xri <= 2048) 14428 return 200; 14429 else 14430 return 250; 14431 } else 14432 return 0; 14433 } 14434 14435 /** 14436 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 14437 * @phba: pointer to lpfc hba data structure. 14438 * 14439 * returns the number of ELS/CT + NVMET IOCBs to reserve 14440 **/ 14441 int 14442 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 14443 { 14444 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 14445 14446 if (phba->nvmet_support) 14447 max_xri += LPFC_NVMET_BUF_POST; 14448 return max_xri; 14449 } 14450 14451 14452 static int 14453 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 14454 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 14455 const struct firmware *fw) 14456 { 14457 int rc; 14458 u8 sli_family; 14459 14460 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 14461 /* Three cases: (1) FW was not supported on the detected adapter. 14462 * (2) FW update has been locked out administratively. 14463 * (3) Some other error during FW update. 
14464 * In each case, an unmaskable message is written to the console 14465 * for admin diagnosis. 14466 */ 14467 if (offset == ADD_STATUS_FW_NOT_SUPPORTED || 14468 (sli_family == LPFC_SLI_INTF_FAMILY_G6 && 14469 magic_number != MAGIC_NUMBER_G6) || 14470 (sli_family == LPFC_SLI_INTF_FAMILY_G7 && 14471 magic_number != MAGIC_NUMBER_G7) || 14472 (sli_family == LPFC_SLI_INTF_FAMILY_G7P && 14473 magic_number != MAGIC_NUMBER_G7P)) { 14474 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14475 "3030 This firmware version is not supported on" 14476 " this HBA model. Device:%x Magic:%x Type:%x " 14477 "ID:%x Size %d %zd\n", 14478 phba->pcidev->device, magic_number, ftype, fid, 14479 fsize, fw->size); 14480 rc = -EINVAL; 14481 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { 14482 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14483 "3021 Firmware downloads have been prohibited " 14484 "by a system configuration setting on " 14485 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14486 "%zd\n", 14487 phba->pcidev->device, magic_number, ftype, fid, 14488 fsize, fw->size); 14489 rc = -EACCES; 14490 } else { 14491 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14492 "3022 FW Download failed. Add Status x%x " 14493 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14494 "%zd\n", 14495 offset, phba->pcidev->device, magic_number, 14496 ftype, fid, fsize, fw->size); 14497 rc = -EIO; 14498 } 14499 return rc; 14500 } 14501 14502 /** 14503 * lpfc_write_firmware - attempt to write a firmware image to the port 14504 * @fw: pointer to firmware image returned from request_firmware. 14505 * @context: pointer to firmware image returned from request_firmware. 14506 * 14507 **/ 14508 static void 14509 lpfc_write_firmware(const struct firmware *fw, void *context) 14510 { 14511 struct lpfc_hba *phba = (struct lpfc_hba *)context; 14512 char fwrev[FW_REV_STR_SIZE]; 14513 struct lpfc_grp_hdr *image; 14514 struct list_head dma_buffer_list; 14515 int i, rc = 0; 14516 struct lpfc_dmabuf *dmabuf, *next; 14517 uint32_t offset = 0, temp_offset = 0; 14518 uint32_t magic_number, ftype, fid, fsize; 14519 14520 /* It can be null in no-wait mode, sanity check */ 14521 if (!fw) { 14522 rc = -ENXIO; 14523 goto out; 14524 } 14525 image = (struct lpfc_grp_hdr *)fw->data; 14526 14527 magic_number = be32_to_cpu(image->magic_number); 14528 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 14529 fid = bf_get_be32(lpfc_grp_hdr_id, image); 14530 fsize = be32_to_cpu(image->size); 14531 14532 INIT_LIST_HEAD(&dma_buffer_list); 14533 lpfc_decode_firmware_rev(phba, fwrev, 1); 14534 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 14535 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14536 "3023 Updating Firmware, Current Version:%s " 14537 "New Version:%s\n", 14538 fwrev, image->revision); 14539 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 14540 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 14541 GFP_KERNEL); 14542 if (!dmabuf) { 14543 rc = -ENOMEM; 14544 goto release_out; 14545 } 14546 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 14547 SLI4_PAGE_SIZE, 14548 &dmabuf->phys, 14549 GFP_KERNEL); 14550 if (!dmabuf->virt) { 14551 kfree(dmabuf); 14552 rc = -ENOMEM; 14553 goto release_out; 14554 } 14555 list_add_tail(&dmabuf->list, &dma_buffer_list); 14556 } 14557 while (offset < fw->size) { 14558 temp_offset = offset; 14559 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 14560 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 14561 memcpy(dmabuf->virt, 14562 fw->data + temp_offset, 14563 fw->size - temp_offset); 
14564 temp_offset = fw->size; 14565 break; 14566 } 14567 memcpy(dmabuf->virt, fw->data + temp_offset, 14568 SLI4_PAGE_SIZE); 14569 temp_offset += SLI4_PAGE_SIZE; 14570 } 14571 rc = lpfc_wr_object(phba, &dma_buffer_list, 14572 (fw->size - offset), &offset); 14573 if (rc) { 14574 rc = lpfc_log_write_firmware_error(phba, offset, 14575 magic_number, 14576 ftype, 14577 fid, 14578 fsize, 14579 fw); 14580 goto release_out; 14581 } 14582 } 14583 rc = offset; 14584 } else 14585 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14586 "3029 Skipped Firmware update, Current " 14587 "Version:%s New Version:%s\n", 14588 fwrev, image->revision); 14589 14590 release_out: 14591 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 14592 list_del(&dmabuf->list); 14593 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 14594 dmabuf->virt, dmabuf->phys); 14595 kfree(dmabuf); 14596 } 14597 release_firmware(fw); 14598 out: 14599 if (rc < 0) 14600 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14601 "3062 Firmware update error, status %d.\n", rc); 14602 else 14603 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14604 "3024 Firmware update success: size %d.\n", rc); 14605 } 14606 14607 /** 14608 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 14609 * @phba: pointer to lpfc hba data structure. 14610 * @fw_upgrade: which firmware to update. 14611 * 14612 * This routine is called to perform Linux generic firmware upgrade on device 14613 * that supports such feature. 14614 **/ 14615 int 14616 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 14617 { 14618 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 14619 int ret; 14620 const struct firmware *fw; 14621 14622 /* Only supported on SLI4 interface type 2 for now */ 14623 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 14624 LPFC_SLI_INTF_IF_TYPE_2) 14625 return -EPERM; 14626 14627 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 14628 14629 if (fw_upgrade == INT_FW_UPGRADE) { 14630 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, 14631 file_name, &phba->pcidev->dev, 14632 GFP_KERNEL, (void *)phba, 14633 lpfc_write_firmware); 14634 } else if (fw_upgrade == RUN_FW_UPGRADE) { 14635 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 14636 if (!ret) 14637 lpfc_write_firmware(fw, (void *)phba); 14638 } else { 14639 ret = -EINVAL; 14640 } 14641 14642 return ret; 14643 } 14644 14645 /** 14646 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 14647 * @pdev: pointer to PCI device 14648 * @pid: pointer to PCI device identifier 14649 * 14650 * This routine is called from the kernel's PCI subsystem to device with 14651 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14652 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 14653 * information of the device and driver to see if the driver state that it 14654 * can support this kind of device. If the match is successful, the driver 14655 * core invokes this routine. If this routine determines it can claim the HBA, 14656 * it does all the initialization that it needs to do to handle the HBA 14657 * properly. 
14658 * 14659 * Return code 14660 * 0 - driver can claim the device 14661 * negative value - driver can not claim the device 14662 **/ 14663 static int 14664 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 14665 { 14666 struct lpfc_hba *phba; 14667 struct lpfc_vport *vport = NULL; 14668 struct Scsi_Host *shost = NULL; 14669 int error; 14670 uint32_t cfg_mode, intr_mode; 14671 14672 /* Allocate memory for HBA structure */ 14673 phba = lpfc_hba_alloc(pdev); 14674 if (!phba) 14675 return -ENOMEM; 14676 14677 INIT_LIST_HEAD(&phba->poll_list); 14678 14679 /* Perform generic PCI device enabling operation */ 14680 error = lpfc_enable_pci_dev(phba); 14681 if (error) 14682 goto out_free_phba; 14683 14684 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 14685 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 14686 if (error) 14687 goto out_disable_pci_dev; 14688 14689 /* Set up SLI-4 specific device PCI memory space */ 14690 error = lpfc_sli4_pci_mem_setup(phba); 14691 if (error) { 14692 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14693 "1410 Failed to set up pci memory space.\n"); 14694 goto out_disable_pci_dev; 14695 } 14696 14697 /* Set up SLI-4 Specific device driver resources */ 14698 error = lpfc_sli4_driver_resource_setup(phba); 14699 if (error) { 14700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14701 "1412 Failed to set up driver resource.\n"); 14702 goto out_unset_pci_mem_s4; 14703 } 14704 14705 INIT_LIST_HEAD(&phba->active_rrq_list); 14706 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 14707 14708 /* Set up common device driver resources */ 14709 error = lpfc_setup_driver_resource_phase2(phba); 14710 if (error) { 14711 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14712 "1414 Failed to set up driver resource.\n"); 14713 goto out_unset_driver_resource_s4; 14714 } 14715 14716 /* Get the default values for Model Name and Description */ 14717 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 14718 14719 /* Now, trying to enable interrupt and bring up the device */ 14720 cfg_mode = phba->cfg_use_msi; 14721 14722 /* Put device to a known state before enabling interrupt */ 14723 phba->pport = NULL; 14724 lpfc_stop_port(phba); 14725 14726 /* Init cpu_map array */ 14727 lpfc_cpu_map_array_init(phba); 14728 14729 /* Init hba_eq_hdl array */ 14730 lpfc_hba_eq_hdl_array_init(phba); 14731 14732 /* Configure and enable interrupt */ 14733 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 14734 if (intr_mode == LPFC_INTR_ERROR) { 14735 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14736 "0426 Failed to enable interrupt.\n"); 14737 error = -ENODEV; 14738 goto out_unset_driver_resource; 14739 } 14740 /* Default to single EQ for non-MSI-X */ 14741 if (phba->intr_type != MSIX) { 14742 phba->cfg_irq_chann = 1; 14743 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14744 if (phba->nvmet_support) 14745 phba->cfg_nvmet_mrq = 1; 14746 } 14747 } 14748 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 14749 14750 /* Create SCSI host to the physical port */ 14751 error = lpfc_create_shost(phba); 14752 if (error) { 14753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14754 "1415 Failed to create scsi host.\n"); 14755 goto out_disable_intr; 14756 } 14757 vport = phba->pport; 14758 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 14759 14760 /* Configure sysfs attributes */ 14761 error = lpfc_alloc_sysfs_attr(vport); 14762 if (error) { 14763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14764 "1416 Failed to allocate sysfs attr\n"); 14765 goto 
out_destroy_shost; 14766 } 14767 14768 /* Set up SLI-4 HBA */ 14769 if (lpfc_sli4_hba_setup(phba)) { 14770 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14771 "1421 Failed to set up hba\n"); 14772 error = -ENODEV; 14773 goto out_free_sysfs_attr; 14774 } 14775 14776 /* Log the current active interrupt mode */ 14777 phba->intr_mode = intr_mode; 14778 lpfc_log_intr_mode(phba, intr_mode); 14779 14780 /* Perform post initialization setup */ 14781 lpfc_post_init_setup(phba); 14782 14783 /* NVME support in FW earlier in the driver load corrects the 14784 * FC4 type making a check for nvme_support unnecessary. 14785 */ 14786 if (phba->nvmet_support == 0) { 14787 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14788 /* Create NVME binding with nvme_fc_transport. This 14789 * ensures the vport is initialized. If the localport 14790 * create fails, it should not unload the driver to 14791 * support field issues. 14792 */ 14793 error = lpfc_nvme_create_localport(vport); 14794 if (error) { 14795 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14796 "6004 NVME registration " 14797 "failed, error x%x\n", 14798 error); 14799 } 14800 } 14801 } 14802 14803 /* check for firmware upgrade or downgrade */ 14804 if (phba->cfg_request_firmware_upgrade) 14805 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 14806 14807 /* Check if there are static vports to be created. */ 14808 lpfc_create_static_vport(phba); 14809 14810 /* Enable RAS FW log support */ 14811 lpfc_sli4_ras_setup(phba); 14812 14813 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 14814 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 14815 14816 return 0; 14817 14818 out_free_sysfs_attr: 14819 lpfc_free_sysfs_attr(vport); 14820 out_destroy_shost: 14821 lpfc_destroy_shost(phba); 14822 out_disable_intr: 14823 lpfc_sli4_disable_intr(phba); 14824 out_unset_driver_resource: 14825 lpfc_unset_driver_resource_phase2(phba); 14826 out_unset_driver_resource_s4: 14827 lpfc_sli4_driver_resource_unset(phba); 14828 out_unset_pci_mem_s4: 14829 lpfc_sli4_pci_mem_unset(phba); 14830 out_disable_pci_dev: 14831 lpfc_disable_pci_dev(phba); 14832 if (shost) 14833 scsi_host_put(shost); 14834 out_free_phba: 14835 lpfc_hba_free(phba); 14836 return error; 14837 } 14838 14839 /** 14840 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 14841 * @pdev: pointer to PCI device 14842 * 14843 * This routine is called from the kernel's PCI subsystem to device with 14844 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14845 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14846 * device to be removed from the PCI subsystem properly. 
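 *
 * Added note (based on the lpfc_driver definition near the end of this
 * file): the same entry point is also wired to .shutdown
 * (.shutdown = lpfc_pci_remove_one), so this SLI-4 teardown path runs on
 * reboot and kexec as well as on hot removal and driver unload.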
14847 **/ 14848 static void 14849 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 14850 { 14851 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14852 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14853 struct lpfc_vport **vports; 14854 struct lpfc_hba *phba = vport->phba; 14855 int i; 14856 14857 /* Mark the device unloading flag */ 14858 spin_lock_irq(&phba->hbalock); 14859 vport->load_flag |= FC_UNLOADING; 14860 spin_unlock_irq(&phba->hbalock); 14861 if (phba->cgn_i) 14862 lpfc_unreg_congestion_buf(phba); 14863 14864 lpfc_free_sysfs_attr(vport); 14865 14866 /* Release all the vports against this physical port */ 14867 vports = lpfc_create_vport_work_array(phba); 14868 if (vports != NULL) 14869 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14870 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14871 continue; 14872 fc_vport_terminate(vports[i]->fc_vport); 14873 } 14874 lpfc_destroy_vport_work_array(phba, vports); 14875 14876 /* Remove FC host with the physical port */ 14877 fc_remove_host(shost); 14878 scsi_remove_host(shost); 14879 14880 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 14881 * localports are destroyed after to cleanup all transport memory. 14882 */ 14883 lpfc_cleanup(vport); 14884 lpfc_nvmet_destroy_targetport(phba); 14885 lpfc_nvme_destroy_localport(vport); 14886 14887 /* De-allocate multi-XRI pools */ 14888 if (phba->cfg_xri_rebalancing) 14889 lpfc_destroy_multixri_pools(phba); 14890 14891 /* 14892 * Bring down the SLI Layer. This step disables all interrupts, 14893 * clears the rings, discards all mailbox commands, and resets 14894 * the HBA FCoE function. 14895 */ 14896 lpfc_debugfs_terminate(vport); 14897 14898 lpfc_stop_hba_timers(phba); 14899 spin_lock_irq(&phba->port_list_lock); 14900 list_del_init(&vport->listentry); 14901 spin_unlock_irq(&phba->port_list_lock); 14902 14903 /* Perform scsi free before driver resource_unset since scsi 14904 * buffers are released to their corresponding pools here. 14905 */ 14906 lpfc_io_free(phba); 14907 lpfc_free_iocb_list(phba); 14908 lpfc_sli4_hba_unset(phba); 14909 14910 lpfc_unset_driver_resource_phase2(phba); 14911 lpfc_sli4_driver_resource_unset(phba); 14912 14913 /* Unmap adapter Control and Doorbell registers */ 14914 lpfc_sli4_pci_mem_unset(phba); 14915 14916 /* Release PCI resources and disable device's PCI function */ 14917 scsi_host_put(shost); 14918 lpfc_disable_pci_dev(phba); 14919 14920 /* Finally, free the driver's device data structure */ 14921 lpfc_hba_free(phba); 14922 14923 return; 14924 } 14925 14926 /** 14927 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 14928 * @dev_d: pointer to device 14929 * 14930 * This routine is called from the kernel's PCI subsystem to support system 14931 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 14932 * this method, it quiesces the device by stopping the driver's worker 14933 * thread for the device, turning off device's interrupt and DMA, and bring 14934 * the device offline. Note that as the driver implements the minimum PM 14935 * requirements to a power-aware driver's PM support for suspend/resume -- all 14936 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 14937 * method call will be treated as SUSPEND and the driver will fully 14938 * reinitialize its device during resume() method call, the driver will set 14939 * device to PCI_D3hot state in PCI config space instead of setting it 14940 * according to the @msg provided by the PM. 
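 *
 * Added note (derived from the PM-ops declaration later in this file plus
 * generic dev_pm_ops behavior): the driver registers
 *
 *	static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
 *				 lpfc_pci_suspend_one,
 *				 lpfc_pci_resume_one);
 *
 * and SIMPLE_DEV_PM_OPS points the suspend, freeze and poweroff transitions
 * at the same callback, which is why every sleep message funnels through the
 * lpfc_pci_suspend_one dispatcher and, for SLI-4 devices, into this routine.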
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
15033 **/ 15034 static void 15035 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 15036 { 15037 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15038 "2828 PCI channel I/O abort preparing for recovery\n"); 15039 /* 15040 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 15041 * and let the SCSI mid-layer to retry them to recover. 15042 */ 15043 lpfc_sli_abort_fcp_rings(phba); 15044 } 15045 15046 /** 15047 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 15048 * @phba: pointer to lpfc hba data structure. 15049 * 15050 * This routine is called to prepare the SLI4 device for PCI slot reset. It 15051 * disables the device interrupt and pci device, and aborts the internal FCP 15052 * pending I/Os. 15053 **/ 15054 static void 15055 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 15056 { 15057 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15058 "2826 PCI channel disable preparing for reset\n"); 15059 15060 /* Block any management I/Os to the device */ 15061 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 15062 15063 /* Block all SCSI devices' I/Os on the host */ 15064 lpfc_scsi_dev_block(phba); 15065 15066 /* Flush all driver's outstanding I/Os as we are to reset */ 15067 lpfc_sli_flush_io_rings(phba); 15068 15069 /* stop all timers */ 15070 lpfc_stop_hba_timers(phba); 15071 15072 /* Disable interrupt and pci device */ 15073 lpfc_sli4_disable_intr(phba); 15074 lpfc_sli4_queue_destroy(phba); 15075 pci_disable_device(phba->pcidev); 15076 } 15077 15078 /** 15079 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 15080 * @phba: pointer to lpfc hba data structure. 15081 * 15082 * This routine is called to prepare the SLI4 device for PCI slot permanently 15083 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 15084 * pending I/Os. 15085 **/ 15086 static void 15087 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 15088 { 15089 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15090 "2827 PCI channel permanent disable for failure\n"); 15091 15092 /* Block all SCSI devices' I/Os on the host */ 15093 lpfc_scsi_dev_block(phba); 15094 15095 /* stop all timers */ 15096 lpfc_stop_hba_timers(phba); 15097 15098 /* Clean up all driver's outstanding I/Os */ 15099 lpfc_sli_flush_io_rings(phba); 15100 } 15101 15102 /** 15103 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 15104 * @pdev: pointer to PCI device. 15105 * @state: the current PCI connection state. 15106 * 15107 * This routine is called from the PCI subsystem for error handling to device 15108 * with SLI-4 interface spec. This function is called by the PCI subsystem 15109 * after a PCI bus error affecting this device has been detected. When this 15110 * function is invoked, it will need to stop all the I/Os and interrupt(s) 15111 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 15112 * for the PCI subsystem to perform proper recovery as desired. 
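 *
 * Illustrative note (general pci_channel_io_frozen semantics, not stated in
 * the original comment): once the channel is frozen, MMIO reads from the
 * adapter typically return all ones, so the reset-preparation path avoids
 * further register access and simply tears down interrupts and queues before
 * requesting the slot reset.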
15113 * 15114 * Return codes 15115 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15116 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15117 **/ 15118 static pci_ers_result_t 15119 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 15120 { 15121 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15122 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15123 15124 switch (state) { 15125 case pci_channel_io_normal: 15126 /* Non-fatal error, prepare for recovery */ 15127 lpfc_sli4_prep_dev_for_recover(phba); 15128 return PCI_ERS_RESULT_CAN_RECOVER; 15129 case pci_channel_io_frozen: 15130 phba->hba_flag |= HBA_PCI_ERR; 15131 /* Fatal error, prepare for slot reset */ 15132 lpfc_sli4_prep_dev_for_reset(phba); 15133 return PCI_ERS_RESULT_NEED_RESET; 15134 case pci_channel_io_perm_failure: 15135 phba->hba_flag |= HBA_PCI_ERR; 15136 /* Permanent failure, prepare for device down */ 15137 lpfc_sli4_prep_dev_for_perm_failure(phba); 15138 return PCI_ERS_RESULT_DISCONNECT; 15139 default: 15140 phba->hba_flag |= HBA_PCI_ERR; 15141 /* Unknown state, prepare and request slot reset */ 15142 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15143 "2825 Unknown PCI error state: x%x\n", state); 15144 lpfc_sli4_prep_dev_for_reset(phba); 15145 return PCI_ERS_RESULT_NEED_RESET; 15146 } 15147 } 15148 15149 /** 15150 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 15151 * @pdev: pointer to PCI device. 15152 * 15153 * This routine is called from the PCI subsystem for error handling to device 15154 * with SLI-4 interface spec. It is called after PCI bus has been reset to 15155 * restart the PCI card from scratch, as if from a cold-boot. During the 15156 * PCI subsystem error recovery, after the driver returns 15157 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 15158 * recovery and then call this routine before calling the .resume method to 15159 * recover the device. This function will initialize the HBA device, enable 15160 * the interrupt, but it will just put the HBA to offline state without 15161 * passing any I/O traffic. 15162 * 15163 * Return codes 15164 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15165 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15166 */ 15167 static pci_ers_result_t 15168 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 15169 { 15170 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15171 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15172 struct lpfc_sli *psli = &phba->sli; 15173 uint32_t intr_mode; 15174 15175 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 15176 if (pci_enable_device_mem(pdev)) { 15177 printk(KERN_ERR "lpfc: Cannot re-enable " 15178 "PCI device after reset.\n"); 15179 return PCI_ERS_RESULT_DISCONNECT; 15180 } 15181 15182 pci_restore_state(pdev); 15183 15184 phba->hba_flag &= ~HBA_PCI_ERR; 15185 /* 15186 * As the new kernel behavior of pci_restore_state() API call clears 15187 * device saved_state flag, need to save the restored state again. 
15188 */ 15189 pci_save_state(pdev); 15190 15191 if (pdev->is_busmaster) 15192 pci_set_master(pdev); 15193 15194 spin_lock_irq(&phba->hbalock); 15195 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 15196 spin_unlock_irq(&phba->hbalock); 15197 15198 /* Configure and enable interrupt */ 15199 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 15200 if (intr_mode == LPFC_INTR_ERROR) { 15201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15202 "2824 Cannot re-enable interrupt after " 15203 "slot reset.\n"); 15204 return PCI_ERS_RESULT_DISCONNECT; 15205 } else 15206 phba->intr_mode = intr_mode; 15207 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 15208 15209 /* Log the current active interrupt mode */ 15210 lpfc_log_intr_mode(phba, phba->intr_mode); 15211 15212 return PCI_ERS_RESULT_RECOVERED; 15213 } 15214 15215 /** 15216 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 15217 * @pdev: pointer to PCI device 15218 * 15219 * This routine is called from the PCI subsystem for error handling to device 15220 * with SLI-4 interface spec. It is called when kernel error recovery tells 15221 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 15222 * error recovery. After this call, traffic can start to flow from this device 15223 * again. 15224 **/ 15225 static void 15226 lpfc_io_resume_s4(struct pci_dev *pdev) 15227 { 15228 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15229 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15230 15231 /* 15232 * In case of slot reset, as function reset is performed through 15233 * mailbox command which needs DMA to be enabled, this operation 15234 * has to be moved to the io resume phase. Taking device offline 15235 * will perform the necessary cleanup. 15236 */ 15237 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 15238 /* Perform device reset */ 15239 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 15240 lpfc_offline(phba); 15241 lpfc_sli_brdrestart(phba); 15242 /* Bring the device back online */ 15243 lpfc_online(phba); 15244 } 15245 } 15246 15247 /** 15248 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 15249 * @pdev: pointer to PCI device 15250 * @pid: pointer to PCI device identifier 15251 * 15252 * This routine is to be registered to the kernel's PCI subsystem. When an 15253 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 15254 * at PCI device-specific information of the device and driver to see if the 15255 * driver state that it can support this kind of device. If the match is 15256 * successful, the driver core invokes this routine. This routine dispatches 15257 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 15258 * do all the initialization that it needs to do to handle the HBA device 15259 * properly. 
15260 * 15261 * Return code 15262 * 0 - driver can claim the device 15263 * negative value - driver can not claim the device 15264 **/ 15265 static int 15266 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 15267 { 15268 int rc; 15269 struct lpfc_sli_intf intf; 15270 15271 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 15272 return -ENODEV; 15273 15274 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 15275 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 15276 rc = lpfc_pci_probe_one_s4(pdev, pid); 15277 else 15278 rc = lpfc_pci_probe_one_s3(pdev, pid); 15279 15280 return rc; 15281 } 15282 15283 /** 15284 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 15285 * @pdev: pointer to PCI device 15286 * 15287 * This routine is to be registered to the kernel's PCI subsystem. When an 15288 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 15289 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 15290 * remove routine, which will perform all the necessary cleanup for the 15291 * device to be removed from the PCI subsystem properly. 15292 **/ 15293 static void 15294 lpfc_pci_remove_one(struct pci_dev *pdev) 15295 { 15296 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15297 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15298 15299 switch (phba->pci_dev_grp) { 15300 case LPFC_PCI_DEV_LP: 15301 lpfc_pci_remove_one_s3(pdev); 15302 break; 15303 case LPFC_PCI_DEV_OC: 15304 lpfc_pci_remove_one_s4(pdev); 15305 break; 15306 default: 15307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15308 "1424 Invalid PCI device group: 0x%x\n", 15309 phba->pci_dev_grp); 15310 break; 15311 } 15312 return; 15313 } 15314 15315 /** 15316 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 15317 * @dev: pointer to device 15318 * 15319 * This routine is to be registered to the kernel's PCI subsystem to support 15320 * system Power Management (PM). When PM invokes this method, it dispatches 15321 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 15322 * suspend the device. 15323 * 15324 * Return code 15325 * 0 - driver suspended the device 15326 * Error otherwise 15327 **/ 15328 static int __maybe_unused 15329 lpfc_pci_suspend_one(struct device *dev) 15330 { 15331 struct Scsi_Host *shost = dev_get_drvdata(dev); 15332 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15333 int rc = -ENODEV; 15334 15335 switch (phba->pci_dev_grp) { 15336 case LPFC_PCI_DEV_LP: 15337 rc = lpfc_pci_suspend_one_s3(dev); 15338 break; 15339 case LPFC_PCI_DEV_OC: 15340 rc = lpfc_pci_suspend_one_s4(dev); 15341 break; 15342 default: 15343 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15344 "1425 Invalid PCI device group: 0x%x\n", 15345 phba->pci_dev_grp); 15346 break; 15347 } 15348 return rc; 15349 } 15350 15351 /** 15352 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 15353 * @dev: pointer to device 15354 * 15355 * This routine is to be registered to the kernel's PCI subsystem to support 15356 * system Power Management (PM). When PM invokes this method, it dispatches 15357 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 15358 * resume the device. 
15359 * 15360 * Return code 15361 * 0 - driver suspended the device 15362 * Error otherwise 15363 **/ 15364 static int __maybe_unused 15365 lpfc_pci_resume_one(struct device *dev) 15366 { 15367 struct Scsi_Host *shost = dev_get_drvdata(dev); 15368 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15369 int rc = -ENODEV; 15370 15371 switch (phba->pci_dev_grp) { 15372 case LPFC_PCI_DEV_LP: 15373 rc = lpfc_pci_resume_one_s3(dev); 15374 break; 15375 case LPFC_PCI_DEV_OC: 15376 rc = lpfc_pci_resume_one_s4(dev); 15377 break; 15378 default: 15379 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15380 "1426 Invalid PCI device group: 0x%x\n", 15381 phba->pci_dev_grp); 15382 break; 15383 } 15384 return rc; 15385 } 15386 15387 /** 15388 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 15389 * @pdev: pointer to PCI device. 15390 * @state: the current PCI connection state. 15391 * 15392 * This routine is registered to the PCI subsystem for error handling. This 15393 * function is called by the PCI subsystem after a PCI bus error affecting 15394 * this device has been detected. When this routine is invoked, it dispatches 15395 * the action to the proper SLI-3 or SLI-4 device error detected handling 15396 * routine, which will perform the proper error detected operation. 15397 * 15398 * Return codes 15399 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15400 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15401 **/ 15402 static pci_ers_result_t 15403 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 15404 { 15405 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15406 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15407 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15408 15409 if (phba->link_state == LPFC_HBA_ERROR && 15410 phba->hba_flag & HBA_IOQ_FLUSH) 15411 return PCI_ERS_RESULT_NEED_RESET; 15412 15413 switch (phba->pci_dev_grp) { 15414 case LPFC_PCI_DEV_LP: 15415 rc = lpfc_io_error_detected_s3(pdev, state); 15416 break; 15417 case LPFC_PCI_DEV_OC: 15418 rc = lpfc_io_error_detected_s4(pdev, state); 15419 break; 15420 default: 15421 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15422 "1427 Invalid PCI device group: 0x%x\n", 15423 phba->pci_dev_grp); 15424 break; 15425 } 15426 return rc; 15427 } 15428 15429 /** 15430 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 15431 * @pdev: pointer to PCI device. 15432 * 15433 * This routine is registered to the PCI subsystem for error handling. This 15434 * function is called after PCI bus has been reset to restart the PCI card 15435 * from scratch, as if from a cold-boot. When this routine is invoked, it 15436 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 15437 * routine, which will perform the proper device reset. 
15438 * 15439 * Return codes 15440 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15441 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15442 **/ 15443 static pci_ers_result_t 15444 lpfc_io_slot_reset(struct pci_dev *pdev) 15445 { 15446 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15447 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15448 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15449 15450 switch (phba->pci_dev_grp) { 15451 case LPFC_PCI_DEV_LP: 15452 rc = lpfc_io_slot_reset_s3(pdev); 15453 break; 15454 case LPFC_PCI_DEV_OC: 15455 rc = lpfc_io_slot_reset_s4(pdev); 15456 break; 15457 default: 15458 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15459 "1428 Invalid PCI device group: 0x%x\n", 15460 phba->pci_dev_grp); 15461 break; 15462 } 15463 return rc; 15464 } 15465 15466 /** 15467 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 15468 * @pdev: pointer to PCI device 15469 * 15470 * This routine is registered to the PCI subsystem for error handling. It 15471 * is called when kernel error recovery tells the lpfc driver that it is 15472 * OK to resume normal PCI operation after PCI bus error recovery. When 15473 * this routine is invoked, it dispatches the action to the proper SLI-3 15474 * or SLI-4 device io_resume routine, which will resume the device operation. 15475 **/ 15476 static void 15477 lpfc_io_resume(struct pci_dev *pdev) 15478 { 15479 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15480 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15481 15482 switch (phba->pci_dev_grp) { 15483 case LPFC_PCI_DEV_LP: 15484 lpfc_io_resume_s3(pdev); 15485 break; 15486 case LPFC_PCI_DEV_OC: 15487 lpfc_io_resume_s4(pdev); 15488 break; 15489 default: 15490 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15491 "1429 Invalid PCI device group: 0x%x\n", 15492 phba->pci_dev_grp); 15493 break; 15494 } 15495 return; 15496 } 15497 15498 /** 15499 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 15500 * @phba: pointer to lpfc hba data structure. 15501 * 15502 * This routine checks to see if OAS is supported for this adapter. If 15503 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 15504 * the enable oas flag is cleared and the pool created for OAS device data 15505 * is destroyed. 15506 * 15507 **/ 15508 static void 15509 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 15510 { 15511 15512 if (!phba->cfg_EnableXLane) 15513 return; 15514 15515 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 15516 phba->cfg_fof = 1; 15517 } else { 15518 phba->cfg_fof = 0; 15519 mempool_destroy(phba->device_data_mem_pool); 15520 phba->device_data_mem_pool = NULL; 15521 } 15522 15523 return; 15524 } 15525 15526 /** 15527 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 15528 * @phba: pointer to lpfc hba data structure. 15529 * 15530 * This routine checks to see if RAS is supported by the adapter. Check the 15531 * function through which RAS support enablement is to be done. 
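 *
 * Added note (assumption about module-parameter naming, inferred from the
 * cfg_ras_fwlog_* fields used below rather than stated here): RAS firmware
 * logging is only armed when the hardware supports it (IF_TYPE 6 or the G6
 * family) and the administrator selected this PCI function with a non-zero
 * log buffer size, e.g. something along the lines of:
 *
 *	modprobe lpfc lpfc_ras_fwlog_func=0 lpfc_ras_fwlog_buffsize=1
 *
 * where the exact parameter names above should be treated as illustrative.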
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
	/* if ASIC_GEN_NUM >= 0xC */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_6) ||
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_FAMILY_G6)) {
		phba->ras_fwlog.ras_hwsupport = true;
		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
		    phba->cfg_ras_fwlog_buffsize)
			phba->ras_fwlog.ras_enabled = true;
		else
			phba->ras_fwlog.ras_enabled = false;
	} else {
		phba->ras_fwlog.ras_hwsupport = false;
	}
}

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
			 lpfc_pci_suspend_one,
			 lpfc_pci_resume_one);

static struct pci_driver lpfc_driver = {
	.name = LPFC_DRIVER_NAME,
	.id_table = lpfc_id_table,
	.probe = lpfc_pci_probe_one,
	.remove = lpfc_pci_remove_one,
	.shutdown = lpfc_pci_remove_one,
	.driver.pm = &lpfc_pci_pm_ops_one,
	.err_handler = &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
15591 * 15592 * Return codes 15593 * 0 - successful 15594 * -ENOMEM - FC attach transport failed 15595 * all others - failed 15596 */ 15597 static int __init 15598 lpfc_init(void) 15599 { 15600 int error = 0; 15601 15602 pr_info(LPFC_MODULE_DESC "\n"); 15603 pr_info(LPFC_COPYRIGHT "\n"); 15604 15605 error = misc_register(&lpfc_mgmt_dev); 15606 if (error) 15607 printk(KERN_ERR "Could not register lpfcmgmt device, " 15608 "misc_register returned with status %d", error); 15609 15610 error = -ENOMEM; 15611 lpfc_transport_functions.vport_create = lpfc_vport_create; 15612 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 15613 lpfc_transport_template = 15614 fc_attach_transport(&lpfc_transport_functions); 15615 if (lpfc_transport_template == NULL) 15616 goto unregister; 15617 lpfc_vport_transport_template = 15618 fc_attach_transport(&lpfc_vport_transport_functions); 15619 if (lpfc_vport_transport_template == NULL) { 15620 fc_release_transport(lpfc_transport_template); 15621 goto unregister; 15622 } 15623 lpfc_wqe_cmd_template(); 15624 lpfc_nvmet_cmd_template(); 15625 15626 /* Initialize in case vector mapping is needed */ 15627 lpfc_present_cpu = num_present_cpus(); 15628 15629 lpfc_pldv_detect = false; 15630 15631 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 15632 "lpfc/sli4:online", 15633 lpfc_cpu_online, lpfc_cpu_offline); 15634 if (error < 0) 15635 goto cpuhp_failure; 15636 lpfc_cpuhp_state = error; 15637 15638 error = pci_register_driver(&lpfc_driver); 15639 if (error) 15640 goto unwind; 15641 15642 return error; 15643 15644 unwind: 15645 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15646 cpuhp_failure: 15647 fc_release_transport(lpfc_transport_template); 15648 fc_release_transport(lpfc_vport_transport_template); 15649 unregister: 15650 misc_deregister(&lpfc_mgmt_dev); 15651 15652 return error; 15653 } 15654 15655 void lpfc_dmp_dbg(struct lpfc_hba *phba) 15656 { 15657 unsigned int start_idx; 15658 unsigned int dbg_cnt; 15659 unsigned int temp_idx; 15660 int i; 15661 int j = 0; 15662 unsigned long rem_nsec, iflags; 15663 bool log_verbose = false; 15664 struct lpfc_vport *port_iterator; 15665 15666 /* Don't dump messages if we explicitly set log_verbose for the 15667 * physical port or any vport. 
15668 */ 15669 if (phba->cfg_log_verbose) 15670 return; 15671 15672 spin_lock_irqsave(&phba->port_list_lock, iflags); 15673 list_for_each_entry(port_iterator, &phba->port_list, listentry) { 15674 if (port_iterator->load_flag & FC_UNLOADING) 15675 continue; 15676 if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) { 15677 if (port_iterator->cfg_log_verbose) 15678 log_verbose = true; 15679 15680 scsi_host_put(lpfc_shost_from_vport(port_iterator)); 15681 15682 if (log_verbose) { 15683 spin_unlock_irqrestore(&phba->port_list_lock, 15684 iflags); 15685 return; 15686 } 15687 } 15688 } 15689 spin_unlock_irqrestore(&phba->port_list_lock, iflags); 15690 15691 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) 15692 return; 15693 15694 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; 15695 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); 15696 if (!dbg_cnt) 15697 goto out; 15698 temp_idx = start_idx; 15699 if (dbg_cnt >= DBG_LOG_SZ) { 15700 dbg_cnt = DBG_LOG_SZ; 15701 temp_idx -= 1; 15702 } else { 15703 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { 15704 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; 15705 } else { 15706 if (start_idx < dbg_cnt) 15707 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); 15708 else 15709 start_idx -= dbg_cnt; 15710 } 15711 } 15712 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", 15713 start_idx, temp_idx, dbg_cnt); 15714 15715 for (i = 0; i < dbg_cnt; i++) { 15716 if ((start_idx + i) < DBG_LOG_SZ) 15717 temp_idx = (start_idx + i) % DBG_LOG_SZ; 15718 else 15719 temp_idx = j++; 15720 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); 15721 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", 15722 temp_idx, 15723 (unsigned long)phba->dbg_log[temp_idx].t_ns, 15724 rem_nsec / 1000, 15725 phba->dbg_log[temp_idx].log); 15726 } 15727 out: 15728 atomic_set(&phba->dbg_log_cnt, 0); 15729 atomic_set(&phba->dbg_log_dmping, 0); 15730 } 15731 15732 __printf(2, 3) 15733 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...) 15734 { 15735 unsigned int idx; 15736 va_list args; 15737 int dbg_dmping = atomic_read(&phba->dbg_log_dmping); 15738 struct va_format vaf; 15739 15740 15741 va_start(args, fmt); 15742 if (unlikely(dbg_dmping)) { 15743 vaf.fmt = fmt; 15744 vaf.va = &args; 15745 dev_info(&phba->pcidev->dev, "%pV", &vaf); 15746 va_end(args); 15747 return; 15748 } 15749 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % 15750 DBG_LOG_SZ; 15751 15752 atomic_inc(&phba->dbg_log_cnt); 15753 15754 vscnprintf(phba->dbg_log[idx].log, 15755 sizeof(phba->dbg_log[idx].log), fmt, args); 15756 va_end(args); 15757 15758 phba->dbg_log[idx].t_ns = local_clock(); 15759 } 15760 15761 /** 15762 * lpfc_exit - lpfc module removal routine 15763 * 15764 * This routine is invoked when the lpfc module is removed from the kernel. 15765 * The special kernel macro module_exit() is used to indicate the role of 15766 * this routine to the kernel as lpfc module exit point. 
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
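
/*
 * Illustrative sketch (added commentary, not part of the driver): the
 * lpfc_dbg_print()/lpfc_dmp_dbg() pair above implements a fixed-size,
 * lock-free message ring.  Producers reserve a slot with an atomic
 * fetch-and-add modulo DBG_LOG_SZ and fill it in place; stripped of the
 * lpfc plumbing, the reservation step is essentially:
 *
 *	unsigned int idx;
 *
 *	idx = (unsigned int)atomic_fetch_add(1, &dbg_log_idx) % DBG_LOG_SZ;
 *	vscnprintf(dbg_log[idx].log, sizeof(dbg_log[idx].log), fmt, args);
 *	dbg_log[idx].t_ns = local_clock();
 *
 * Because the counter is free-running, concurrent writers always land in
 * distinct slots within one wrap, but an entry can be overwritten after
 * DBG_LOG_SZ further messages; the dump side treats the ring as best-effort
 * diagnostics accordingly.
 */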