/********************************************************************
 * This file is part of the Emulex Linux Device Driver for          *
 * Fibre Channel Host Bus Adapters.                                 *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term  *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.      *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                         *
 * www.broadcom.com                                                 *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig               *
 *                                                                  *
 * This program is free software; you can redistribute it and/or    *
 * modify it under the terms of version 2 of the GNU General        *
 * Public License as published by the Free Software Foundation.     *
 * This program is distributed in the hope that it will be useful.  *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING   *
 * included with this package.                                      *
 ********************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
static bool lpfc_pldv_detect;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba); 89 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 90 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 91 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 92 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 93 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 94 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 95 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 96 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 97 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); 98 99 static struct scsi_transport_template *lpfc_transport_template = NULL; 100 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 101 static DEFINE_IDR(lpfc_hba_index); 102 #define LPFC_NVMET_BUF_POST 254 103 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); 104 105 /** 106 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 107 * @phba: pointer to lpfc hba data structure. 108 * 109 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 110 * mailbox command. It retrieves the revision information from the HBA and 111 * collects the Vital Product Data (VPD) about the HBA for preparing the 112 * configuration of the HBA. 113 * 114 * Return codes: 115 * 0 - success. 116 * -ERESTART - requests the SLI layer to reset the HBA and try again. 117 * Any other value - indicates an error. 118 **/ 119 int 120 lpfc_config_port_prep(struct lpfc_hba *phba) 121 { 122 lpfc_vpd_t *vp = &phba->vpd; 123 int i = 0, rc; 124 LPFC_MBOXQ_t *pmb; 125 MAILBOX_t *mb; 126 char *lpfc_vpd_data = NULL; 127 uint16_t offset = 0; 128 static char licensed[56] = 129 "key unlock for use with gnu public licensed code only\0"; 130 static int init_key = 1; 131 132 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 133 if (!pmb) { 134 phba->link_state = LPFC_HBA_ERROR; 135 return -ENOMEM; 136 } 137 138 mb = &pmb->u.mb; 139 phba->link_state = LPFC_INIT_MBX_CMDS; 140 141 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 142 if (init_key) { 143 uint32_t *ptext = (uint32_t *) licensed; 144 145 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 146 *ptext = cpu_to_be32(*ptext); 147 init_key = 0; 148 } 149 150 lpfc_read_nv(phba, pmb); 151 memset((char*)mb->un.varRDnvp.rsvd3, 0, 152 sizeof (mb->un.varRDnvp.rsvd3)); 153 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 154 sizeof (licensed)); 155 156 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 157 158 if (rc != MBX_SUCCESS) { 159 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 160 "0324 Config Port initialization " 161 "error, mbxCmd x%x READ_NVPARM, " 162 "mbxStatus x%x\n", 163 mb->mbxCommand, mb->mbxStatus); 164 mempool_free(pmb, phba->mbox_mem_pool); 165 return -ERESTART; 166 } 167 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 168 sizeof(phba->wwnn)); 169 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 170 sizeof(phba->wwpn)); 171 } 172 173 /* 174 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 175 * which was already set in lpfc_get_cfgparam() 176 */ 177 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 178 179 /* Setup and issue mailbox READ REV command */ 180 lpfc_read_rev(phba, pmb); 181 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 182 if (rc != MBX_SUCCESS) { 183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 184 "0439 Adapter failed to init, mbxCmd x%x " 185 "READ_REV, mbxStatus x%x\n", 186 mb->mbxCommand, mb->mbxStatus); 187 
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *)&prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *	None.
349 **/ 350 void 351 lpfc_update_vport_wwn(struct lpfc_vport *vport) 352 { 353 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 354 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 355 356 /* If the soft name exists then update it using the service params */ 357 if (vport->phba->cfg_soft_wwnn) 358 u64_to_wwn(vport->phba->cfg_soft_wwnn, 359 vport->fc_sparam.nodeName.u.wwn); 360 if (vport->phba->cfg_soft_wwpn) 361 u64_to_wwn(vport->phba->cfg_soft_wwpn, 362 vport->fc_sparam.portName.u.wwn); 363 364 /* 365 * If the name is empty or there exists a soft name 366 * then copy the service params name, otherwise use the fc name 367 */ 368 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 369 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 370 sizeof(struct lpfc_name)); 371 else 372 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 373 sizeof(struct lpfc_name)); 374 375 /* 376 * If the port name has changed, then set the Param changes flag 377 * to unreg the login 378 */ 379 if (vport->fc_portname.u.wwn[0] != 0 && 380 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 381 sizeof(struct lpfc_name))) 382 vport->vport_flag |= FAWWPN_PARAM_CHG; 383 384 if (vport->fc_portname.u.wwn[0] == 0 || 385 vport->phba->cfg_soft_wwpn || 386 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 387 vport->vport_flag & FAWWPN_SET) { 388 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 389 sizeof(struct lpfc_name)); 390 vport->vport_flag &= ~FAWWPN_SET; 391 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 392 vport->vport_flag |= FAWWPN_SET; 393 } 394 else 395 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 396 sizeof(struct lpfc_name)); 397 } 398 399 /** 400 * lpfc_config_port_post - Perform lpfc initialization after config port 401 * @phba: pointer to lpfc hba data structure. 402 * 403 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 404 * command call. It performs all internal resource and state setups on the 405 * port: post IOCB buffers, enable appropriate host interrupt attentions, 406 * ELS ring timers, etc. 407 * 408 * Return codes 409 * 0 - success. 410 * Any other value - error. 411 **/ 412 int 413 lpfc_config_port_post(struct lpfc_hba *phba) 414 { 415 struct lpfc_vport *vport = phba->pport; 416 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 417 LPFC_MBOXQ_t *pmb; 418 MAILBOX_t *mb; 419 struct lpfc_dmabuf *mp; 420 struct lpfc_sli *psli = &phba->sli; 421 uint32_t status, timeout; 422 int i, j; 423 int rc; 424 425 spin_lock_irq(&phba->hbalock); 426 /* 427 * If the Config port completed correctly the HBA is not 428 * over heated any more. 429 */ 430 if (phba->over_temp_state == HBA_OVER_TEMP) 431 phba->over_temp_state = HBA_NORMAL_TEMP; 432 spin_unlock_irq(&phba->hbalock); 433 434 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 435 if (!pmb) { 436 phba->link_state = LPFC_HBA_ERROR; 437 return -ENOMEM; 438 } 439 mb = &pmb->u.mb; 440 441 /* Get login parameters for NID. 
*/ 442 rc = lpfc_read_sparam(phba, pmb, 0); 443 if (rc) { 444 mempool_free(pmb, phba->mbox_mem_pool); 445 return -ENOMEM; 446 } 447 448 pmb->vport = vport; 449 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 450 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 451 "0448 Adapter failed init, mbxCmd x%x " 452 "READ_SPARM mbxStatus x%x\n", 453 mb->mbxCommand, mb->mbxStatus); 454 phba->link_state = LPFC_HBA_ERROR; 455 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 456 mempool_free(pmb, phba->mbox_mem_pool); 457 lpfc_mbuf_free(phba, mp->virt, mp->phys); 458 kfree(mp); 459 return -EIO; 460 } 461 462 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 463 464 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 465 lpfc_mbuf_free(phba, mp->virt, mp->phys); 466 kfree(mp); 467 pmb->ctx_buf = NULL; 468 lpfc_update_vport_wwn(vport); 469 470 /* Update the fc_host data structures with new wwn. */ 471 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 472 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 473 fc_host_max_npiv_vports(shost) = phba->max_vpi; 474 475 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 476 /* This should be consolidated into parse_vpd ? - mr */ 477 if (phba->SerialNumber[0] == 0) { 478 uint8_t *outptr; 479 480 outptr = &vport->fc_nodename.u.s.IEEE[0]; 481 for (i = 0; i < 12; i++) { 482 status = *outptr++; 483 j = ((status & 0xf0) >> 4); 484 if (j <= 9) 485 phba->SerialNumber[i] = 486 (char)((uint8_t) 0x30 + (uint8_t) j); 487 else 488 phba->SerialNumber[i] = 489 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 490 i++; 491 j = (status & 0xf); 492 if (j <= 9) 493 phba->SerialNumber[i] = 494 (char)((uint8_t) 0x30 + (uint8_t) j); 495 else 496 phba->SerialNumber[i] = 497 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 498 } 499 } 500 501 lpfc_read_config(phba, pmb); 502 pmb->vport = vport; 503 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 504 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 505 "0453 Adapter failed to init, mbxCmd x%x " 506 "READ_CONFIG, mbxStatus x%x\n", 507 mb->mbxCommand, mb->mbxStatus); 508 phba->link_state = LPFC_HBA_ERROR; 509 mempool_free( pmb, phba->mbox_mem_pool); 510 return -EIO; 511 } 512 513 /* Check if the port is disabled */ 514 lpfc_sli_read_link_ste(phba); 515 516 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 517 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { 518 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 519 "3359 HBA queue depth changed from %d to %d\n", 520 phba->cfg_hba_queue_depth, 521 mb->un.varRdConfig.max_xri); 522 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; 523 } 524 525 phba->lmt = mb->un.varRdConfig.lmt; 526 527 /* Get the default values for Model Name and Description */ 528 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 529 530 phba->link_state = LPFC_LINK_DOWN; 531 532 /* Only process IOCBs on ELS ring till hba_state is READY */ 533 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 534 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 535 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 536 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 537 538 /* Post receive buffers for desired rings */ 539 if (phba->sli_rev != 3) 540 lpfc_post_rcv_buf(phba); 541 542 /* 543 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 544 */ 545 if (phba->intr_type == MSIX) { 546 rc = lpfc_config_msi(phba, pmb); 547 if (rc) { 548 mempool_free(pmb, phba->mbox_mem_pool); 549 return -EIO; 550 } 
551 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 552 if (rc != MBX_SUCCESS) { 553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 554 "0352 Config MSI mailbox command " 555 "failed, mbxCmd x%x, mbxStatus x%x\n", 556 pmb->u.mb.mbxCommand, 557 pmb->u.mb.mbxStatus); 558 mempool_free(pmb, phba->mbox_mem_pool); 559 return -EIO; 560 } 561 } 562 563 spin_lock_irq(&phba->hbalock); 564 /* Initialize ERATT handling flag */ 565 phba->hba_flag &= ~HBA_ERATT_HANDLED; 566 567 /* Enable appropriate host interrupts */ 568 if (lpfc_readl(phba->HCregaddr, &status)) { 569 spin_unlock_irq(&phba->hbalock); 570 return -EIO; 571 } 572 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 573 if (psli->num_rings > 0) 574 status |= HC_R0INT_ENA; 575 if (psli->num_rings > 1) 576 status |= HC_R1INT_ENA; 577 if (psli->num_rings > 2) 578 status |= HC_R2INT_ENA; 579 if (psli->num_rings > 3) 580 status |= HC_R3INT_ENA; 581 582 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 583 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 584 status &= ~(HC_R0INT_ENA); 585 586 writel(status, phba->HCregaddr); 587 readl(phba->HCregaddr); /* flush */ 588 spin_unlock_irq(&phba->hbalock); 589 590 /* Set up ring-0 (ELS) timer */ 591 timeout = phba->fc_ratov * 2; 592 mod_timer(&vport->els_tmofunc, 593 jiffies + msecs_to_jiffies(1000 * timeout)); 594 /* Set up heart beat (HB) timer */ 595 mod_timer(&phba->hb_tmofunc, 596 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 597 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 598 phba->last_completion_time = jiffies; 599 /* Set up error attention (ERATT) polling timer */ 600 mod_timer(&phba->eratt_poll, 601 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 602 603 if (phba->hba_flag & LINK_DISABLED) { 604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 605 "2598 Adapter Link is disabled.\n"); 606 lpfc_down_link(phba, pmb); 607 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 608 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 609 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 610 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 611 "2599 Adapter failed to issue DOWN_LINK" 612 " mbox command rc 0x%x\n", rc); 613 614 mempool_free(pmb, phba->mbox_mem_pool); 615 return -EIO; 616 } 617 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 618 mempool_free(pmb, phba->mbox_mem_pool); 619 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 620 if (rc) 621 return rc; 622 } 623 /* MBOX buffer will be freed in mbox compl */ 624 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 625 if (!pmb) { 626 phba->link_state = LPFC_HBA_ERROR; 627 return -ENOMEM; 628 } 629 630 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 631 pmb->mbox_cmpl = lpfc_config_async_cmpl; 632 pmb->vport = phba->pport; 633 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 634 635 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 636 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 637 "0456 Adapter failed to issue " 638 "ASYNCEVT_ENABLE mbox status x%x\n", 639 rc); 640 mempool_free(pmb, phba->mbox_mem_pool); 641 } 642 643 /* Get Option rom version */ 644 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 645 if (!pmb) { 646 phba->link_state = LPFC_HBA_ERROR; 647 return -ENOMEM; 648 } 649 650 lpfc_dump_wakeup_param(phba, pmb); 651 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 652 pmb->vport = phba->pport; 653 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 654 655 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 656 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 657 "0435 Adapter failed " 658 "to get Option ROM 
version status x%x\n", rc); 659 mempool_free(pmb, phba->mbox_mem_pool); 660 } 661 662 return 0; 663 } 664 665 /** 666 * lpfc_hba_init_link - Initialize the FC link 667 * @phba: pointer to lpfc hba data structure. 668 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 669 * 670 * This routine will issue the INIT_LINK mailbox command call. 671 * It is available to other drivers through the lpfc_hba data 672 * structure for use as a delayed link up mechanism with the 673 * module parameter lpfc_suppress_link_up. 674 * 675 * Return code 676 * 0 - success 677 * Any other value - error 678 **/ 679 static int 680 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 681 { 682 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 683 } 684 685 /** 686 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 687 * @phba: pointer to lpfc hba data structure. 688 * @fc_topology: desired fc topology. 689 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 690 * 691 * This routine will issue the INIT_LINK mailbox command call. 692 * It is available to other drivers through the lpfc_hba data 693 * structure for use as a delayed link up mechanism with the 694 * module parameter lpfc_suppress_link_up. 695 * 696 * Return code 697 * 0 - success 698 * Any other value - error 699 **/ 700 int 701 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 702 uint32_t flag) 703 { 704 struct lpfc_vport *vport = phba->pport; 705 LPFC_MBOXQ_t *pmb; 706 MAILBOX_t *mb; 707 int rc; 708 709 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 710 if (!pmb) { 711 phba->link_state = LPFC_HBA_ERROR; 712 return -ENOMEM; 713 } 714 mb = &pmb->u.mb; 715 pmb->vport = vport; 716 717 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 718 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 719 !(phba->lmt & LMT_1Gb)) || 720 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 721 !(phba->lmt & LMT_2Gb)) || 722 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 723 !(phba->lmt & LMT_4Gb)) || 724 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 725 !(phba->lmt & LMT_8Gb)) || 726 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 727 !(phba->lmt & LMT_10Gb)) || 728 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 729 !(phba->lmt & LMT_16Gb)) || 730 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 731 !(phba->lmt & LMT_32Gb)) || 732 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 733 !(phba->lmt & LMT_64Gb))) { 734 /* Reset link speed to auto */ 735 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 736 "1302 Invalid speed for this board:%d " 737 "Reset link speed to auto.\n", 738 phba->cfg_link_speed); 739 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 740 } 741 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 742 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 743 if (phba->sli_rev < LPFC_SLI_REV4) 744 lpfc_set_loopback_flag(phba); 745 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 746 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 747 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 748 "0498 Adapter failed to init, mbxCmd x%x " 749 "INIT_LINK, mbxStatus x%x\n", 750 mb->mbxCommand, mb->mbxStatus); 751 if (phba->sli_rev <= LPFC_SLI_REV3) { 752 /* Clear all interrupt enable conditions */ 753 writel(0, phba->HCregaddr); 754 readl(phba->HCregaddr); /* flush */ 755 /* Clear all pending interrupts */ 756 writel(0xffffffff, phba->HAregaddr); 757 readl(phba->HAregaddr); /* flush */ 758 } 759 phba->link_state 
= LPFC_HBA_ERROR; 760 if (rc != MBX_BUSY || flag == MBX_POLL) 761 mempool_free(pmb, phba->mbox_mem_pool); 762 return -EIO; 763 } 764 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 765 if (flag == MBX_POLL) 766 mempool_free(pmb, phba->mbox_mem_pool); 767 768 return 0; 769 } 770 771 /** 772 * lpfc_hba_down_link - this routine downs the FC link 773 * @phba: pointer to lpfc hba data structure. 774 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 775 * 776 * This routine will issue the DOWN_LINK mailbox command call. 777 * It is available to other drivers through the lpfc_hba data 778 * structure for use to stop the link. 779 * 780 * Return code 781 * 0 - success 782 * Any other value - error 783 **/ 784 static int 785 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 786 { 787 LPFC_MBOXQ_t *pmb; 788 int rc; 789 790 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 791 if (!pmb) { 792 phba->link_state = LPFC_HBA_ERROR; 793 return -ENOMEM; 794 } 795 796 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 797 "0491 Adapter Link is disabled.\n"); 798 lpfc_down_link(phba, pmb); 799 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 800 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 801 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 802 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 803 "2522 Adapter failed to issue DOWN_LINK" 804 " mbox command rc 0x%x\n", rc); 805 806 mempool_free(pmb, phba->mbox_mem_pool); 807 return -EIO; 808 } 809 if (flag == MBX_POLL) 810 mempool_free(pmb, phba->mbox_mem_pool); 811 812 return 0; 813 } 814 815 /** 816 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 817 * @phba: pointer to lpfc HBA data structure. 818 * 819 * This routine will do LPFC uninitialization before the HBA is reset when 820 * bringing down the SLI Layer. 821 * 822 * Return codes 823 * 0 - success. 824 * Any other value - error. 825 **/ 826 int 827 lpfc_hba_down_prep(struct lpfc_hba *phba) 828 { 829 struct lpfc_vport **vports; 830 int i; 831 832 if (phba->sli_rev <= LPFC_SLI_REV3) { 833 /* Disable interrupts */ 834 writel(0, phba->HCregaddr); 835 readl(phba->HCregaddr); /* flush */ 836 } 837 838 if (phba->pport->load_flag & FC_UNLOADING) 839 lpfc_cleanup_discovery_resources(phba->pport); 840 else { 841 vports = lpfc_create_vport_work_array(phba); 842 if (vports != NULL) 843 for (i = 0; i <= phba->max_vports && 844 vports[i] != NULL; i++) 845 lpfc_cleanup_discovery_resources(vports[i]); 846 lpfc_destroy_vport_work_array(phba, vports); 847 } 848 return 0; 849 } 850 851 /** 852 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 853 * rspiocb which got deferred 854 * 855 * @phba: pointer to lpfc HBA data structure. 856 * 857 * This routine will cleanup completed slow path events after HBA is reset 858 * when bringing down the SLI Layer. 859 * 860 * 861 * Return codes 862 * void. 
863 **/ 864 static void 865 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 866 { 867 struct lpfc_iocbq *rspiocbq; 868 struct hbq_dmabuf *dmabuf; 869 struct lpfc_cq_event *cq_event; 870 871 spin_lock_irq(&phba->hbalock); 872 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 873 spin_unlock_irq(&phba->hbalock); 874 875 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 876 /* Get the response iocb from the head of work queue */ 877 spin_lock_irq(&phba->hbalock); 878 list_remove_head(&phba->sli4_hba.sp_queue_event, 879 cq_event, struct lpfc_cq_event, list); 880 spin_unlock_irq(&phba->hbalock); 881 882 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 883 case CQE_CODE_COMPL_WQE: 884 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 885 cq_event); 886 lpfc_sli_release_iocbq(phba, rspiocbq); 887 break; 888 case CQE_CODE_RECEIVE: 889 case CQE_CODE_RECEIVE_V1: 890 dmabuf = container_of(cq_event, struct hbq_dmabuf, 891 cq_event); 892 lpfc_in_buf_free(phba, &dmabuf->dbuf); 893 } 894 } 895 } 896 897 /** 898 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 899 * @phba: pointer to lpfc HBA data structure. 900 * 901 * This routine will cleanup posted ELS buffers after the HBA is reset 902 * when bringing down the SLI Layer. 903 * 904 * 905 * Return codes 906 * void. 907 **/ 908 static void 909 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 910 { 911 struct lpfc_sli *psli = &phba->sli; 912 struct lpfc_sli_ring *pring; 913 struct lpfc_dmabuf *mp, *next_mp; 914 LIST_HEAD(buflist); 915 int count; 916 917 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 918 lpfc_sli_hbqbuf_free_all(phba); 919 else { 920 /* Cleanup preposted buffers on the ELS ring */ 921 pring = &psli->sli3_ring[LPFC_ELS_RING]; 922 spin_lock_irq(&phba->hbalock); 923 list_splice_init(&pring->postbufq, &buflist); 924 spin_unlock_irq(&phba->hbalock); 925 926 count = 0; 927 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 928 list_del(&mp->list); 929 count++; 930 lpfc_mbuf_free(phba, mp->virt, mp->phys); 931 kfree(mp); 932 } 933 934 spin_lock_irq(&phba->hbalock); 935 pring->postbufq_cnt -= count; 936 spin_unlock_irq(&phba->hbalock); 937 } 938 } 939 940 /** 941 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 942 * @phba: pointer to lpfc HBA data structure. 943 * 944 * This routine will cleanup the txcmplq after the HBA is reset when bringing 945 * down the SLI Layer. 946 * 947 * Return codes 948 * void 949 **/ 950 static void 951 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 952 { 953 struct lpfc_sli *psli = &phba->sli; 954 struct lpfc_queue *qp = NULL; 955 struct lpfc_sli_ring *pring; 956 LIST_HEAD(completions); 957 int i; 958 struct lpfc_iocbq *piocb, *next_iocb; 959 960 if (phba->sli_rev != LPFC_SLI_REV4) { 961 for (i = 0; i < psli->num_rings; i++) { 962 pring = &psli->sli3_ring[i]; 963 spin_lock_irq(&phba->hbalock); 964 /* At this point in time the HBA is either reset or DOA 965 * Nothing should be on txcmplq as it will 966 * NEVER complete. 
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/*
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
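 * A CQ whose primary CPU has been mostly busy (less than 15% idle over the
 * sampling window) stays in LPFC_QUEUE_WORK mode; otherwise the CQ is
 * switched to LPFC_IRQ_POLL.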
1228 * 1229 * Return codes: 1230 * None 1231 **/ 1232 static void 1233 lpfc_idle_stat_delay_work(struct work_struct *work) 1234 { 1235 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1236 struct lpfc_hba, 1237 idle_stat_delay_work); 1238 struct lpfc_queue *cq; 1239 struct lpfc_sli4_hdw_queue *hdwq; 1240 struct lpfc_idle_stat *idle_stat; 1241 u32 i, idle_percent; 1242 u64 wall, wall_idle, diff_wall, diff_idle, busy_time; 1243 1244 if (phba->pport->load_flag & FC_UNLOADING) 1245 return; 1246 1247 if (phba->link_state == LPFC_HBA_ERROR || 1248 phba->pport->fc_flag & FC_OFFLINE_MODE || 1249 phba->cmf_active_mode != LPFC_CFG_OFF) 1250 goto requeue; 1251 1252 for_each_present_cpu(i) { 1253 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; 1254 cq = hdwq->io_cq; 1255 1256 /* Skip if we've already handled this cq's primary CPU */ 1257 if (cq->chann != i) 1258 continue; 1259 1260 idle_stat = &phba->sli4_hba.idle_stat[i]; 1261 1262 /* get_cpu_idle_time returns values as running counters. Thus, 1263 * to know the amount for this period, the prior counter values 1264 * need to be subtracted from the current counter values. 1265 * From there, the idle time stat can be calculated as a 1266 * percentage of 100 - the sum of the other consumption times. 1267 */ 1268 wall_idle = get_cpu_idle_time(i, &wall, 1); 1269 diff_idle = wall_idle - idle_stat->prev_idle; 1270 diff_wall = wall - idle_stat->prev_wall; 1271 1272 if (diff_wall <= diff_idle) 1273 busy_time = 0; 1274 else 1275 busy_time = diff_wall - diff_idle; 1276 1277 idle_percent = div64_u64(100 * busy_time, diff_wall); 1278 idle_percent = 100 - idle_percent; 1279 1280 if (idle_percent < 15) 1281 cq->poll_mode = LPFC_QUEUE_WORK; 1282 else 1283 cq->poll_mode = LPFC_IRQ_POLL; 1284 1285 idle_stat->prev_idle = wall_idle; 1286 idle_stat->prev_wall = wall; 1287 } 1288 1289 requeue: 1290 schedule_delayed_work(&phba->idle_stat_delay_work, 1291 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); 1292 } 1293 1294 static void 1295 lpfc_hb_eq_delay_work(struct work_struct *work) 1296 { 1297 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1298 struct lpfc_hba, eq_delay_work); 1299 struct lpfc_eq_intr_info *eqi, *eqi_new; 1300 struct lpfc_queue *eq, *eq_next; 1301 unsigned char *ena_delay = NULL; 1302 uint32_t usdelay; 1303 int i; 1304 1305 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1306 return; 1307 1308 if (phba->link_state == LPFC_HBA_ERROR || 1309 phba->pport->fc_flag & FC_OFFLINE_MODE) 1310 goto requeue; 1311 1312 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1313 GFP_KERNEL); 1314 if (!ena_delay) 1315 goto requeue; 1316 1317 for (i = 0; i < phba->cfg_irq_chann; i++) { 1318 /* Get the EQ corresponding to the IRQ vector */ 1319 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1320 if (!eq) 1321 continue; 1322 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1323 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1324 ena_delay[eq->last_cpu] = 1; 1325 } 1326 } 1327 1328 for_each_present_cpu(i) { 1329 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1330 if (ena_delay[i]) { 1331 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1332 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1333 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1334 } else { 1335 usdelay = 0; 1336 } 1337 1338 eqi->icnt = 0; 1339 1340 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1341 if (unlikely(eq->last_cpu != i)) { 1342 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1343 eq->last_cpu); 1344 list_move_tail(&eq->cpu_list, &eqi_new->list); 1345 continue; 1346 
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer is set properly. Otherwise, if
 * there has been a heart-beat mailbox command outstanding, the HBA shall be
 * put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
				jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
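			 * The timer is re-armed for LPFC_HB_MBOX_TIMEOUT
			 * (30 seconds) rather than taking the port offline
			 * immediately.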
1524 */ 1525 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1526 "0459 Adapter heartbeat still outstanding: " 1527 "last compl time was %d ms.\n", 1528 jiffies_to_msecs(jiffies 1529 - phba->last_completion_time)); 1530 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1531 } else { 1532 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1533 (list_empty(&psli->mboxq))) { 1534 1535 retval = lpfc_issue_hb_mbox(phba); 1536 if (retval) { 1537 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1538 goto out; 1539 } 1540 phba->skipped_hb = 0; 1541 } else if (time_before_eq(phba->last_completion_time, 1542 phba->skipped_hb)) { 1543 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1544 "2857 Last completion time not " 1545 " updated in %d ms\n", 1546 jiffies_to_msecs(jiffies 1547 - phba->last_completion_time)); 1548 } else 1549 phba->skipped_hb = jiffies; 1550 1551 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1552 goto out; 1553 } 1554 } else { 1555 /* Check to see if we want to force a MBX_HEARTBEAT */ 1556 if (phba->hba_flag & HBA_HBEAT_TMO) { 1557 retval = lpfc_issue_hb_mbox(phba); 1558 if (retval) 1559 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1560 else 1561 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1562 goto out; 1563 } 1564 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1565 } 1566 out: 1567 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); 1568 } 1569 1570 /** 1571 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1572 * @phba: pointer to lpfc hba data structure. 1573 * 1574 * This routine is called to bring the HBA offline when HBA hardware error 1575 * other than Port Error 6 has been detected. 1576 **/ 1577 static void 1578 lpfc_offline_eratt(struct lpfc_hba *phba) 1579 { 1580 struct lpfc_sli *psli = &phba->sli; 1581 1582 spin_lock_irq(&phba->hbalock); 1583 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1584 spin_unlock_irq(&phba->hbalock); 1585 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1586 1587 lpfc_offline(phba); 1588 lpfc_reset_barrier(phba); 1589 spin_lock_irq(&phba->hbalock); 1590 lpfc_sli_brdreset(phba); 1591 spin_unlock_irq(&phba->hbalock); 1592 lpfc_hba_down_post(phba); 1593 lpfc_sli_brdready(phba, HS_MBRDY); 1594 lpfc_unblock_mgmt_io(phba); 1595 phba->link_state = LPFC_HBA_ERROR; 1596 return; 1597 } 1598 1599 /** 1600 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1601 * @phba: pointer to lpfc hba data structure. 1602 * 1603 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1604 * other than Port Error 6 has been detected. 1605 **/ 1606 void 1607 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1608 { 1609 spin_lock_irq(&phba->hbalock); 1610 if (phba->link_state == LPFC_HBA_ERROR && 1611 phba->hba_flag & HBA_PCI_ERR) { 1612 spin_unlock_irq(&phba->hbalock); 1613 return; 1614 } 1615 phba->link_state = LPFC_HBA_ERROR; 1616 spin_unlock_irq(&phba->hbalock); 1617 1618 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1619 lpfc_sli_flush_io_rings(phba); 1620 lpfc_offline(phba); 1621 lpfc_hba_down_post(phba); 1622 lpfc_unblock_mgmt_io(phba); 1623 } 1624 1625 /** 1626 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1627 * @phba: pointer to lpfc hba data structure. 1628 * 1629 * This routine is invoked to handle the deferred HBA hardware error 1630 * conditions. This type of error is indicated by HBA by setting ER1 1631 * and another ER bit in the host status register. The driver will 1632 * wait until the ER1 bit clears before handling the error condition. 
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggers an erratt. That could cause
	 * I/Os to be dropped by the firmware. Fail the iocbs (I/Os) on
	 * the txcmplq and let the SCSI layer retry them after
	 * re-establishing the link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *)&board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered an erratt with
		 * HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the I/Os on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);
	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
1828 */ 1829 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1830 "0457 Adapter Hardware Error " 1831 "Data: x%x x%x x%x\n", 1832 phba->work_hs, 1833 phba->work_status[0], phba->work_status[1]); 1834 1835 event_data = FC_REG_DUMP_EVENT; 1836 shost = lpfc_shost_from_vport(vport); 1837 fc_host_post_vendor_event(shost, fc_get_event_number(), 1838 sizeof(event_data), (char *) &event_data, 1839 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1840 1841 lpfc_offline_eratt(phba); 1842 } 1843 return; 1844 } 1845 1846 /** 1847 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1848 * @phba: pointer to lpfc hba data structure. 1849 * @mbx_action: flag for mailbox shutdown action. 1850 * @en_rn_msg: send reset/port recovery message. 1851 * This routine is invoked to perform an SLI4 port PCI function reset in 1852 * response to port status register polling attention. It waits for port 1853 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1854 * During this process, interrupt vectors are freed and later requested 1855 * for handling possible port resource change. 1856 **/ 1857 static int 1858 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1859 bool en_rn_msg) 1860 { 1861 int rc; 1862 uint32_t intr_mode; 1863 LPFC_MBOXQ_t *mboxq; 1864 1865 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1866 LPFC_SLI_INTF_IF_TYPE_2) { 1867 /* 1868 * On error status condition, driver need to wait for port 1869 * ready before performing reset. 1870 */ 1871 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1872 if (rc) 1873 return rc; 1874 } 1875 1876 /* need reset: attempt for port recovery */ 1877 if (en_rn_msg) 1878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1879 "2887 Reset Needed: Attempting Port " 1880 "Recovery...\n"); 1881 1882 /* If we are no wait, the HBA has been reset and is not 1883 * functional, thus we should clear 1884 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. 1885 */ 1886 if (mbx_action == LPFC_MBX_NO_WAIT) { 1887 spin_lock_irq(&phba->hbalock); 1888 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 1889 if (phba->sli.mbox_active) { 1890 mboxq = phba->sli.mbox_active; 1891 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 1892 __lpfc_mbox_cmpl_put(phba, mboxq); 1893 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1894 phba->sli.mbox_active = NULL; 1895 } 1896 spin_unlock_irq(&phba->hbalock); 1897 } 1898 1899 lpfc_offline_prep(phba, mbx_action); 1900 lpfc_sli_flush_io_rings(phba); 1901 lpfc_offline(phba); 1902 /* release interrupt for possible resource change */ 1903 lpfc_sli4_disable_intr(phba); 1904 rc = lpfc_sli_brdrestart(phba); 1905 if (rc) { 1906 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1907 "6309 Failed to restart board\n"); 1908 return rc; 1909 } 1910 /* request and enable interrupt */ 1911 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1912 if (intr_mode == LPFC_INTR_ERROR) { 1913 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1914 "3175 Failed to enable interrupt\n"); 1915 return -EIO; 1916 } 1917 phba->intr_mode = intr_mode; 1918 rc = lpfc_online(phba); 1919 if (rc == 0) 1920 lpfc_unblock_mgmt_io(phba); 1921 1922 return rc; 1923 } 1924 1925 /** 1926 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1927 * @phba: pointer to lpfc hba data structure. 1928 * 1929 * This routine is invoked to handle the SLI4 HBA hardware error attention 1930 * conditions. 
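			/* Over-temperature indication (OTI) from the port
			 * status register: post a critical temperature event
			 * and take the port offline below.
			 */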
1931 **/ 1932 static void 1933 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1934 { 1935 struct lpfc_vport *vport = phba->pport; 1936 uint32_t event_data; 1937 struct Scsi_Host *shost; 1938 uint32_t if_type; 1939 struct lpfc_register portstat_reg = {0}; 1940 uint32_t reg_err1, reg_err2; 1941 uint32_t uerrlo_reg, uemasklo_reg; 1942 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1943 bool en_rn_msg = true; 1944 struct temp_event temp_event_data; 1945 struct lpfc_register portsmphr_reg; 1946 int rc, i; 1947 1948 /* If the pci channel is offline, ignore possible errors, since 1949 * we cannot communicate with the pci card anyway. 1950 */ 1951 if (pci_channel_offline(phba->pcidev)) { 1952 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1953 "3166 pci channel is offline\n"); 1954 return; 1955 } 1956 1957 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1958 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1959 switch (if_type) { 1960 case LPFC_SLI_INTF_IF_TYPE_0: 1961 pci_rd_rc1 = lpfc_readl( 1962 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1963 &uerrlo_reg); 1964 pci_rd_rc2 = lpfc_readl( 1965 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1966 &uemasklo_reg); 1967 /* consider PCI bus read error as pci_channel_offline */ 1968 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1969 return; 1970 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1971 lpfc_sli4_offline_eratt(phba); 1972 return; 1973 } 1974 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1975 "7623 Checking UE recoverable"); 1976 1977 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1978 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1979 &portsmphr_reg.word0)) 1980 continue; 1981 1982 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1983 &portsmphr_reg); 1984 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1985 LPFC_PORT_SEM_UE_RECOVERABLE) 1986 break; 1987 /*Sleep for 1Sec, before checking SEMAPHORE */ 1988 msleep(1000); 1989 } 1990 1991 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1992 "4827 smphr_port_status x%x : Waited %dSec", 1993 smphr_port_status, i); 1994 1995 /* Recoverable UE, reset the HBA device */ 1996 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1997 LPFC_PORT_SEM_UE_RECOVERABLE) { 1998 for (i = 0; i < 20; i++) { 1999 msleep(1000); 2000 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2001 &portsmphr_reg.word0) && 2002 (LPFC_POST_STAGE_PORT_READY == 2003 bf_get(lpfc_port_smphr_port_status, 2004 &portsmphr_reg))) { 2005 rc = lpfc_sli4_port_sta_fn_reset(phba, 2006 LPFC_MBX_NO_WAIT, en_rn_msg); 2007 if (rc == 0) 2008 return; 2009 lpfc_printf_log(phba, KERN_ERR, 2010 LOG_TRACE_EVENT, 2011 "4215 Failed to recover UE"); 2012 break; 2013 } 2014 } 2015 } 2016 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2017 "7624 Firmware not ready: Failing UE recovery," 2018 " waited %dSec", i); 2019 phba->link_state = LPFC_HBA_ERROR; 2020 break; 2021 2022 case LPFC_SLI_INTF_IF_TYPE_2: 2023 case LPFC_SLI_INTF_IF_TYPE_6: 2024 pci_rd_rc1 = lpfc_readl( 2025 phba->sli4_hba.u.if_type2.STATUSregaddr, 2026 &portstat_reg.word0); 2027 /* consider PCI bus read error as pci_channel_offline */ 2028 if (pci_rd_rc1 == -EIO) { 2029 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2030 "3151 PCI bus read access failure: x%x\n", 2031 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 2032 lpfc_sli4_offline_eratt(phba); 2033 return; 2034 } 2035 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 2036 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 2037 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 2038 
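 * For if_type 0 ports it polls the port semaphore register for a
 * recoverable UE state and, if found, performs a PCI function reset.
 * For if_type 2/6 ports it decodes the ERR1/ERR2 registers and attempts
 * a function reset when HBA resets are enabled.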
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2039 "2889 Port Overtemperature event, " 2040 "taking port offline Data: x%x x%x\n", 2041 reg_err1, reg_err2); 2042 2043 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 2044 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 2045 temp_event_data.event_code = LPFC_CRIT_TEMP; 2046 temp_event_data.data = 0xFFFFFFFF; 2047 2048 shost = lpfc_shost_from_vport(phba->pport); 2049 fc_host_post_vendor_event(shost, fc_get_event_number(), 2050 sizeof(temp_event_data), 2051 (char *)&temp_event_data, 2052 SCSI_NL_VID_TYPE_PCI 2053 | PCI_VENDOR_ID_EMULEX); 2054 2055 spin_lock_irq(&phba->hbalock); 2056 phba->over_temp_state = HBA_OVER_TEMP; 2057 spin_unlock_irq(&phba->hbalock); 2058 lpfc_sli4_offline_eratt(phba); 2059 return; 2060 } 2061 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2062 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 2063 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2064 "3143 Port Down: Firmware Update " 2065 "Detected\n"); 2066 en_rn_msg = false; 2067 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2068 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2069 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2070 "3144 Port Down: Debug Dump\n"); 2071 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2072 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 2073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2074 "3145 Port Down: Provisioning\n"); 2075 2076 /* If resets are disabled then leave the HBA alone and return */ 2077 if (!phba->cfg_enable_hba_reset) 2078 return; 2079 2080 /* Check port status register for function reset */ 2081 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 2082 en_rn_msg); 2083 if (rc == 0) { 2084 /* don't report event on forced debug dump */ 2085 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2086 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2087 return; 2088 else 2089 break; 2090 } 2091 /* fall through for not able to recover */ 2092 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2093 "3152 Unrecoverable error\n"); 2094 phba->link_state = LPFC_HBA_ERROR; 2095 break; 2096 case LPFC_SLI_INTF_IF_TYPE_1: 2097 default: 2098 break; 2099 } 2100 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2101 "3123 Report dump event to upper layer\n"); 2102 /* Send an internal error event to mgmt application */ 2103 lpfc_board_errevt_to_mgmt(phba); 2104 2105 event_data = FC_REG_DUMP_EVENT; 2106 shost = lpfc_shost_from_vport(vport); 2107 fc_host_post_vendor_event(shost, fc_get_event_number(), 2108 sizeof(event_data), (char *) &event_data, 2109 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2110 } 2111 2112 /** 2113 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2114 * @phba: pointer to lpfc HBA data structure. 2115 * 2116 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2117 * routine from the API jump table function pointer from the lpfc_hba struct. 2118 * 2119 * Return codes 2120 * 0 - success. 2121 * Any other value - error. 2122 **/ 2123 void 2124 lpfc_handle_eratt(struct lpfc_hba *phba) 2125 { 2126 (*phba->lpfc_handle_eratt)(phba); 2127 } 2128 2129 /** 2130 * lpfc_handle_latt - The HBA link event handler 2131 * @phba: pointer to lpfc hba data structure. 2132 * 2133 * This routine is invoked from the worker thread to handle a HBA host 2134 * attention link event. SLI3 only. 
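 * It issues a READ_TOPOLOGY mailbox command to retrieve the new link
 * state and blocks ELS IOCBs on the ELS ring until that mailbox command
 * completes.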
2135 **/ 2136 void 2137 lpfc_handle_latt(struct lpfc_hba *phba) 2138 { 2139 struct lpfc_vport *vport = phba->pport; 2140 struct lpfc_sli *psli = &phba->sli; 2141 LPFC_MBOXQ_t *pmb; 2142 volatile uint32_t control; 2143 struct lpfc_dmabuf *mp; 2144 int rc = 0; 2145 2146 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2147 if (!pmb) { 2148 rc = 1; 2149 goto lpfc_handle_latt_err_exit; 2150 } 2151 2152 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2153 if (!mp) { 2154 rc = 2; 2155 goto lpfc_handle_latt_free_pmb; 2156 } 2157 2158 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2159 if (!mp->virt) { 2160 rc = 3; 2161 goto lpfc_handle_latt_free_mp; 2162 } 2163 2164 /* Cleanup any outstanding ELS commands */ 2165 lpfc_els_flush_all_cmd(phba); 2166 2167 psli->slistat.link_event++; 2168 lpfc_read_topology(phba, pmb, mp); 2169 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2170 pmb->vport = vport; 2171 /* Block ELS IOCBs until we have processed this mbox command */ 2172 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2173 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2174 if (rc == MBX_NOT_FINISHED) { 2175 rc = 4; 2176 goto lpfc_handle_latt_free_mbuf; 2177 } 2178 2179 /* Clear Link Attention in HA REG */ 2180 spin_lock_irq(&phba->hbalock); 2181 writel(HA_LATT, phba->HAregaddr); 2182 readl(phba->HAregaddr); /* flush */ 2183 spin_unlock_irq(&phba->hbalock); 2184 2185 return; 2186 2187 lpfc_handle_latt_free_mbuf: 2188 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2189 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2190 lpfc_handle_latt_free_mp: 2191 kfree(mp); 2192 lpfc_handle_latt_free_pmb: 2193 mempool_free(pmb, phba->mbox_mem_pool); 2194 lpfc_handle_latt_err_exit: 2195 /* Enable Link attention interrupts */ 2196 spin_lock_irq(&phba->hbalock); 2197 psli->sli_flag |= LPFC_PROCESS_LA; 2198 control = readl(phba->HCregaddr); 2199 control |= HC_LAINT_ENA; 2200 writel(control, phba->HCregaddr); 2201 readl(phba->HCregaddr); /* flush */ 2202 2203 /* Clear Link Attention in HA REG */ 2204 writel(HA_LATT, phba->HAregaddr); 2205 readl(phba->HAregaddr); /* flush */ 2206 spin_unlock_irq(&phba->hbalock); 2207 lpfc_linkdown(phba); 2208 phba->link_state = LPFC_HBA_ERROR; 2209 2210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2211 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2212 2213 return; 2214 } 2215 2216 /** 2217 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2218 * @phba: pointer to lpfc hba data structure. 2219 * @vpd: pointer to the vital product data. 2220 * @len: length of the vital product data in bytes. 2221 * 2222 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2223 * an array of characters. In this routine, the ModelName, ProgramType, and 2224 * ModelDesc, etc. fields of the phba data structure will be populated. 
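 * The buffer is scanned for the standard VPD resource tags (0x82/0x91
 * identifier strings, the 0x90 read-only area and the 0x78 end tag); the
 * SN and V1-V4 keywords found in the read-only area are copied into the
 * corresponding fields of the phba structure.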
2225 * 2226 * Return codes 2227 * 0 - pointer to the VPD passed in is NULL 2228 * 1 - success 2229 **/ 2230 int 2231 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2232 { 2233 uint8_t lenlo, lenhi; 2234 int Length; 2235 int i, j; 2236 int finished = 0; 2237 int index = 0; 2238 2239 if (!vpd) 2240 return 0; 2241 2242 /* Vital Product */ 2243 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2244 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2245 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2246 (uint32_t) vpd[3]); 2247 while (!finished && (index < (len - 4))) { 2248 switch (vpd[index]) { 2249 case 0x82: 2250 case 0x91: 2251 index += 1; 2252 lenlo = vpd[index]; 2253 index += 1; 2254 lenhi = vpd[index]; 2255 index += 1; 2256 i = ((((unsigned short)lenhi) << 8) + lenlo); 2257 index += i; 2258 break; 2259 case 0x90: 2260 index += 1; 2261 lenlo = vpd[index]; 2262 index += 1; 2263 lenhi = vpd[index]; 2264 index += 1; 2265 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2266 if (Length > len - index) 2267 Length = len - index; 2268 while (Length > 0) { 2269 /* Look for Serial Number */ 2270 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2271 index += 2; 2272 i = vpd[index]; 2273 index += 1; 2274 j = 0; 2275 Length -= (3+i); 2276 while(i--) { 2277 phba->SerialNumber[j++] = vpd[index++]; 2278 if (j == 31) 2279 break; 2280 } 2281 phba->SerialNumber[j] = 0; 2282 continue; 2283 } 2284 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2285 phba->vpd_flag |= VPD_MODEL_DESC; 2286 index += 2; 2287 i = vpd[index]; 2288 index += 1; 2289 j = 0; 2290 Length -= (3+i); 2291 while(i--) { 2292 phba->ModelDesc[j++] = vpd[index++]; 2293 if (j == 255) 2294 break; 2295 } 2296 phba->ModelDesc[j] = 0; 2297 continue; 2298 } 2299 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2300 phba->vpd_flag |= VPD_MODEL_NAME; 2301 index += 2; 2302 i = vpd[index]; 2303 index += 1; 2304 j = 0; 2305 Length -= (3+i); 2306 while(i--) { 2307 phba->ModelName[j++] = vpd[index++]; 2308 if (j == 79) 2309 break; 2310 } 2311 phba->ModelName[j] = 0; 2312 continue; 2313 } 2314 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2315 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2316 index += 2; 2317 i = vpd[index]; 2318 index += 1; 2319 j = 0; 2320 Length -= (3+i); 2321 while(i--) { 2322 phba->ProgramType[j++] = vpd[index++]; 2323 if (j == 255) 2324 break; 2325 } 2326 phba->ProgramType[j] = 0; 2327 continue; 2328 } 2329 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2330 phba->vpd_flag |= VPD_PORT; 2331 index += 2; 2332 i = vpd[index]; 2333 index += 1; 2334 j = 0; 2335 Length -= (3+i); 2336 while(i--) { 2337 if ((phba->sli_rev == LPFC_SLI_REV4) && 2338 (phba->sli4_hba.pport_name_sta == 2339 LPFC_SLI4_PPNAME_GET)) { 2340 j++; 2341 index++; 2342 } else 2343 phba->Port[j++] = vpd[index++]; 2344 if (j == 19) 2345 break; 2346 } 2347 if ((phba->sli_rev != LPFC_SLI_REV4) || 2348 (phba->sli4_hba.pport_name_sta == 2349 LPFC_SLI4_PPNAME_NON)) 2350 phba->Port[j] = 0; 2351 continue; 2352 } 2353 else { 2354 index += 2; 2355 i = vpd[index]; 2356 index += 1; 2357 index += i; 2358 Length -= (3 + i); 2359 } 2360 } 2361 finished = 0; 2362 break; 2363 case 0x78: 2364 finished = 1; 2365 break; 2366 default: 2367 index ++; 2368 break; 2369 } 2370 } 2371 2372 return(1); 2373 } 2374 2375 /** 2376 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2377 * @phba: pointer to lpfc hba data structure. 2378 * @mdp: pointer to the data structure to hold the derived model name. 
2379 * @descp: pointer to the data structure to hold the derived description. 2380 * 2381 * This routine retrieves HBA's description based on its registered PCI device 2382 * ID. The @descp passed into this function points to an array of 256 chars. It 2383 * shall be returned with the model name, maximum speed, and the host bus type. 2384 * The @mdp passed into this function points to an array of 80 chars. When the 2385 * function returns, the @mdp will be filled with the model name. 2386 **/ 2387 static void 2388 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2389 { 2390 lpfc_vpd_t *vp; 2391 uint16_t dev_id = phba->pcidev->device; 2392 int max_speed; 2393 int GE = 0; 2394 int oneConnect = 0; /* default is not a oneConnect */ 2395 struct { 2396 char *name; 2397 char *bus; 2398 char *function; 2399 } m = {"<Unknown>", "", ""}; 2400 2401 if (mdp && mdp[0] != '\0' 2402 && descp && descp[0] != '\0') 2403 return; 2404 2405 if (phba->lmt & LMT_64Gb) 2406 max_speed = 64; 2407 else if (phba->lmt & LMT_32Gb) 2408 max_speed = 32; 2409 else if (phba->lmt & LMT_16Gb) 2410 max_speed = 16; 2411 else if (phba->lmt & LMT_10Gb) 2412 max_speed = 10; 2413 else if (phba->lmt & LMT_8Gb) 2414 max_speed = 8; 2415 else if (phba->lmt & LMT_4Gb) 2416 max_speed = 4; 2417 else if (phba->lmt & LMT_2Gb) 2418 max_speed = 2; 2419 else if (phba->lmt & LMT_1Gb) 2420 max_speed = 1; 2421 else 2422 max_speed = 0; 2423 2424 vp = &phba->vpd; 2425 2426 switch (dev_id) { 2427 case PCI_DEVICE_ID_FIREFLY: 2428 m = (typeof(m)){"LP6000", "PCI", 2429 "Obsolete, Unsupported Fibre Channel Adapter"}; 2430 break; 2431 case PCI_DEVICE_ID_SUPERFLY: 2432 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2433 m = (typeof(m)){"LP7000", "PCI", ""}; 2434 else 2435 m = (typeof(m)){"LP7000E", "PCI", ""}; 2436 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2437 break; 2438 case PCI_DEVICE_ID_DRAGONFLY: 2439 m = (typeof(m)){"LP8000", "PCI", 2440 "Obsolete, Unsupported Fibre Channel Adapter"}; 2441 break; 2442 case PCI_DEVICE_ID_CENTAUR: 2443 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2444 m = (typeof(m)){"LP9002", "PCI", ""}; 2445 else 2446 m = (typeof(m)){"LP9000", "PCI", ""}; 2447 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2448 break; 2449 case PCI_DEVICE_ID_RFLY: 2450 m = (typeof(m)){"LP952", "PCI", 2451 "Obsolete, Unsupported Fibre Channel Adapter"}; 2452 break; 2453 case PCI_DEVICE_ID_PEGASUS: 2454 m = (typeof(m)){"LP9802", "PCI-X", 2455 "Obsolete, Unsupported Fibre Channel Adapter"}; 2456 break; 2457 case PCI_DEVICE_ID_THOR: 2458 m = (typeof(m)){"LP10000", "PCI-X", 2459 "Obsolete, Unsupported Fibre Channel Adapter"}; 2460 break; 2461 case PCI_DEVICE_ID_VIPER: 2462 m = (typeof(m)){"LPX1000", "PCI-X", 2463 "Obsolete, Unsupported Fibre Channel Adapter"}; 2464 break; 2465 case PCI_DEVICE_ID_PFLY: 2466 m = (typeof(m)){"LP982", "PCI-X", 2467 "Obsolete, Unsupported Fibre Channel Adapter"}; 2468 break; 2469 case PCI_DEVICE_ID_TFLY: 2470 m = (typeof(m)){"LP1050", "PCI-X", 2471 "Obsolete, Unsupported Fibre Channel Adapter"}; 2472 break; 2473 case PCI_DEVICE_ID_HELIOS: 2474 m = (typeof(m)){"LP11000", "PCI-X2", 2475 "Obsolete, Unsupported Fibre Channel Adapter"}; 2476 break; 2477 case PCI_DEVICE_ID_HELIOS_SCSP: 2478 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2479 "Obsolete, Unsupported Fibre Channel Adapter"}; 2480 break; 2481 case PCI_DEVICE_ID_HELIOS_DCSP: 2482 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2483 "Obsolete, Unsupported Fibre Channel Adapter"}; 2484 break; 2485 case 
PCI_DEVICE_ID_NEPTUNE: 2486 m = (typeof(m)){"LPe1000", "PCIe", 2487 "Obsolete, Unsupported Fibre Channel Adapter"}; 2488 break; 2489 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2490 m = (typeof(m)){"LPe1000-SP", "PCIe", 2491 "Obsolete, Unsupported Fibre Channel Adapter"}; 2492 break; 2493 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2494 m = (typeof(m)){"LPe1002-SP", "PCIe", 2495 "Obsolete, Unsupported Fibre Channel Adapter"}; 2496 break; 2497 case PCI_DEVICE_ID_BMID: 2498 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2499 break; 2500 case PCI_DEVICE_ID_BSMB: 2501 m = (typeof(m)){"LP111", "PCI-X2", 2502 "Obsolete, Unsupported Fibre Channel Adapter"}; 2503 break; 2504 case PCI_DEVICE_ID_ZEPHYR: 2505 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2506 break; 2507 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2508 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2509 break; 2510 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2511 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2512 GE = 1; 2513 break; 2514 case PCI_DEVICE_ID_ZMID: 2515 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2516 break; 2517 case PCI_DEVICE_ID_ZSMB: 2518 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2519 break; 2520 case PCI_DEVICE_ID_LP101: 2521 m = (typeof(m)){"LP101", "PCI-X", 2522 "Obsolete, Unsupported Fibre Channel Adapter"}; 2523 break; 2524 case PCI_DEVICE_ID_LP10000S: 2525 m = (typeof(m)){"LP10000-S", "PCI", 2526 "Obsolete, Unsupported Fibre Channel Adapter"}; 2527 break; 2528 case PCI_DEVICE_ID_LP11000S: 2529 m = (typeof(m)){"LP11000-S", "PCI-X2", 2530 "Obsolete, Unsupported Fibre Channel Adapter"}; 2531 break; 2532 case PCI_DEVICE_ID_LPE11000S: 2533 m = (typeof(m)){"LPe11000-S", "PCIe", 2534 "Obsolete, Unsupported Fibre Channel Adapter"}; 2535 break; 2536 case PCI_DEVICE_ID_SAT: 2537 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2538 break; 2539 case PCI_DEVICE_ID_SAT_MID: 2540 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2541 break; 2542 case PCI_DEVICE_ID_SAT_SMB: 2543 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2544 break; 2545 case PCI_DEVICE_ID_SAT_DCSP: 2546 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2547 break; 2548 case PCI_DEVICE_ID_SAT_SCSP: 2549 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2550 break; 2551 case PCI_DEVICE_ID_SAT_S: 2552 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2553 break; 2554 case PCI_DEVICE_ID_HORNET: 2555 m = (typeof(m)){"LP21000", "PCIe", 2556 "Obsolete, Unsupported FCoE Adapter"}; 2557 GE = 1; 2558 break; 2559 case PCI_DEVICE_ID_PROTEUS_VF: 2560 m = (typeof(m)){"LPev12000", "PCIe IOV", 2561 "Obsolete, Unsupported Fibre Channel Adapter"}; 2562 break; 2563 case PCI_DEVICE_ID_PROTEUS_PF: 2564 m = (typeof(m)){"LPev12000", "PCIe IOV", 2565 "Obsolete, Unsupported Fibre Channel Adapter"}; 2566 break; 2567 case PCI_DEVICE_ID_PROTEUS_S: 2568 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2569 "Obsolete, Unsupported Fibre Channel Adapter"}; 2570 break; 2571 case PCI_DEVICE_ID_TIGERSHARK: 2572 oneConnect = 1; 2573 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2574 break; 2575 case PCI_DEVICE_ID_TOMCAT: 2576 oneConnect = 1; 2577 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2578 break; 2579 case PCI_DEVICE_ID_FALCON: 2580 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2581 "EmulexSecure Fibre"}; 2582 break; 2583 case PCI_DEVICE_ID_BALIUS: 2584 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2585 "Obsolete, Unsupported Fibre Channel Adapter"}; 2586 break; 2587 
case PCI_DEVICE_ID_LANCER_FC: 2588 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2589 break; 2590 case PCI_DEVICE_ID_LANCER_FC_VF: 2591 m = (typeof(m)){"LPe16000", "PCIe", 2592 "Obsolete, Unsupported Fibre Channel Adapter"}; 2593 break; 2594 case PCI_DEVICE_ID_LANCER_FCOE: 2595 oneConnect = 1; 2596 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2597 break; 2598 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2599 oneConnect = 1; 2600 m = (typeof(m)){"OCe15100", "PCIe", 2601 "Obsolete, Unsupported FCoE"}; 2602 break; 2603 case PCI_DEVICE_ID_LANCER_G6_FC: 2604 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2605 break; 2606 case PCI_DEVICE_ID_LANCER_G7_FC: 2607 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2608 break; 2609 case PCI_DEVICE_ID_LANCER_G7P_FC: 2610 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; 2611 break; 2612 case PCI_DEVICE_ID_SKYHAWK: 2613 case PCI_DEVICE_ID_SKYHAWK_VF: 2614 oneConnect = 1; 2615 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2616 break; 2617 default: 2618 m = (typeof(m)){"Unknown", "", ""}; 2619 break; 2620 } 2621 2622 if (mdp && mdp[0] == '\0') 2623 snprintf(mdp, 79,"%s", m.name); 2624 /* 2625 * oneConnect hba requires special processing, they are all initiators 2626 * and we put the port number on the end 2627 */ 2628 if (descp && descp[0] == '\0') { 2629 if (oneConnect) 2630 snprintf(descp, 255, 2631 "Emulex OneConnect %s, %s Initiator %s", 2632 m.name, m.function, 2633 phba->Port); 2634 else if (max_speed == 0) 2635 snprintf(descp, 255, 2636 "Emulex %s %s %s", 2637 m.name, m.bus, m.function); 2638 else 2639 snprintf(descp, 255, 2640 "Emulex %s %d%s %s %s", 2641 m.name, max_speed, (GE) ? "GE" : "Gb", 2642 m.bus, m.function); 2643 } 2644 } 2645 2646 /** 2647 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2648 * @phba: pointer to lpfc hba data structure. 2649 * @pring: pointer to a IOCB ring. 2650 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2651 * 2652 * This routine posts a given number of IOCBs with the associated DMA buffer 2653 * descriptors specified by the cnt argument to the given IOCB ring. 2654 * 2655 * Return codes 2656 * The number of IOCBs NOT able to be posted to the IOCB ring. 
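 * Each IOCB is built as a CMD_QUE_RING_BUF64_CN command carrying up to
 * two buffer descriptors; buffers that cannot be posted are counted in
 * pring->missbufcnt so a later call can retry them.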
2657 **/ 2658 int 2659 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2660 { 2661 IOCB_t *icmd; 2662 struct lpfc_iocbq *iocb; 2663 struct lpfc_dmabuf *mp1, *mp2; 2664 2665 cnt += pring->missbufcnt; 2666 2667 /* While there are buffers to post */ 2668 while (cnt > 0) { 2669 /* Allocate buffer for command iocb */ 2670 iocb = lpfc_sli_get_iocbq(phba); 2671 if (iocb == NULL) { 2672 pring->missbufcnt = cnt; 2673 return cnt; 2674 } 2675 icmd = &iocb->iocb; 2676 2677 /* 2 buffers can be posted per command */ 2678 /* Allocate buffer to post */ 2679 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2680 if (mp1) 2681 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2682 if (!mp1 || !mp1->virt) { 2683 kfree(mp1); 2684 lpfc_sli_release_iocbq(phba, iocb); 2685 pring->missbufcnt = cnt; 2686 return cnt; 2687 } 2688 2689 INIT_LIST_HEAD(&mp1->list); 2690 /* Allocate buffer to post */ 2691 if (cnt > 1) { 2692 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2693 if (mp2) 2694 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2695 &mp2->phys); 2696 if (!mp2 || !mp2->virt) { 2697 kfree(mp2); 2698 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2699 kfree(mp1); 2700 lpfc_sli_release_iocbq(phba, iocb); 2701 pring->missbufcnt = cnt; 2702 return cnt; 2703 } 2704 2705 INIT_LIST_HEAD(&mp2->list); 2706 } else { 2707 mp2 = NULL; 2708 } 2709 2710 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2711 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2712 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2713 icmd->ulpBdeCount = 1; 2714 cnt--; 2715 if (mp2) { 2716 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2717 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2718 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2719 cnt--; 2720 icmd->ulpBdeCount = 2; 2721 } 2722 2723 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2724 icmd->ulpLe = 1; 2725 2726 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2727 IOCB_ERROR) { 2728 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2729 kfree(mp1); 2730 cnt++; 2731 if (mp2) { 2732 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2733 kfree(mp2); 2734 cnt++; 2735 } 2736 lpfc_sli_release_iocbq(phba, iocb); 2737 pring->missbufcnt = cnt; 2738 return cnt; 2739 } 2740 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2741 if (mp2) 2742 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2743 } 2744 pring->missbufcnt = 0; 2745 return 0; 2746 } 2747 2748 /** 2749 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2750 * @phba: pointer to lpfc hba data structure. 2751 * 2752 * This routine posts initial receive IOCB buffers to the ELS ring. The 2753 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2754 * set to 64 IOCBs. SLI3 only. 2755 * 2756 * Return codes 2757 * 0 - success (currently always success) 2758 **/ 2759 static int 2760 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2761 { 2762 struct lpfc_sli *psli = &phba->sli; 2763 2764 /* Ring 0, ELS / CT buffers */ 2765 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2766 /* Ring 2 - FCP no buffers needed */ 2767 2768 return 0; 2769 } 2770 2771 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2772 2773 /** 2774 * lpfc_sha_init - Set up initial array of hash table entries 2775 * @HashResultPointer: pointer to an array as hash table. 2776 * 2777 * This routine sets up the initial values to the array of hash table entries 2778 * for the LC HBAs. 
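 * The five words written here are the standard SHA-1 initial hash values
 * (H0-H4); the S(N, V) macro above is the 32-bit rotate-left used by the
 * hash rounds.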
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed
 * to by @HashWorkingPointer. The results are put back into the initial
 * hash table, returned through the @HashResultPointer as the result hash
 * table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
			S(1, HashWorkingPointer[t - 3] ^
			     HashWorkingPointer[t - 8] ^
			     HashWorkingPointer[t - 14] ^
			     HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred to by @HashWorking
 * from the challenge random numbers associated with the host, referred to by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
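 * The adapter WWNN and the RandomData challenge words are mixed into an
 * 80-word schedule and run through one SHA-1 style compression
 * (lpfc_sha_init/lpfc_sha_iterate) to produce the hbainit authentication
 * words.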
2869 **/ 2870 void 2871 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2872 { 2873 int t; 2874 uint32_t *HashWorking; 2875 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2876 2877 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2878 if (!HashWorking) 2879 return; 2880 2881 HashWorking[0] = HashWorking[78] = *pwwnn++; 2882 HashWorking[1] = HashWorking[79] = *pwwnn; 2883 2884 for (t = 0; t < 7; t++) 2885 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2886 2887 lpfc_sha_init(hbainit); 2888 lpfc_sha_iterate(hbainit, HashWorking); 2889 kfree(HashWorking); 2890 } 2891 2892 /** 2893 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2894 * @vport: pointer to a virtual N_Port data structure. 2895 * 2896 * This routine performs the necessary cleanups before deleting the @vport. 2897 * It invokes the discovery state machine to perform necessary state 2898 * transitions and to release the ndlps associated with the @vport. Note, 2899 * the physical port is treated as @vport 0. 2900 **/ 2901 void 2902 lpfc_cleanup(struct lpfc_vport *vport) 2903 { 2904 struct lpfc_hba *phba = vport->phba; 2905 struct lpfc_nodelist *ndlp, *next_ndlp; 2906 int i = 0; 2907 2908 if (phba->link_state > LPFC_LINK_DOWN) 2909 lpfc_port_link_failure(vport); 2910 2911 /* Clean up VMID resources */ 2912 if (lpfc_is_vmid_enabled(phba)) 2913 lpfc_vmid_vport_cleanup(vport); 2914 2915 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2916 if (vport->port_type != LPFC_PHYSICAL_PORT && 2917 ndlp->nlp_DID == Fabric_DID) { 2918 /* Just free up ndlp with Fabric_DID for vports */ 2919 lpfc_nlp_put(ndlp); 2920 continue; 2921 } 2922 2923 if (ndlp->nlp_DID == Fabric_Cntl_DID && 2924 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2925 lpfc_nlp_put(ndlp); 2926 continue; 2927 } 2928 2929 /* Fabric Ports not in UNMAPPED state are cleaned up in the 2930 * DEVICE_RM event. 2931 */ 2932 if (ndlp->nlp_type & NLP_FABRIC && 2933 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 2934 lpfc_disc_state_machine(vport, ndlp, NULL, 2935 NLP_EVT_DEVICE_RECOVERY); 2936 2937 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD))) 2938 lpfc_disc_state_machine(vport, ndlp, NULL, 2939 NLP_EVT_DEVICE_RM); 2940 } 2941 2942 /* At this point, ALL ndlp's should be gone 2943 * because of the previous NLP_EVT_DEVICE_RM. 2944 * Lets wait for this to happen, if needed. 2945 */ 2946 while (!list_empty(&vport->fc_nodes)) { 2947 if (i++ > 3000) { 2948 lpfc_printf_vlog(vport, KERN_ERR, 2949 LOG_TRACE_EVENT, 2950 "0233 Nodelist not empty\n"); 2951 list_for_each_entry_safe(ndlp, next_ndlp, 2952 &vport->fc_nodes, nlp_listp) { 2953 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2954 LOG_TRACE_EVENT, 2955 "0282 did:x%x ndlp:x%px " 2956 "refcnt:%d xflags x%x nflag x%x\n", 2957 ndlp->nlp_DID, (void *)ndlp, 2958 kref_read(&ndlp->kref), 2959 ndlp->fc4_xpt_flags, 2960 ndlp->nlp_flag); 2961 } 2962 break; 2963 } 2964 2965 /* Wait for any activity on ndlps to settle */ 2966 msleep(10); 2967 } 2968 lpfc_cleanup_vports_rrqs(vport, NULL); 2969 } 2970 2971 /** 2972 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2973 * @vport: pointer to a virtual N_Port data structure. 2974 * 2975 * This routine stops all the timers associated with a @vport. This function 2976 * is invoked before disabling or deleting a @vport. Note that the physical 2977 * port is treated as @vport 0. 
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}

/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscovery wait timer if it is on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscovery wait timer if it is on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_cmf_stop - Stop CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when the link goes down or if CMF mode is turned OFF.
 * It is also called when going offline or unloaded just before the
 * congestion info buffer is unregistered.
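 * It cancels the CMF hrtimer, zeroes the per-CPU congestion statistics
 * and queues unblock_request_work so any I/O held for bandwidth
 * management is resumed.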
3036 **/ 3037 void 3038 lpfc_cmf_stop(struct lpfc_hba *phba) 3039 { 3040 int cpu; 3041 struct lpfc_cgn_stat *cgs; 3042 3043 /* We only do something if CMF is enabled */ 3044 if (!phba->sli4_hba.pc_sli4_params.cmf) 3045 return; 3046 3047 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3048 "6221 Stop CMF / Cancel Timer\n"); 3049 3050 /* Cancel the CMF timer */ 3051 hrtimer_cancel(&phba->cmf_timer); 3052 3053 /* Zero CMF counters */ 3054 atomic_set(&phba->cmf_busy, 0); 3055 for_each_present_cpu(cpu) { 3056 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3057 atomic64_set(&cgs->total_bytes, 0); 3058 atomic64_set(&cgs->rcv_bytes, 0); 3059 atomic_set(&cgs->rx_io_cnt, 0); 3060 atomic64_set(&cgs->rx_latency, 0); 3061 } 3062 atomic_set(&phba->cmf_bw_wait, 0); 3063 3064 /* Resume any blocked IO - Queue unblock on workqueue */ 3065 queue_work(phba->wq, &phba->unblock_request_work); 3066 } 3067 3068 static inline uint64_t 3069 lpfc_get_max_line_rate(struct lpfc_hba *phba) 3070 { 3071 uint64_t rate = lpfc_sli_port_speed_get(phba); 3072 3073 return ((((unsigned long)rate) * 1024 * 1024) / 10); 3074 } 3075 3076 void 3077 lpfc_cmf_signal_init(struct lpfc_hba *phba) 3078 { 3079 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3080 "6223 Signal CMF init\n"); 3081 3082 /* Use the new fc_linkspeed to recalculate */ 3083 phba->cmf_interval_rate = LPFC_CMF_INTERVAL; 3084 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); 3085 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 3086 phba->cmf_interval_rate, 1000); 3087 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; 3088 3089 /* This is a signal to firmware to sync up CMF BW with link speed */ 3090 lpfc_issue_cmf_sync_wqe(phba, 0, 0); 3091 } 3092 3093 /** 3094 * lpfc_cmf_start - Start CMF processing 3095 * @phba: pointer to lpfc hba data structure. 3096 * 3097 * This is called when the link comes up or if CMF mode is turned OFF 3098 * to Monitor or Managed. 3099 **/ 3100 void 3101 lpfc_cmf_start(struct lpfc_hba *phba) 3102 { 3103 struct lpfc_cgn_stat *cgs; 3104 int cpu; 3105 3106 /* We only do something if CMF is enabled */ 3107 if (!phba->sli4_hba.pc_sli4_params.cmf || 3108 phba->cmf_active_mode == LPFC_CFG_OFF) 3109 return; 3110 3111 /* Reinitialize congestion buffer info */ 3112 lpfc_init_congestion_buf(phba); 3113 3114 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 3115 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 3116 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 3117 atomic_set(&phba->cgn_sync_warn_cnt, 0); 3118 3119 atomic_set(&phba->cmf_busy, 0); 3120 for_each_present_cpu(cpu) { 3121 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3122 atomic64_set(&cgs->total_bytes, 0); 3123 atomic64_set(&cgs->rcv_bytes, 0); 3124 atomic_set(&cgs->rx_io_cnt, 0); 3125 atomic64_set(&cgs->rx_latency, 0); 3126 } 3127 phba->cmf_latency.tv_sec = 0; 3128 phba->cmf_latency.tv_nsec = 0; 3129 3130 lpfc_cmf_signal_init(phba); 3131 3132 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3133 "6222 Start CMF / Timer\n"); 3134 3135 phba->cmf_timer_cnt = 0; 3136 hrtimer_start(&phba->cmf_timer, 3137 ktime_set(0, LPFC_CMF_INTERVAL * 1000000), 3138 HRTIMER_MODE_REL); 3139 /* Setup for latency check in IO cmpl routines */ 3140 ktime_get_real_ts64(&phba->cmf_latency); 3141 3142 atomic_set(&phba->cmf_bw_wait, 0); 3143 atomic_set(&phba->cmf_stop_io, 0); 3144 } 3145 3146 /** 3147 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 3148 * @phba: pointer to lpfc hba data structure. 3149 * 3150 * This routine stops all the timers associated with a HBA. 
 * This function is invoked before either putting a HBA offline or
 * unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	if (phba->pport)
		lpfc_stop_vport_timers(phba->pport);
	cancel_delayed_work_sync(&phba->eq_delay_work);
	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox no wait action.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to the
 * HBA, whether from the sysfs interface or the libdfc interface, will be
 * blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}

/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fix up the temporary rpi assignments.
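 * Vports that are unloading are skipped; every remaining node is given a
 * freshly allocated RPI via lpfc_sli4_alloc_rpi().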
3244 **/ 3245 void 3246 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3247 { 3248 struct lpfc_nodelist *ndlp, *next_ndlp; 3249 struct lpfc_vport **vports; 3250 int i, rpi; 3251 3252 if (phba->sli_rev != LPFC_SLI_REV4) 3253 return; 3254 3255 vports = lpfc_create_vport_work_array(phba); 3256 if (vports == NULL) 3257 return; 3258 3259 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3260 if (vports[i]->load_flag & FC_UNLOADING) 3261 continue; 3262 3263 list_for_each_entry_safe(ndlp, next_ndlp, 3264 &vports[i]->fc_nodes, 3265 nlp_listp) { 3266 rpi = lpfc_sli4_alloc_rpi(phba); 3267 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3268 /* TODO print log? */ 3269 continue; 3270 } 3271 ndlp->nlp_rpi = rpi; 3272 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3273 LOG_NODE | LOG_DISCOVERY, 3274 "0009 Assign RPI x%x to ndlp x%px " 3275 "DID:x%06x flg:x%x\n", 3276 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, 3277 ndlp->nlp_flag); 3278 } 3279 } 3280 lpfc_destroy_vport_work_array(phba, vports); 3281 } 3282 3283 /** 3284 * lpfc_create_expedite_pool - create expedite pool 3285 * @phba: pointer to lpfc hba data structure. 3286 * 3287 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3288 * to expedite pool. Mark them as expedite. 3289 **/ 3290 static void lpfc_create_expedite_pool(struct lpfc_hba *phba) 3291 { 3292 struct lpfc_sli4_hdw_queue *qp; 3293 struct lpfc_io_buf *lpfc_ncmd; 3294 struct lpfc_io_buf *lpfc_ncmd_next; 3295 struct lpfc_epd_pool *epd_pool; 3296 unsigned long iflag; 3297 3298 epd_pool = &phba->epd_pool; 3299 qp = &phba->sli4_hba.hdwq[0]; 3300 3301 spin_lock_init(&epd_pool->lock); 3302 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3303 spin_lock(&epd_pool->lock); 3304 INIT_LIST_HEAD(&epd_pool->list); 3305 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3306 &qp->lpfc_io_buf_list_put, list) { 3307 list_move_tail(&lpfc_ncmd->list, &epd_pool->list); 3308 lpfc_ncmd->expedite = true; 3309 qp->put_io_bufs--; 3310 epd_pool->count++; 3311 if (epd_pool->count >= XRI_BATCH) 3312 break; 3313 } 3314 spin_unlock(&epd_pool->lock); 3315 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3316 } 3317 3318 /** 3319 * lpfc_destroy_expedite_pool - destroy expedite pool 3320 * @phba: pointer to lpfc hba data structure. 3321 * 3322 * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put 3323 * of HWQ 0. Clear the mark. 3324 **/ 3325 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) 3326 { 3327 struct lpfc_sli4_hdw_queue *qp; 3328 struct lpfc_io_buf *lpfc_ncmd; 3329 struct lpfc_io_buf *lpfc_ncmd_next; 3330 struct lpfc_epd_pool *epd_pool; 3331 unsigned long iflag; 3332 3333 epd_pool = &phba->epd_pool; 3334 qp = &phba->sli4_hba.hdwq[0]; 3335 3336 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3337 spin_lock(&epd_pool->lock); 3338 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3339 &epd_pool->list, list) { 3340 list_move_tail(&lpfc_ncmd->list, 3341 &qp->lpfc_io_buf_list_put); 3342 lpfc_ncmd->flags = false; 3343 qp->put_io_bufs++; 3344 epd_pool->count--; 3345 } 3346 spin_unlock(&epd_pool->lock); 3347 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3348 } 3349 3350 /** 3351 * lpfc_create_multixri_pools - create multi-XRI pools 3352 * @phba: pointer to lpfc hba data structure. 3353 * 3354 * This routine initialize public, private per HWQ. Then, move XRIs from 3355 * lpfc_io_buf_list_put to public pool. High and low watermark are also 3356 * Initialized. 
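 * If any per-HWQ pool allocation fails, the pools created so far (and the
 * expedite pool, when NVME is enabled) are freed and cfg_xri_rebalancing
 * is turned off.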
3357 **/ 3358 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3359 { 3360 u32 i, j; 3361 u32 hwq_count; 3362 u32 count_per_hwq; 3363 struct lpfc_io_buf *lpfc_ncmd; 3364 struct lpfc_io_buf *lpfc_ncmd_next; 3365 unsigned long iflag; 3366 struct lpfc_sli4_hdw_queue *qp; 3367 struct lpfc_multixri_pool *multixri_pool; 3368 struct lpfc_pbl_pool *pbl_pool; 3369 struct lpfc_pvt_pool *pvt_pool; 3370 3371 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3372 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3373 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3374 phba->sli4_hba.io_xri_cnt); 3375 3376 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3377 lpfc_create_expedite_pool(phba); 3378 3379 hwq_count = phba->cfg_hdw_queue; 3380 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3381 3382 for (i = 0; i < hwq_count; i++) { 3383 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3384 3385 if (!multixri_pool) { 3386 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3387 "1238 Failed to allocate memory for " 3388 "multixri_pool\n"); 3389 3390 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3391 lpfc_destroy_expedite_pool(phba); 3392 3393 j = 0; 3394 while (j < i) { 3395 qp = &phba->sli4_hba.hdwq[j]; 3396 kfree(qp->p_multixri_pool); 3397 j++; 3398 } 3399 phba->cfg_xri_rebalancing = 0; 3400 return; 3401 } 3402 3403 qp = &phba->sli4_hba.hdwq[i]; 3404 qp->p_multixri_pool = multixri_pool; 3405 3406 multixri_pool->xri_limit = count_per_hwq; 3407 multixri_pool->rrb_next_hwqid = i; 3408 3409 /* Deal with public free xri pool */ 3410 pbl_pool = &multixri_pool->pbl_pool; 3411 spin_lock_init(&pbl_pool->lock); 3412 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3413 spin_lock(&pbl_pool->lock); 3414 INIT_LIST_HEAD(&pbl_pool->list); 3415 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3416 &qp->lpfc_io_buf_list_put, list) { 3417 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3418 qp->put_io_bufs--; 3419 pbl_pool->count++; 3420 } 3421 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3422 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3423 pbl_pool->count, i); 3424 spin_unlock(&pbl_pool->lock); 3425 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3426 3427 /* Deal with private free xri pool */ 3428 pvt_pool = &multixri_pool->pvt_pool; 3429 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3430 pvt_pool->low_watermark = XRI_BATCH; 3431 spin_lock_init(&pvt_pool->lock); 3432 spin_lock_irqsave(&pvt_pool->lock, iflag); 3433 INIT_LIST_HEAD(&pvt_pool->list); 3434 pvt_pool->count = 0; 3435 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3436 } 3437 } 3438 3439 /** 3440 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3441 * @phba: pointer to lpfc hba data structure. 3442 * 3443 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
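 * Unless the driver is unloading, the I/O rings are flushed first so no
 * XRIs remain on outstanding commands while the pools are drained.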
3444 **/ 3445 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3446 { 3447 u32 i; 3448 u32 hwq_count; 3449 struct lpfc_io_buf *lpfc_ncmd; 3450 struct lpfc_io_buf *lpfc_ncmd_next; 3451 unsigned long iflag; 3452 struct lpfc_sli4_hdw_queue *qp; 3453 struct lpfc_multixri_pool *multixri_pool; 3454 struct lpfc_pbl_pool *pbl_pool; 3455 struct lpfc_pvt_pool *pvt_pool; 3456 3457 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3458 lpfc_destroy_expedite_pool(phba); 3459 3460 if (!(phba->pport->load_flag & FC_UNLOADING)) 3461 lpfc_sli_flush_io_rings(phba); 3462 3463 hwq_count = phba->cfg_hdw_queue; 3464 3465 for (i = 0; i < hwq_count; i++) { 3466 qp = &phba->sli4_hba.hdwq[i]; 3467 multixri_pool = qp->p_multixri_pool; 3468 if (!multixri_pool) 3469 continue; 3470 3471 qp->p_multixri_pool = NULL; 3472 3473 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3474 3475 /* Deal with public free xri pool */ 3476 pbl_pool = &multixri_pool->pbl_pool; 3477 spin_lock(&pbl_pool->lock); 3478 3479 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3480 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3481 pbl_pool->count, i); 3482 3483 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3484 &pbl_pool->list, list) { 3485 list_move_tail(&lpfc_ncmd->list, 3486 &qp->lpfc_io_buf_list_put); 3487 qp->put_io_bufs++; 3488 pbl_pool->count--; 3489 } 3490 3491 INIT_LIST_HEAD(&pbl_pool->list); 3492 pbl_pool->count = 0; 3493 3494 spin_unlock(&pbl_pool->lock); 3495 3496 /* Deal with private free xri pool */ 3497 pvt_pool = &multixri_pool->pvt_pool; 3498 spin_lock(&pvt_pool->lock); 3499 3500 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3501 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3502 pvt_pool->count, i); 3503 3504 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3505 &pvt_pool->list, list) { 3506 list_move_tail(&lpfc_ncmd->list, 3507 &qp->lpfc_io_buf_list_put); 3508 qp->put_io_bufs++; 3509 pvt_pool->count--; 3510 } 3511 3512 INIT_LIST_HEAD(&pvt_pool->list); 3513 pvt_pool->count = 0; 3514 3515 spin_unlock(&pvt_pool->lock); 3516 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3517 3518 kfree(multixri_pool); 3519 } 3520 } 3521 3522 /** 3523 * lpfc_online - Initialize and bring a HBA online 3524 * @phba: pointer to lpfc hba data structure. 3525 * 3526 * This routine initializes the HBA and brings a HBA online. During this 3527 * process, the management interface is blocked to prevent user space access 3528 * to the HBA interfering with the driver initialization. 3529 * 3530 * Return codes 3531 * 0 - successful 3532 * 1 - failed 3533 **/ 3534 int 3535 lpfc_online(struct lpfc_hba *phba) 3536 { 3537 struct lpfc_vport *vport; 3538 struct lpfc_vport **vports; 3539 int i, error = 0; 3540 bool vpis_cleared = false; 3541 3542 if (!phba) 3543 return 0; 3544 vport = phba->pport; 3545 3546 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3547 return 0; 3548 3549 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3550 "0458 Bring Adapter online\n"); 3551 3552 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3553 3554 if (phba->sli_rev == LPFC_SLI_REV4) { 3555 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3556 lpfc_unblock_mgmt_io(phba); 3557 return 1; 3558 } 3559 spin_lock_irq(&phba->hbalock); 3560 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3561 vpis_cleared = true; 3562 spin_unlock_irq(&phba->hbalock); 3563 3564 /* Reestablish the local initiator port. 3565 * The offline process destroyed the previous lport. 
3566 */ 3567 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3568 !phba->nvmet_support) { 3569 error = lpfc_nvme_create_localport(phba->pport); 3570 if (error) 3571 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3572 "6132 NVME restore reg failed " 3573 "on nvmei error x%x\n", error); 3574 } 3575 } else { 3576 lpfc_sli_queue_init(phba); 3577 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3578 lpfc_unblock_mgmt_io(phba); 3579 return 1; 3580 } 3581 } 3582 3583 vports = lpfc_create_vport_work_array(phba); 3584 if (vports != NULL) { 3585 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3586 struct Scsi_Host *shost; 3587 shost = lpfc_shost_from_vport(vports[i]); 3588 spin_lock_irq(shost->host_lock); 3589 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3590 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3591 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3592 if (phba->sli_rev == LPFC_SLI_REV4) { 3593 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3594 if ((vpis_cleared) && 3595 (vports[i]->port_type != 3596 LPFC_PHYSICAL_PORT)) 3597 vports[i]->vpi = 0; 3598 } 3599 spin_unlock_irq(shost->host_lock); 3600 } 3601 } 3602 lpfc_destroy_vport_work_array(phba, vports); 3603 3604 if (phba->cfg_xri_rebalancing) 3605 lpfc_create_multixri_pools(phba); 3606 3607 lpfc_cpuhp_add(phba); 3608 3609 lpfc_unblock_mgmt_io(phba); 3610 return 0; 3611 } 3612 3613 /** 3614 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3615 * @phba: pointer to lpfc hba data structure. 3616 * 3617 * This routine marks a HBA's management interface as not blocked. Once the 3618 * HBA's management interface is marked as not blocked, all the user space 3619 * access to the HBA, whether they are from sysfs interface or libdfc 3620 * interface will be allowed. The HBA is set to block the management interface 3621 * when the driver prepares the HBA interface for online or offline and then 3622 * set to unblock the management interface afterwards. 3623 **/ 3624 void 3625 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3626 { 3627 unsigned long iflag; 3628 3629 spin_lock_irqsave(&phba->hbalock, iflag); 3630 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3631 spin_unlock_irqrestore(&phba->hbalock, iflag); 3632 } 3633 3634 /** 3635 * lpfc_offline_prep - Prepare a HBA to be brought offline 3636 * @phba: pointer to lpfc hba data structure. 3637 * @mbx_action: flag for mailbox shutdown action. 3638 * 3639 * This routine is invoked to prepare a HBA to be brought offline. It performs 3640 * unregistration login to all the nodes on all vports and flushes the mailbox 3641 * queue to make it ready to be brought offline. 
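 * The @mbx_action argument (for example LPFC_MBX_WAIT) controls how the
 * mailbox subsystem is shut down at the end of the prep sequence.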
3642 **/ 3643 void 3644 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3645 { 3646 struct lpfc_vport *vport = phba->pport; 3647 struct lpfc_nodelist *ndlp, *next_ndlp; 3648 struct lpfc_vport **vports; 3649 struct Scsi_Host *shost; 3650 int i; 3651 int offline = 0; 3652 3653 if (vport->fc_flag & FC_OFFLINE_MODE) 3654 return; 3655 3656 lpfc_block_mgmt_io(phba, mbx_action); 3657 3658 lpfc_linkdown(phba); 3659 3660 offline = pci_channel_offline(phba->pcidev); 3661 3662 /* Issue an unreg_login to all nodes on all vports */ 3663 vports = lpfc_create_vport_work_array(phba); 3664 if (vports != NULL) { 3665 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3666 if (vports[i]->load_flag & FC_UNLOADING) 3667 continue; 3668 shost = lpfc_shost_from_vport(vports[i]); 3669 spin_lock_irq(shost->host_lock); 3670 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3671 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3672 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3673 spin_unlock_irq(shost->host_lock); 3674 3675 shost = lpfc_shost_from_vport(vports[i]); 3676 list_for_each_entry_safe(ndlp, next_ndlp, 3677 &vports[i]->fc_nodes, 3678 nlp_listp) { 3679 3680 spin_lock_irq(&ndlp->lock); 3681 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3682 spin_unlock_irq(&ndlp->lock); 3683 3684 if (offline) { 3685 spin_lock_irq(&ndlp->lock); 3686 ndlp->nlp_flag &= ~(NLP_UNREG_INP | 3687 NLP_RPI_REGISTERED); 3688 spin_unlock_irq(&ndlp->lock); 3689 } else { 3690 lpfc_unreg_rpi(vports[i], ndlp); 3691 } 3692 /* 3693 * Whenever an SLI4 port goes offline, free the 3694 * RPI. Get a new RPI when the adapter port 3695 * comes back online. 3696 */ 3697 if (phba->sli_rev == LPFC_SLI_REV4) { 3698 lpfc_printf_vlog(vports[i], KERN_INFO, 3699 LOG_NODE | LOG_DISCOVERY, 3700 "0011 Free RPI x%x on " 3701 "ndlp: x%px did x%x\n", 3702 ndlp->nlp_rpi, ndlp, 3703 ndlp->nlp_DID); 3704 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3705 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3706 } 3707 3708 if (ndlp->nlp_type & NLP_FABRIC) { 3709 lpfc_disc_state_machine(vports[i], ndlp, 3710 NULL, NLP_EVT_DEVICE_RECOVERY); 3711 3712 /* Don't remove the node unless the 3713 * has been unregistered with the 3714 * transport. If so, let dev_loss 3715 * take care of the node. 3716 */ 3717 if (!(ndlp->fc4_xpt_flags & 3718 (NVME_XPT_REGD | SCSI_XPT_REGD))) 3719 lpfc_disc_state_machine 3720 (vports[i], ndlp, 3721 NULL, 3722 NLP_EVT_DEVICE_RM); 3723 } 3724 } 3725 } 3726 } 3727 lpfc_destroy_vport_work_array(phba, vports); 3728 3729 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3730 3731 if (phba->wq) 3732 flush_workqueue(phba->wq); 3733 } 3734 3735 /** 3736 * lpfc_offline - Bring a HBA offline 3737 * @phba: pointer to lpfc hba data structure. 3738 * 3739 * This routine actually brings a HBA offline. It stops all the timers 3740 * associated with the HBA, brings down the SLI layer, and eventually 3741 * marks the HBA as in offline state for the upper layer protocol. 3742 **/ 3743 void 3744 lpfc_offline(struct lpfc_hba *phba) 3745 { 3746 struct Scsi_Host *shost; 3747 struct lpfc_vport **vports; 3748 int i; 3749 3750 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3751 return; 3752 3753 /* stop port and all timers associated with this hba */ 3754 lpfc_stop_port(phba); 3755 3756 /* Tear down the local and target port registrations. The 3757 * nvme transports need to cleanup. 
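 * Both the NVMET targetport and the NVMe localport are destroyed here so
 * the transports can release their references before the SLI layer goes down.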
3758 */ 3759 lpfc_nvmet_destroy_targetport(phba); 3760 lpfc_nvme_destroy_localport(phba->pport); 3761 3762 vports = lpfc_create_vport_work_array(phba); 3763 if (vports != NULL) 3764 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3765 lpfc_stop_vport_timers(vports[i]); 3766 lpfc_destroy_vport_work_array(phba, vports); 3767 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3768 "0460 Bring Adapter offline\n"); 3769 /* Bring down the SLI Layer and cleanup. The HBA is offline 3770 now. */ 3771 lpfc_sli_hba_down(phba); 3772 spin_lock_irq(&phba->hbalock); 3773 phba->work_ha = 0; 3774 spin_unlock_irq(&phba->hbalock); 3775 vports = lpfc_create_vport_work_array(phba); 3776 if (vports != NULL) 3777 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3778 shost = lpfc_shost_from_vport(vports[i]); 3779 spin_lock_irq(shost->host_lock); 3780 vports[i]->work_port_events = 0; 3781 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3782 spin_unlock_irq(shost->host_lock); 3783 } 3784 lpfc_destroy_vport_work_array(phba, vports); 3785 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled 3786 * in hba_unset 3787 */ 3788 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3789 __lpfc_cpuhp_remove(phba); 3790 3791 if (phba->cfg_xri_rebalancing) 3792 lpfc_destroy_multixri_pools(phba); 3793 } 3794 3795 /** 3796 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3797 * @phba: pointer to lpfc hba data structure. 3798 * 3799 * This routine is to free all the SCSI buffers and IOCBs from the driver 3800 * list back to kernel. It is called from lpfc_pci_remove_one to free 3801 * the internal resources before the device is removed from the system. 3802 **/ 3803 static void 3804 lpfc_scsi_free(struct lpfc_hba *phba) 3805 { 3806 struct lpfc_io_buf *sb, *sb_next; 3807 3808 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3809 return; 3810 3811 spin_lock_irq(&phba->hbalock); 3812 3813 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3814 3815 spin_lock(&phba->scsi_buf_list_put_lock); 3816 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3817 list) { 3818 list_del(&sb->list); 3819 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3820 sb->dma_handle); 3821 kfree(sb); 3822 phba->total_scsi_bufs--; 3823 } 3824 spin_unlock(&phba->scsi_buf_list_put_lock); 3825 3826 spin_lock(&phba->scsi_buf_list_get_lock); 3827 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3828 list) { 3829 list_del(&sb->list); 3830 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3831 sb->dma_handle); 3832 kfree(sb); 3833 phba->total_scsi_bufs--; 3834 } 3835 spin_unlock(&phba->scsi_buf_list_get_lock); 3836 spin_unlock_irq(&phba->hbalock); 3837 } 3838 3839 /** 3840 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3841 * @phba: pointer to lpfc hba data structure. 3842 * 3843 * This routine is to free all the IO buffers and IOCBs from the driver 3844 * list back to kernel. It is called from lpfc_pci_remove_one to free 3845 * the internal resources before the device is removed from the system. 3846 **/ 3847 void 3848 lpfc_io_free(struct lpfc_hba *phba) 3849 { 3850 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3851 struct lpfc_sli4_hdw_queue *qp; 3852 int idx; 3853 3854 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3855 qp = &phba->sli4_hba.hdwq[idx]; 3856 /* Release all the lpfc_nvme_bufs maintained by this host. 
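 * Buffers are drained from both the per-hwq put and get lists; each
 * buffer's DMA area goes back to lpfc_sg_dma_buf_pool before the
 * descriptor itself is freed.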
*/ 3857 spin_lock(&qp->io_buf_list_put_lock); 3858 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3859 &qp->lpfc_io_buf_list_put, 3860 list) { 3861 list_del(&lpfc_ncmd->list); 3862 qp->put_io_bufs--; 3863 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3864 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3865 if (phba->cfg_xpsgl && !phba->nvmet_support) 3866 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3867 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3868 kfree(lpfc_ncmd); 3869 qp->total_io_bufs--; 3870 } 3871 spin_unlock(&qp->io_buf_list_put_lock); 3872 3873 spin_lock(&qp->io_buf_list_get_lock); 3874 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3875 &qp->lpfc_io_buf_list_get, 3876 list) { 3877 list_del(&lpfc_ncmd->list); 3878 qp->get_io_bufs--; 3879 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3880 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3881 if (phba->cfg_xpsgl && !phba->nvmet_support) 3882 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3883 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3884 kfree(lpfc_ncmd); 3885 qp->total_io_bufs--; 3886 } 3887 spin_unlock(&qp->io_buf_list_get_lock); 3888 } 3889 } 3890 3891 /** 3892 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3893 * @phba: pointer to lpfc hba data structure. 3894 * 3895 * This routine first calculates the sizes of the current els and allocated 3896 * scsi sgl lists, and then goes through all sgls to updates the physical 3897 * XRIs assigned due to port function reset. During port initialization, the 3898 * current els and allocated scsi sgl lists are 0s. 3899 * 3900 * Return codes 3901 * 0 - successful (for now, it always returns 0) 3902 **/ 3903 int 3904 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3905 { 3906 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3907 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3908 LIST_HEAD(els_sgl_list); 3909 int rc; 3910 3911 /* 3912 * update on pci function's els xri-sgl list 3913 */ 3914 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3915 3916 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3917 /* els xri-sgl expanded */ 3918 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3919 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3920 "3157 ELS xri-sgl count increased from " 3921 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3922 els_xri_cnt); 3923 /* allocate the additional els sgls */ 3924 for (i = 0; i < xri_cnt; i++) { 3925 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3926 GFP_KERNEL); 3927 if (sglq_entry == NULL) { 3928 lpfc_printf_log(phba, KERN_ERR, 3929 LOG_TRACE_EVENT, 3930 "2562 Failure to allocate an " 3931 "ELS sgl entry:%d\n", i); 3932 rc = -ENOMEM; 3933 goto out_free_mem; 3934 } 3935 sglq_entry->buff_type = GEN_BUFF_TYPE; 3936 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3937 &sglq_entry->phys); 3938 if (sglq_entry->virt == NULL) { 3939 kfree(sglq_entry); 3940 lpfc_printf_log(phba, KERN_ERR, 3941 LOG_TRACE_EVENT, 3942 "2563 Failure to allocate an " 3943 "ELS mbuf:%d\n", i); 3944 rc = -ENOMEM; 3945 goto out_free_mem; 3946 } 3947 sglq_entry->sgl = sglq_entry->virt; 3948 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3949 sglq_entry->state = SGL_FREED; 3950 list_add_tail(&sglq_entry->list, &els_sgl_list); 3951 } 3952 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 3953 list_splice_init(&els_sgl_list, 3954 &phba->sli4_hba.lpfc_els_sgl_list); 3955 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 3956 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3957 /* els xri-sgl shrinked */ 3958 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3959 lpfc_printf_log(phba, KERN_INFO, 
LOG_SLI, 3960 "3158 ELS xri-sgl count decreased from " 3961 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3962 els_xri_cnt); 3963 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 3964 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, 3965 &els_sgl_list); 3966 /* release extra els sgls from list */ 3967 for (i = 0; i < xri_cnt; i++) { 3968 list_remove_head(&els_sgl_list, 3969 sglq_entry, struct lpfc_sglq, list); 3970 if (sglq_entry) { 3971 __lpfc_mbuf_free(phba, sglq_entry->virt, 3972 sglq_entry->phys); 3973 kfree(sglq_entry); 3974 } 3975 } 3976 list_splice_init(&els_sgl_list, 3977 &phba->sli4_hba.lpfc_els_sgl_list); 3978 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 3979 } else 3980 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3981 "3163 ELS xri-sgl count unchanged: %d\n", 3982 els_xri_cnt); 3983 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 3984 3985 /* update xris to els sgls on the list */ 3986 sglq_entry = NULL; 3987 sglq_entry_next = NULL; 3988 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3989 &phba->sli4_hba.lpfc_els_sgl_list, list) { 3990 lxri = lpfc_sli4_next_xritag(phba); 3991 if (lxri == NO_XRI) { 3992 lpfc_printf_log(phba, KERN_ERR, 3993 LOG_TRACE_EVENT, 3994 "2400 Failed to allocate xri for " 3995 "ELS sgl\n"); 3996 rc = -ENOMEM; 3997 goto out_free_mem; 3998 } 3999 sglq_entry->sli4_lxritag = lxri; 4000 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4001 } 4002 return 0; 4003 4004 out_free_mem: 4005 lpfc_free_els_sgl_list(phba); 4006 return rc; 4007 } 4008 4009 /** 4010 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping 4011 * @phba: pointer to lpfc hba data structure. 4012 * 4013 * This routine first calculates the sizes of the current els and allocated 4014 * scsi sgl lists, and then goes through all sgls to update the physical 4015 * XRIs assigned due to port function reset. During port initialization, the 4016 * current els and allocated scsi sgl lists are 0s.
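 * For NVMET, every XRI that is not reserved for ELS is dedicated to I/O,
 * so the target sgl list is resized to (max_xri - els_xri_cnt) entries.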
4017 * 4018 * Return codes 4019 * 0 - successful (for now, it always returns 0) 4020 **/ 4021 int 4022 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 4023 { 4024 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 4025 uint16_t i, lxri, xri_cnt, els_xri_cnt; 4026 uint16_t nvmet_xri_cnt; 4027 LIST_HEAD(nvmet_sgl_list); 4028 int rc; 4029 4030 /* 4031 * update on pci function's nvmet xri-sgl list 4032 */ 4033 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4034 4035 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 4036 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4037 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 4038 /* els xri-sgl expanded */ 4039 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 4040 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4041 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 4042 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 4043 /* allocate the additional nvmet sgls */ 4044 for (i = 0; i < xri_cnt; i++) { 4045 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 4046 GFP_KERNEL); 4047 if (sglq_entry == NULL) { 4048 lpfc_printf_log(phba, KERN_ERR, 4049 LOG_TRACE_EVENT, 4050 "6303 Failure to allocate an " 4051 "NVMET sgl entry:%d\n", i); 4052 rc = -ENOMEM; 4053 goto out_free_mem; 4054 } 4055 sglq_entry->buff_type = NVMET_BUFF_TYPE; 4056 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 4057 &sglq_entry->phys); 4058 if (sglq_entry->virt == NULL) { 4059 kfree(sglq_entry); 4060 lpfc_printf_log(phba, KERN_ERR, 4061 LOG_TRACE_EVENT, 4062 "6304 Failure to allocate an " 4063 "NVMET buf:%d\n", i); 4064 rc = -ENOMEM; 4065 goto out_free_mem; 4066 } 4067 sglq_entry->sgl = sglq_entry->virt; 4068 memset(sglq_entry->sgl, 0, 4069 phba->cfg_sg_dma_buf_size); 4070 sglq_entry->state = SGL_FREED; 4071 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 4072 } 4073 spin_lock_irq(&phba->hbalock); 4074 spin_lock(&phba->sli4_hba.sgl_list_lock); 4075 list_splice_init(&nvmet_sgl_list, 4076 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4077 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4078 spin_unlock_irq(&phba->hbalock); 4079 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 4080 /* nvmet xri-sgl shrunk */ 4081 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 4082 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4083 "6305 NVMET xri-sgl count decreased from " 4084 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 4085 nvmet_xri_cnt); 4086 spin_lock_irq(&phba->hbalock); 4087 spin_lock(&phba->sli4_hba.sgl_list_lock); 4088 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 4089 &nvmet_sgl_list); 4090 /* release extra nvmet sgls from list */ 4091 for (i = 0; i < xri_cnt; i++) { 4092 list_remove_head(&nvmet_sgl_list, 4093 sglq_entry, struct lpfc_sglq, list); 4094 if (sglq_entry) { 4095 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 4096 sglq_entry->phys); 4097 kfree(sglq_entry); 4098 } 4099 } 4100 list_splice_init(&nvmet_sgl_list, 4101 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4102 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4103 spin_unlock_irq(&phba->hbalock); 4104 } else 4105 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4106 "6306 NVMET xri-sgl count unchanged: %d\n", 4107 nvmet_xri_cnt); 4108 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 4109 4110 /* update xris to nvmet sgls on the list */ 4111 sglq_entry = NULL; 4112 sglq_entry_next = NULL; 4113 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 4114 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 4115 lxri = lpfc_sli4_next_xritag(phba); 4116 if (lxri == NO_XRI) { 4117 lpfc_printf_log(phba, KERN_ERR, 
4118 LOG_TRACE_EVENT, 4119 "6307 Failed to allocate xri for " 4120 "NVMET sgl\n"); 4121 rc = -ENOMEM; 4122 goto out_free_mem; 4123 } 4124 sglq_entry->sli4_lxritag = lxri; 4125 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4126 } 4127 return 0; 4128 4129 out_free_mem: 4130 lpfc_free_nvmet_sgl_list(phba); 4131 return rc; 4132 } 4133 4134 int 4135 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 4136 { 4137 LIST_HEAD(blist); 4138 struct lpfc_sli4_hdw_queue *qp; 4139 struct lpfc_io_buf *lpfc_cmd; 4140 struct lpfc_io_buf *iobufp, *prev_iobufp; 4141 int idx, cnt, xri, inserted; 4142 4143 cnt = 0; 4144 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4145 qp = &phba->sli4_hba.hdwq[idx]; 4146 spin_lock_irq(&qp->io_buf_list_get_lock); 4147 spin_lock(&qp->io_buf_list_put_lock); 4148 4149 /* Take everything off the get and put lists */ 4150 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 4151 list_splice(&qp->lpfc_io_buf_list_put, &blist); 4152 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 4153 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 4154 cnt += qp->get_io_bufs + qp->put_io_bufs; 4155 qp->get_io_bufs = 0; 4156 qp->put_io_bufs = 0; 4157 qp->total_io_bufs = 0; 4158 spin_unlock(&qp->io_buf_list_put_lock); 4159 spin_unlock_irq(&qp->io_buf_list_get_lock); 4160 } 4161 4162 /* 4163 * Take IO buffers off blist and put on cbuf sorted by XRI. 4164 * This is because POST_SGL takes a sequential range of XRIs 4165 * to post to the firmware. 4166 */ 4167 for (idx = 0; idx < cnt; idx++) { 4168 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 4169 if (!lpfc_cmd) 4170 return cnt; 4171 if (idx == 0) { 4172 list_add_tail(&lpfc_cmd->list, cbuf); 4173 continue; 4174 } 4175 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 4176 inserted = 0; 4177 prev_iobufp = NULL; 4178 list_for_each_entry(iobufp, cbuf, list) { 4179 if (xri < iobufp->cur_iocbq.sli4_xritag) { 4180 if (prev_iobufp) 4181 list_add(&lpfc_cmd->list, 4182 &prev_iobufp->list); 4183 else 4184 list_add(&lpfc_cmd->list, cbuf); 4185 inserted = 1; 4186 break; 4187 } 4188 prev_iobufp = iobufp; 4189 } 4190 if (!inserted) 4191 list_add_tail(&lpfc_cmd->list, cbuf); 4192 } 4193 return cnt; 4194 } 4195 4196 int 4197 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 4198 { 4199 struct lpfc_sli4_hdw_queue *qp; 4200 struct lpfc_io_buf *lpfc_cmd; 4201 int idx, cnt; 4202 4203 qp = phba->sli4_hba.hdwq; 4204 cnt = 0; 4205 while (!list_empty(cbuf)) { 4206 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4207 list_remove_head(cbuf, lpfc_cmd, 4208 struct lpfc_io_buf, list); 4209 if (!lpfc_cmd) 4210 return cnt; 4211 cnt++; 4212 qp = &phba->sli4_hba.hdwq[idx]; 4213 lpfc_cmd->hdwq_no = idx; 4214 lpfc_cmd->hdwq = qp; 4215 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 4216 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 4217 spin_lock(&qp->io_buf_list_put_lock); 4218 list_add_tail(&lpfc_cmd->list, 4219 &qp->lpfc_io_buf_list_put); 4220 qp->put_io_bufs++; 4221 qp->total_io_bufs++; 4222 spin_unlock(&qp->io_buf_list_put_lock); 4223 } 4224 } 4225 return cnt; 4226 } 4227 4228 /** 4229 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 4230 * @phba: pointer to lpfc hba data structure. 4231 * 4232 * This routine first calculates the sizes of the current els and allocated 4233 * scsi sgl lists, and then goes through all sgls to updates the physical 4234 * XRIs assigned due to port function reset. During port initialization, the 4235 * current els and allocated scsi sgl lists are 0s. 
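 * All IO buffers are first flushed from the per-hwq get/put lists, extra
 * buffers are freed if the new maximum is smaller, fresh XRIs are assigned
 * to the remainder, and the buffers are then redistributed across the
 * hardware queues.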
4236 * 4237 * Return codes 4238 * 0 - successful (for now, it always returns 0) 4239 **/ 4240 int 4241 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) 4242 { 4243 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 4244 uint16_t i, lxri, els_xri_cnt; 4245 uint16_t io_xri_cnt, io_xri_max; 4246 LIST_HEAD(io_sgl_list); 4247 int rc, cnt; 4248 4249 /* 4250 * update on pci function's allocated nvme xri-sgl list 4251 */ 4252 4253 /* maximum number of xris available for nvme buffers */ 4254 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4255 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4256 phba->sli4_hba.io_xri_max = io_xri_max; 4257 4258 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4259 "6074 Current allocated XRI sgl count:%d, " 4260 "maximum XRI count:%d\n", 4261 phba->sli4_hba.io_xri_cnt, 4262 phba->sli4_hba.io_xri_max); 4263 4264 cnt = lpfc_io_buf_flush(phba, &io_sgl_list); 4265 4266 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { 4267 /* max nvme xri shrunk below the allocated nvme buffers */ 4268 io_xri_cnt = phba->sli4_hba.io_xri_cnt - 4269 phba->sli4_hba.io_xri_max; 4270 /* release the extra allocated nvme buffers */ 4271 for (i = 0; i < io_xri_cnt; i++) { 4272 list_remove_head(&io_sgl_list, lpfc_ncmd, 4273 struct lpfc_io_buf, list); 4274 if (lpfc_ncmd) { 4275 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4276 lpfc_ncmd->data, 4277 lpfc_ncmd->dma_handle); 4278 kfree(lpfc_ncmd); 4279 } 4280 } 4281 phba->sli4_hba.io_xri_cnt -= io_xri_cnt; 4282 } 4283 4284 /* update xris associated to remaining allocated nvme buffers */ 4285 lpfc_ncmd = NULL; 4286 lpfc_ncmd_next = NULL; 4287 phba->sli4_hba.io_xri_cnt = cnt; 4288 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 4289 &io_sgl_list, list) { 4290 lxri = lpfc_sli4_next_xritag(phba); 4291 if (lxri == NO_XRI) { 4292 lpfc_printf_log(phba, KERN_ERR, 4293 LOG_TRACE_EVENT, 4294 "6075 Failed to allocate xri for " 4295 "nvme buffer\n"); 4296 rc = -ENOMEM; 4297 goto out_free_mem; 4298 } 4299 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 4300 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4301 } 4302 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); 4303 return 0; 4304 4305 out_free_mem: 4306 lpfc_io_free(phba); 4307 return rc; 4308 } 4309 4310 /** 4311 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec 4312 * @phba: Pointer to lpfc hba data structure. 4313 * @num_to_alloc: The requested number of buffers to allocate. 4314 * 4315 * This routine allocates nvme buffers for a device with SLI-4 interface spec; 4316 * each nvme buffer contains all the necessary information needed to initiate 4317 * an I/O. After allocating up to @num_to_alloc IO buffers and putting 4318 * them on a list, it posts them to the port by using SGL block post. 4319 * 4320 * Return codes: 4321 * int - number of IO buffers that were allocated and posted. 4322 * 0 = failure, less than num_to_alloc is a partial failure. 4323 **/ 4324 int 4325 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4326 { 4327 struct lpfc_io_buf *lpfc_ncmd; 4328 struct lpfc_iocbq *pwqeq; 4329 uint16_t iotag, lxri = 0; 4330 int bcnt, num_posted; 4331 LIST_HEAD(prep_nblist); 4332 LIST_HEAD(post_nblist); 4333 LIST_HEAD(nvme_nblist); 4334 4335 phba->sli4_hba.io_xri_cnt = 0; 4336 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4337 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); 4338 if (!lpfc_ncmd) 4339 break; 4340 /* 4341 * Get memory from the pci pool to map the virt space to 4342 * pci bus space for an I/O.
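 * dma_pool_zalloc() hands back both the kernel virtual address and the
 * bus address (dma_handle) that is later used to build the SGL.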
The DMA buffer includes the 4343 * number of SGE's necessary to support the sg_tablesize. 4344 */ 4345 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4346 GFP_KERNEL, 4347 &lpfc_ncmd->dma_handle); 4348 if (!lpfc_ncmd->data) { 4349 kfree(lpfc_ncmd); 4350 break; 4351 } 4352 4353 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4354 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4355 } else { 4356 /* 4357 * 4K Page alignment is CRITICAL to BlockGuard, double 4358 * check to be sure. 4359 */ 4360 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4361 (((unsigned long)(lpfc_ncmd->data) & 4362 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4363 lpfc_printf_log(phba, KERN_ERR, 4364 LOG_TRACE_EVENT, 4365 "3369 Memory alignment err: " 4366 "addr=%lx\n", 4367 (unsigned long)lpfc_ncmd->data); 4368 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4369 lpfc_ncmd->data, 4370 lpfc_ncmd->dma_handle); 4371 kfree(lpfc_ncmd); 4372 break; 4373 } 4374 } 4375 4376 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4377 4378 lxri = lpfc_sli4_next_xritag(phba); 4379 if (lxri == NO_XRI) { 4380 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4381 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4382 kfree(lpfc_ncmd); 4383 break; 4384 } 4385 pwqeq = &lpfc_ncmd->cur_iocbq; 4386 4387 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4388 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4389 if (iotag == 0) { 4390 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4391 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4392 kfree(lpfc_ncmd); 4393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4394 "6121 Failed to allocate IOTAG for" 4395 " XRI:0x%x\n", lxri); 4396 lpfc_sli4_free_xri(phba, lxri); 4397 break; 4398 } 4399 pwqeq->sli4_lxritag = lxri; 4400 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4401 pwqeq->context1 = lpfc_ncmd; 4402 4403 /* Initialize local short-hand pointers. 
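 * dma_sgl and dma_phys_sgl simply alias the buffer's virtual and bus
 * addresses for quick access during I/O setup.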
*/ 4404 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4405 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4406 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4407 spin_lock_init(&lpfc_ncmd->buf_lock); 4408 4409 /* add the nvme buffer to a post list */ 4410 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4411 phba->sli4_hba.io_xri_cnt++; 4412 } 4413 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4414 "6114 Allocate %d out of %d requested new NVME " 4415 "buffers\n", bcnt, num_to_alloc); 4416 4417 /* post the list of nvme buffer sgls to port if available */ 4418 if (!list_empty(&post_nblist)) 4419 num_posted = lpfc_sli4_post_io_sgl_list( 4420 phba, &post_nblist, bcnt); 4421 else 4422 num_posted = 0; 4423 4424 return num_posted; 4425 } 4426 4427 static uint64_t 4428 lpfc_get_wwpn(struct lpfc_hba *phba) 4429 { 4430 uint64_t wwn; 4431 int rc; 4432 LPFC_MBOXQ_t *mboxq; 4433 MAILBOX_t *mb; 4434 4435 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4436 GFP_KERNEL); 4437 if (!mboxq) 4438 return (uint64_t)-1; 4439 4440 /* First get WWN of HBA instance */ 4441 lpfc_read_nv(phba, mboxq); 4442 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4443 if (rc != MBX_SUCCESS) { 4444 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4445 "6019 Mailbox failed, mbxCmd x%x " 4446 "READ_NV, mbxStatus x%x\n", 4447 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4448 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4449 mempool_free(mboxq, phba->mbox_mem_pool); 4450 return (uint64_t) -1; 4451 } 4452 mb = &mboxq->u.mb; 4453 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4454 /* wwn is WWPN of HBA instance */ 4455 mempool_free(mboxq, phba->mbox_mem_pool); 4456 if (phba->sli_rev == LPFC_SLI_REV4) 4457 return be64_to_cpu(wwn); 4458 else 4459 return rol64(wwn, 32); 4460 } 4461 4462 /** 4463 * lpfc_vmid_res_alloc - Allocates resources for VMID 4464 * @phba: pointer to lpfc hba data structure. 4465 * @vport: pointer to vport data structure 4466 * 4467 * This routine allocates the resources needed for the VMID. 4468 * 4469 * Return codes 4470 * 0 on Success 4471 * Non-0 on Failure 4472 */ 4473 static int 4474 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) 4475 { 4476 /* VMID feature is supported only on SLI4 */ 4477 if (phba->sli_rev == LPFC_SLI_REV3) { 4478 phba->cfg_vmid_app_header = 0; 4479 phba->cfg_vmid_priority_tagging = 0; 4480 } 4481 4482 if (lpfc_is_vmid_enabled(phba)) { 4483 vport->vmid = 4484 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), 4485 GFP_KERNEL); 4486 if (!vport->vmid) 4487 return -ENOMEM; 4488 4489 rwlock_init(&vport->vmid_lock); 4490 4491 /* Set the VMID parameters for the vport */ 4492 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; 4493 vport->vmid_inactivity_timeout = 4494 phba->cfg_vmid_inactivity_timeout; 4495 vport->max_vmid = phba->cfg_max_vmid; 4496 vport->cur_vmid_cnt = 0; 4497 4498 vport->vmid_priority_range = bitmap_zalloc 4499 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); 4500 4501 if (!vport->vmid_priority_range) { 4502 kfree(vport->vmid); 4503 return -ENOMEM; 4504 } 4505 4506 hash_init(vport->hash_table); 4507 } 4508 return 0; 4509 } 4510 4511 /** 4512 * lpfc_create_port - Create an FC port 4513 * @phba: pointer to lpfc hba data structure. 4514 * @instance: a unique integer ID to this FC port. 4515 * @dev: pointer to the device data structure. 4516 * 4517 * This routine creates an FC port for the upper layer protocol. The FC port 4518 * can be created on top of either a physical port or a virtual port provided 4519 * by the HBA.
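 * A physical port is indicated by @dev pointing at the PCI device; any
 * other @dev creates an NPIV virtual port.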
This routine also allocates a SCSI host data structure (shost) 4520 * and associates the FC port created before adding the shost into the SCSI 4521 * layer. 4522 * 4523 * Return codes 4524 * @vport - pointer to the virtual N_Port data structure. 4525 * NULL - port create failed. 4526 **/ 4527 struct lpfc_vport * 4528 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4529 { 4530 struct lpfc_vport *vport; 4531 struct Scsi_Host *shost = NULL; 4532 struct scsi_host_template *template; 4533 int error = 0; 4534 int i; 4535 uint64_t wwn; 4536 bool use_no_reset_hba = false; 4537 int rc; 4538 4539 if (lpfc_no_hba_reset_cnt) { 4540 if (phba->sli_rev < LPFC_SLI_REV4 && 4541 dev == &phba->pcidev->dev) { 4542 /* Reset the port first */ 4543 lpfc_sli_brdrestart(phba); 4544 rc = lpfc_sli_chipset_init(phba); 4545 if (rc) 4546 return NULL; 4547 } 4548 wwn = lpfc_get_wwpn(phba); 4549 } 4550 4551 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4552 if (wwn == lpfc_no_hba_reset[i]) { 4553 lpfc_printf_log(phba, KERN_ERR, 4554 LOG_TRACE_EVENT, 4555 "6020 Setting use_no_reset port=%llx\n", 4556 wwn); 4557 use_no_reset_hba = true; 4558 break; 4559 } 4560 } 4561 4562 /* Seed template for SCSI host registration */ 4563 if (dev == &phba->pcidev->dev) { 4564 template = &phba->port_template; 4565 4566 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4567 /* Seed physical port template */ 4568 memcpy(template, &lpfc_template, sizeof(*template)); 4569 4570 if (use_no_reset_hba) 4571 /* template is for a no reset SCSI Host */ 4572 template->eh_host_reset_handler = NULL; 4573 4574 /* Template for all vports this physical port creates */ 4575 memcpy(&phba->vport_template, &lpfc_template, 4576 sizeof(*template)); 4577 phba->vport_template.shost_groups = lpfc_vport_groups; 4578 phba->vport_template.eh_bus_reset_handler = NULL; 4579 phba->vport_template.eh_host_reset_handler = NULL; 4580 phba->vport_template.vendor_id = 0; 4581 4582 /* Initialize the host templates with updated value */ 4583 if (phba->sli_rev == LPFC_SLI_REV4) { 4584 template->sg_tablesize = phba->cfg_scsi_seg_cnt; 4585 phba->vport_template.sg_tablesize = 4586 phba->cfg_scsi_seg_cnt; 4587 } else { 4588 template->sg_tablesize = phba->cfg_sg_seg_cnt; 4589 phba->vport_template.sg_tablesize = 4590 phba->cfg_sg_seg_cnt; 4591 } 4592 4593 } else { 4594 /* NVMET is for physical port only */ 4595 memcpy(template, &lpfc_template_nvme, 4596 sizeof(*template)); 4597 } 4598 } else { 4599 template = &phba->vport_template; 4600 } 4601 4602 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4603 if (!shost) 4604 goto out; 4605 4606 vport = (struct lpfc_vport *) shost->hostdata; 4607 vport->phba = phba; 4608 vport->load_flag |= FC_LOADING; 4609 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4610 vport->fc_rscn_flush = 0; 4611 lpfc_get_vport_cfgparam(vport); 4612 4613 /* Adjust value in vport */ 4614 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4615 4616 shost->unique_id = instance; 4617 shost->max_id = LPFC_MAX_TARGET; 4618 shost->max_lun = vport->cfg_max_luns; 4619 shost->this_id = -1; 4620 shost->max_cmd_len = 16; 4621 4622 if (phba->sli_rev == LPFC_SLI_REV4) { 4623 if (!phba->cfg_fcp_mq_threshold || 4624 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4625 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4626 4627 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4628 phba->cfg_fcp_mq_threshold); 4629 4630 shost->dma_boundary = 4631 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4632 4633 if (phba->cfg_xpsgl && 
!phba->nvmet_support) 4634 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4635 else 4636 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4637 } else 4638 /* SLI-3 has a limited number of hardware queues (3), 4639 * thus there is only one for FCP processing. 4640 */ 4641 shost->nr_hw_queues = 1; 4642 4643 /* 4644 * Set initial can_queue value since 0 is no longer supported and 4645 * scsi_add_host will fail. This will be adjusted later based on the 4646 * max xri value determined in hba setup. 4647 */ 4648 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4649 if (dev != &phba->pcidev->dev) { 4650 shost->transportt = lpfc_vport_transport_template; 4651 vport->port_type = LPFC_NPIV_PORT; 4652 } else { 4653 shost->transportt = lpfc_transport_template; 4654 vport->port_type = LPFC_PHYSICAL_PORT; 4655 } 4656 4657 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4658 "9081 CreatePort TMPLATE type %x TBLsize %d " 4659 "SEGcnt %d/%d\n", 4660 vport->port_type, shost->sg_tablesize, 4661 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4662 4663 /* Allocate the resources for VMID */ 4664 rc = lpfc_vmid_res_alloc(phba, vport); 4665 4666 if (rc) 4667 goto out; 4668 4669 /* Initialize all internally managed lists. */ 4670 INIT_LIST_HEAD(&vport->fc_nodes); 4671 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4672 spin_lock_init(&vport->work_port_lock); 4673 4674 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4675 4676 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4677 4678 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4679 4680 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4681 lpfc_setup_bg(phba, shost); 4682 4683 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4684 if (error) 4685 goto out_put_shost; 4686 4687 spin_lock_irq(&phba->port_list_lock); 4688 list_add_tail(&vport->listentry, &phba->port_list); 4689 spin_unlock_irq(&phba->port_list_lock); 4690 return vport; 4691 4692 out_put_shost: 4693 kfree(vport->vmid); 4694 bitmap_free(vport->vmid_priority_range); 4695 scsi_host_put(shost); 4696 out: 4697 return NULL; 4698 } 4699 4700 /** 4701 * destroy_port - destroy an FC port 4702 * @vport: pointer to an lpfc virtual N_Port data structure. 4703 * 4704 * This routine destroys a FC port from the upper layer protocol. All the 4705 * resources associated with the port are released. 4706 **/ 4707 void 4708 destroy_port(struct lpfc_vport *vport) 4709 { 4710 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4711 struct lpfc_hba *phba = vport->phba; 4712 4713 lpfc_debugfs_terminate(vport); 4714 fc_remove_host(shost); 4715 scsi_remove_host(shost); 4716 4717 spin_lock_irq(&phba->port_list_lock); 4718 list_del_init(&vport->listentry); 4719 spin_unlock_irq(&phba->port_list_lock); 4720 4721 lpfc_cleanup(vport); 4722 return; 4723 } 4724 4725 /** 4726 * lpfc_get_instance - Get a unique integer ID 4727 * 4728 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4729 * uses the kernel idr facility to perform the task. 4730 * 4731 * Return codes: 4732 * instance - a unique integer ID allocated as the new instance. 4733 * -1 - lpfc get instance failed. 4734 **/ 4735 int 4736 lpfc_get_instance(void) 4737 { 4738 int ret; 4739 4740 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4741 return ret < 0 ? -1 : ret; 4742 } 4743 4744 /** 4745 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4746 * @shost: pointer to SCSI host data structure. 4747 * @time: elapsed time of the scan in jiffies. 
4748 * 4749 * This routine is called by the SCSI layer with a SCSI host to determine 4750 * whether the scan host is finished. 4751 * 4752 * Note: there is no scan_start function as adapter initialization will have 4753 * asynchronously kicked off the link initialization. 4754 * 4755 * Return codes 4756 * 0 - SCSI host scan is not over yet. 4757 * 1 - SCSI host scan is over. 4758 **/ 4759 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4760 { 4761 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4762 struct lpfc_hba *phba = vport->phba; 4763 int stat = 0; 4764 4765 spin_lock_irq(shost->host_lock); 4766 4767 if (vport->load_flag & FC_UNLOADING) { 4768 stat = 1; 4769 goto finished; 4770 } 4771 if (time >= msecs_to_jiffies(30 * 1000)) { 4772 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4773 "0461 Scanning longer than 30 " 4774 "seconds. Continuing initialization\n"); 4775 stat = 1; 4776 goto finished; 4777 } 4778 if (time >= msecs_to_jiffies(15 * 1000) && 4779 phba->link_state <= LPFC_LINK_DOWN) { 4780 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4781 "0465 Link down longer than 15 " 4782 "seconds. Continuing initialization\n"); 4783 stat = 1; 4784 goto finished; 4785 } 4786 4787 if (vport->port_state != LPFC_VPORT_READY) 4788 goto finished; 4789 if (vport->num_disc_nodes || vport->fc_prli_sent) 4790 goto finished; 4791 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4792 goto finished; 4793 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4794 goto finished; 4795 4796 stat = 1; 4797 4798 finished: 4799 spin_unlock_irq(shost->host_lock); 4800 return stat; 4801 } 4802 4803 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4804 { 4805 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4806 struct lpfc_hba *phba = vport->phba; 4807 4808 fc_host_supported_speeds(shost) = 0; 4809 /* 4810 * Avoid reporting supported link speed for FCoE as it can't be 4811 * controlled via FCoE. 4812 */ 4813 if (phba->hba_flag & HBA_FCOE_MODE) 4814 return; 4815 4816 if (phba->lmt & LMT_256Gb) 4817 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; 4818 if (phba->lmt & LMT_128Gb) 4819 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4820 if (phba->lmt & LMT_64Gb) 4821 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4822 if (phba->lmt & LMT_32Gb) 4823 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4824 if (phba->lmt & LMT_16Gb) 4825 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4826 if (phba->lmt & LMT_10Gb) 4827 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4828 if (phba->lmt & LMT_8Gb) 4829 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4830 if (phba->lmt & LMT_4Gb) 4831 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4832 if (phba->lmt & LMT_2Gb) 4833 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4834 if (phba->lmt & LMT_1Gb) 4835 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4836 } 4837 4838 /** 4839 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port 4840 * @shost: pointer to SCSI host data structure. 4841 * 4842 * This routine initializes a given SCSI host's attributes on an FC port. The 4843 * SCSI host can be either on top of a physical port or a virtual port. 4844 **/ 4845 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4846 { 4847 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4848 struct lpfc_hba *phba = vport->phba; 4849 /* 4850 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
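 * Node/port names, supported classes and FC-4 types do not change for
 * the life of the shost.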
4851 */ 4852 4853 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4854 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4855 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4856 4857 memset(fc_host_supported_fc4s(shost), 0, 4858 sizeof(fc_host_supported_fc4s(shost))); 4859 fc_host_supported_fc4s(shost)[2] = 1; 4860 fc_host_supported_fc4s(shost)[7] = 1; 4861 4862 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4863 sizeof fc_host_symbolic_name(shost)); 4864 4865 lpfc_host_supported_speeds_set(shost); 4866 4867 fc_host_maxframe_size(shost) = 4868 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4869 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4870 4871 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4872 4873 /* This value is also unchanging */ 4874 memset(fc_host_active_fc4s(shost), 0, 4875 sizeof(fc_host_active_fc4s(shost))); 4876 fc_host_active_fc4s(shost)[2] = 1; 4877 fc_host_active_fc4s(shost)[7] = 1; 4878 4879 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4880 spin_lock_irq(shost->host_lock); 4881 vport->load_flag &= ~FC_LOADING; 4882 spin_unlock_irq(shost->host_lock); 4883 } 4884 4885 /** 4886 * lpfc_stop_port_s3 - Stop SLI3 device port 4887 * @phba: pointer to lpfc hba data structure. 4888 * 4889 * This routine is invoked to stop an SLI3 device port, it stops the device 4890 * from generating interrupts and stops the device driver's timers for the 4891 * device. 4892 **/ 4893 static void 4894 lpfc_stop_port_s3(struct lpfc_hba *phba) 4895 { 4896 /* Clear all interrupt enable conditions */ 4897 writel(0, phba->HCregaddr); 4898 readl(phba->HCregaddr); /* flush */ 4899 /* Clear all pending interrupts */ 4900 writel(0xffffffff, phba->HAregaddr); 4901 readl(phba->HAregaddr); /* flush */ 4902 4903 /* Reset some HBA SLI setup states */ 4904 lpfc_stop_hba_timers(phba); 4905 phba->pport->work_port_events = 0; 4906 } 4907 4908 /** 4909 * lpfc_stop_port_s4 - Stop SLI4 device port 4910 * @phba: pointer to lpfc hba data structure. 4911 * 4912 * This routine is invoked to stop an SLI4 device port, it stops the device 4913 * from generating interrupts and stops the device driver's timers for the 4914 * device. 4915 **/ 4916 static void 4917 lpfc_stop_port_s4(struct lpfc_hba *phba) 4918 { 4919 /* Reset some HBA SLI4 setup states */ 4920 lpfc_stop_hba_timers(phba); 4921 if (phba->pport) 4922 phba->pport->work_port_events = 0; 4923 phba->sli4_hba.intr_enable = 0; 4924 } 4925 4926 /** 4927 * lpfc_stop_port - Wrapper function for stopping hba port 4928 * @phba: Pointer to HBA context object. 4929 * 4930 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4931 * the API jump table function pointer from the lpfc_hba struct. 4932 **/ 4933 void 4934 lpfc_stop_port(struct lpfc_hba *phba) 4935 { 4936 phba->lpfc_stop_port(phba); 4937 4938 if (phba->wq) 4939 flush_workqueue(phba->wq); 4940 } 4941 4942 /** 4943 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4944 * @phba: Pointer to hba for which this call is being executed. 4945 * 4946 * This routine starts the timer waiting for the FCF rediscovery to complete. 
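 * The wait period is LPFC_FCF_REDISCOVER_WAIT_TMO milliseconds; FCF_REDISC_PEND
 * is set so the timeout handler can tell the timer was not cancelled.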
4947 **/ 4948 void 4949 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 4950 { 4951 unsigned long fcf_redisc_wait_tmo = 4952 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 4953 /* Start fcf rediscovery wait period timer */ 4954 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 4955 spin_lock_irq(&phba->hbalock); 4956 /* Allow action to new fcf asynchronous event */ 4957 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 4958 /* Mark the FCF rediscovery pending state */ 4959 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 4960 spin_unlock_irq(&phba->hbalock); 4961 } 4962 4963 /** 4964 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 4965 * @t: Timer context used to obtain the pointer to lpfc hba data structure. 4966 * 4967 * This routine is invoked when the wait for FCF table rediscovery has timed 4968 * out. If new FCF record(s) have been discovered during the wait period, a 4969 * new FCF event shall be added to the FCOE async event list, and then the 4970 * worker thread shall be woken up for processing from the worker thread 4971 * context. 4972 **/ 4973 static void 4974 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4975 { 4976 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4977 4978 /* Don't send FCF rediscovery event if timer cancelled */ 4979 spin_lock_irq(&phba->hbalock); 4980 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4981 spin_unlock_irq(&phba->hbalock); 4982 return; 4983 } 4984 /* Clear FCF rediscovery timer pending flag */ 4985 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4986 /* FCF rediscovery event to worker thread */ 4987 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4988 spin_unlock_irq(&phba->hbalock); 4989 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4990 "2776 FCF rediscover quiescent timer expired\n"); 4991 /* wake up worker thread */ 4992 lpfc_worker_wake_up(phba); 4993 } 4994 4995 /** 4996 * lpfc_vmid_poll - VMID timeout detection 4997 * @t: Timer context used to obtain the pointer to lpfc hba data structure. 4998 * 4999 * This routine is invoked when no I/O has been issued by a VM for the 5000 * specified amount of time. When this situation is detected, the VMID has to 5001 * be deregistered from the switch and all the local resources freed. The VMID 5002 * will be reassigned to the VM once the I/O begins. 5003 **/ 5004 static void 5005 lpfc_vmid_poll(struct timer_list *t) 5006 { 5007 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll); 5008 u32 wake_up = 0; 5009 5010 /* check if there is a need to issue QFPA */ 5011 if (phba->pport->vmid_priority_tagging) { 5012 wake_up = 1; 5013 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; 5014 } 5015 5016 /* Is the vmid inactivity timer enabled */ 5017 if (phba->pport->vmid_inactivity_timeout || 5018 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) { 5019 wake_up = 1; 5020 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID; 5021 } 5022 5023 if (wake_up) 5024 lpfc_worker_wake_up(phba); 5025 5026 /* restart the timer for the next iteration */ 5027 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * 5028 LPFC_VMID_TIMER)); 5029 } 5030 5031 /** 5032 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 5033 * @phba: pointer to lpfc hba data structure. 5034 * @acqe_link: pointer to the async link completion queue entry. 5035 * 5036 * This routine is to parse the SLI4 link-attention link fault code.
5037 **/ 5038 static void 5039 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 5040 struct lpfc_acqe_link *acqe_link) 5041 { 5042 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 5043 case LPFC_ASYNC_LINK_FAULT_NONE: 5044 case LPFC_ASYNC_LINK_FAULT_LOCAL: 5045 case LPFC_ASYNC_LINK_FAULT_REMOTE: 5046 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 5047 break; 5048 default: 5049 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5050 "0398 Unknown link fault code: x%x\n", 5051 bf_get(lpfc_acqe_link_fault, acqe_link)); 5052 break; 5053 } 5054 } 5055 5056 /** 5057 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 5058 * @phba: pointer to lpfc hba data structure. 5059 * @acqe_link: pointer to the async link completion queue entry. 5060 * 5061 * This routine is to parse the SLI4 link attention type and translate it 5062 * into the base driver's link attention type coding. 5063 * 5064 * Return: Link attention type in terms of base driver's coding. 5065 **/ 5066 static uint8_t 5067 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 5068 struct lpfc_acqe_link *acqe_link) 5069 { 5070 uint8_t att_type; 5071 5072 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 5073 case LPFC_ASYNC_LINK_STATUS_DOWN: 5074 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 5075 att_type = LPFC_ATT_LINK_DOWN; 5076 break; 5077 case LPFC_ASYNC_LINK_STATUS_UP: 5078 /* Ignore physical link up events - wait for logical link up */ 5079 att_type = LPFC_ATT_RESERVED; 5080 break; 5081 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 5082 att_type = LPFC_ATT_LINK_UP; 5083 break; 5084 default: 5085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5086 "0399 Invalid link attention type: x%x\n", 5087 bf_get(lpfc_acqe_link_status, acqe_link)); 5088 att_type = LPFC_ATT_RESERVED; 5089 break; 5090 } 5091 return att_type; 5092 } 5093 5094 /** 5095 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 5096 * @phba: pointer to lpfc hba data structure. 5097 * 5098 * This routine is to get an SLI3 FC port's link speed in Mbps. 5099 * 5100 * Return: link speed in terms of Mbps. 5101 **/ 5102 uint32_t 5103 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 5104 { 5105 uint32_t link_speed; 5106 5107 if (!lpfc_is_link_up(phba)) 5108 return 0; 5109 5110 if (phba->sli_rev <= LPFC_SLI_REV3) { 5111 switch (phba->fc_linkspeed) { 5112 case LPFC_LINK_SPEED_1GHZ: 5113 link_speed = 1000; 5114 break; 5115 case LPFC_LINK_SPEED_2GHZ: 5116 link_speed = 2000; 5117 break; 5118 case LPFC_LINK_SPEED_4GHZ: 5119 link_speed = 4000; 5120 break; 5121 case LPFC_LINK_SPEED_8GHZ: 5122 link_speed = 8000; 5123 break; 5124 case LPFC_LINK_SPEED_10GHZ: 5125 link_speed = 10000; 5126 break; 5127 case LPFC_LINK_SPEED_16GHZ: 5128 link_speed = 16000; 5129 break; 5130 default: 5131 link_speed = 0; 5132 } 5133 } else { 5134 if (phba->sli4_hba.link_state.logical_speed) 5135 link_speed = 5136 phba->sli4_hba.link_state.logical_speed; 5137 else 5138 link_speed = phba->sli4_hba.link_state.speed; 5139 } 5140 return link_speed; 5141 } 5142 5143 /** 5144 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 5145 * @phba: pointer to lpfc hba data structure. 5146 * @evt_code: asynchronous event code. 5147 * @speed_code: asynchronous event link speed code. 5148 * 5149 * This routine is to parse the giving SLI4 async event link speed code into 5150 * value of Mbps for the link speed. 5151 * 5152 * Return: link speed in terms of Mbps. 
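 * For example, an FC trailer event (LPFC_TRAILER_CODE_FC) carrying
 * LPFC_FC_LA_SPEED_32G parses to 32000 Mbps; unrecognized codes parse to 0.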
5153 **/ 5154 static uint32_t 5155 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 5156 uint8_t speed_code) 5157 { 5158 uint32_t port_speed; 5159 5160 switch (evt_code) { 5161 case LPFC_TRAILER_CODE_LINK: 5162 switch (speed_code) { 5163 case LPFC_ASYNC_LINK_SPEED_ZERO: 5164 port_speed = 0; 5165 break; 5166 case LPFC_ASYNC_LINK_SPEED_10MBPS: 5167 port_speed = 10; 5168 break; 5169 case LPFC_ASYNC_LINK_SPEED_100MBPS: 5170 port_speed = 100; 5171 break; 5172 case LPFC_ASYNC_LINK_SPEED_1GBPS: 5173 port_speed = 1000; 5174 break; 5175 case LPFC_ASYNC_LINK_SPEED_10GBPS: 5176 port_speed = 10000; 5177 break; 5178 case LPFC_ASYNC_LINK_SPEED_20GBPS: 5179 port_speed = 20000; 5180 break; 5181 case LPFC_ASYNC_LINK_SPEED_25GBPS: 5182 port_speed = 25000; 5183 break; 5184 case LPFC_ASYNC_LINK_SPEED_40GBPS: 5185 port_speed = 40000; 5186 break; 5187 case LPFC_ASYNC_LINK_SPEED_100GBPS: 5188 port_speed = 100000; 5189 break; 5190 default: 5191 port_speed = 0; 5192 } 5193 break; 5194 case LPFC_TRAILER_CODE_FC: 5195 switch (speed_code) { 5196 case LPFC_FC_LA_SPEED_UNKNOWN: 5197 port_speed = 0; 5198 break; 5199 case LPFC_FC_LA_SPEED_1G: 5200 port_speed = 1000; 5201 break; 5202 case LPFC_FC_LA_SPEED_2G: 5203 port_speed = 2000; 5204 break; 5205 case LPFC_FC_LA_SPEED_4G: 5206 port_speed = 4000; 5207 break; 5208 case LPFC_FC_LA_SPEED_8G: 5209 port_speed = 8000; 5210 break; 5211 case LPFC_FC_LA_SPEED_10G: 5212 port_speed = 10000; 5213 break; 5214 case LPFC_FC_LA_SPEED_16G: 5215 port_speed = 16000; 5216 break; 5217 case LPFC_FC_LA_SPEED_32G: 5218 port_speed = 32000; 5219 break; 5220 case LPFC_FC_LA_SPEED_64G: 5221 port_speed = 64000; 5222 break; 5223 case LPFC_FC_LA_SPEED_128G: 5224 port_speed = 128000; 5225 break; 5226 case LPFC_FC_LA_SPEED_256G: 5227 port_speed = 256000; 5228 break; 5229 default: 5230 port_speed = 0; 5231 } 5232 break; 5233 default: 5234 port_speed = 0; 5235 } 5236 return port_speed; 5237 } 5238 5239 /** 5240 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 5241 * @phba: pointer to lpfc hba data structure. 5242 * @acqe_link: pointer to the async link completion queue entry. 5243 * 5244 * This routine is to handle the SLI4 asynchronous FCoE link event. 
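 * A READ_TOPOLOGY mailbox is built from the ACQE; in FC mode it is issued
 * to the port, while in FCoE mode the mailbox fields are filled in from the
 * ACQE and the read-topology completion handler is invoked directly.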
5245 **/ 5246 static void 5247 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 5248 struct lpfc_acqe_link *acqe_link) 5249 { 5250 struct lpfc_dmabuf *mp; 5251 LPFC_MBOXQ_t *pmb; 5252 MAILBOX_t *mb; 5253 struct lpfc_mbx_read_top *la; 5254 uint8_t att_type; 5255 int rc; 5256 5257 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 5258 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 5259 return; 5260 phba->fcoe_eventtag = acqe_link->event_tag; 5261 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5262 if (!pmb) { 5263 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5264 "0395 The mboxq allocation failed\n"); 5265 return; 5266 } 5267 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5268 if (!mp) { 5269 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5270 "0396 The lpfc_dmabuf allocation failed\n"); 5271 goto out_free_pmb; 5272 } 5273 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5274 if (!mp->virt) { 5275 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5276 "0397 The mbuf allocation failed\n"); 5277 goto out_free_dmabuf; 5278 } 5279 5280 /* Cleanup any outstanding ELS commands */ 5281 lpfc_els_flush_all_cmd(phba); 5282 5283 /* Block ELS IOCBs until we have done process link event */ 5284 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5285 5286 /* Update link event statistics */ 5287 phba->sli.slistat.link_event++; 5288 5289 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5290 lpfc_read_topology(phba, pmb, mp); 5291 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5292 pmb->vport = phba->pport; 5293 5294 /* Keep the link status for extra SLI4 state machine reference */ 5295 phba->sli4_hba.link_state.speed = 5296 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 5297 bf_get(lpfc_acqe_link_speed, acqe_link)); 5298 phba->sli4_hba.link_state.duplex = 5299 bf_get(lpfc_acqe_link_duplex, acqe_link); 5300 phba->sli4_hba.link_state.status = 5301 bf_get(lpfc_acqe_link_status, acqe_link); 5302 phba->sli4_hba.link_state.type = 5303 bf_get(lpfc_acqe_link_type, acqe_link); 5304 phba->sli4_hba.link_state.number = 5305 bf_get(lpfc_acqe_link_number, acqe_link); 5306 phba->sli4_hba.link_state.fault = 5307 bf_get(lpfc_acqe_link_fault, acqe_link); 5308 phba->sli4_hba.link_state.logical_speed = 5309 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 5310 5311 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5312 "2900 Async FC/FCoE Link event - Speed:%dGBit " 5313 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 5314 "Logical speed:%dMbps Fault:%d\n", 5315 phba->sli4_hba.link_state.speed, 5316 phba->sli4_hba.link_state.topology, 5317 phba->sli4_hba.link_state.status, 5318 phba->sli4_hba.link_state.type, 5319 phba->sli4_hba.link_state.number, 5320 phba->sli4_hba.link_state.logical_speed, 5321 phba->sli4_hba.link_state.fault); 5322 /* 5323 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 5324 * topology info. Note: Optional for non FC-AL ports. 5325 */ 5326 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5327 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5328 if (rc == MBX_NOT_FINISHED) 5329 goto out_free_dmabuf; 5330 return; 5331 } 5332 /* 5333 * For FCoE Mode: fill in all the topology information we need and call 5334 * the READ_TOPOLOGY completion routine to continue without actually 5335 * sending the READ_TOPOLOGY mailbox command to the port. 
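 * The attention type, event tag and link speed come from the ACQE; the
 * remaining loop-related fields are faked since they do not apply here.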
5336 */ 5337 /* Initialize completion status */ 5338 mb = &pmb->u.mb; 5339 mb->mbxStatus = MBX_SUCCESS; 5340 5341 /* Parse port fault information field */ 5342 lpfc_sli4_parse_latt_fault(phba, acqe_link); 5343 5344 /* Parse and translate link attention fields */ 5345 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 5346 la->eventTag = acqe_link->event_tag; 5347 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 5348 bf_set(lpfc_mbx_read_top_link_spd, la, 5349 (bf_get(lpfc_acqe_link_speed, acqe_link))); 5350 5351 /* Fake the following irrelevant fields */ 5352 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 5353 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 5354 bf_set(lpfc_mbx_read_top_il, la, 0); 5355 bf_set(lpfc_mbx_read_top_pb, la, 0); 5356 bf_set(lpfc_mbx_read_top_fa, la, 0); 5357 bf_set(lpfc_mbx_read_top_mm, la, 0); 5358 5359 /* Invoke the lpfc_handle_latt mailbox command callback function */ 5360 lpfc_mbx_cmpl_read_topology(phba, pmb); 5361 5362 return; 5363 5364 out_free_dmabuf: 5365 kfree(mp); 5366 out_free_pmb: 5367 mempool_free(pmb, phba->mbox_mem_pool); 5368 } 5369 5370 /** 5371 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read 5372 * topology. 5373 * @phba: pointer to lpfc hba data structure. 5374 * @speed_code: asynchronous event link speed code. 5375 * 5376 * This routine is to parse the given SLI4 async event link speed code into 5377 * value of Read topology link speed. 5378 * 5379 * Return: link speed in terms of Read topology. 5380 **/ 5381 static uint8_t 5382 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) 5383 { 5384 uint8_t port_speed; 5385 5386 switch (speed_code) { 5387 case LPFC_FC_LA_SPEED_1G: 5388 port_speed = LPFC_LINK_SPEED_1GHZ; 5389 break; 5390 case LPFC_FC_LA_SPEED_2G: 5391 port_speed = LPFC_LINK_SPEED_2GHZ; 5392 break; 5393 case LPFC_FC_LA_SPEED_4G: 5394 port_speed = LPFC_LINK_SPEED_4GHZ; 5395 break; 5396 case LPFC_FC_LA_SPEED_8G: 5397 port_speed = LPFC_LINK_SPEED_8GHZ; 5398 break; 5399 case LPFC_FC_LA_SPEED_16G: 5400 port_speed = LPFC_LINK_SPEED_16GHZ; 5401 break; 5402 case LPFC_FC_LA_SPEED_32G: 5403 port_speed = LPFC_LINK_SPEED_32GHZ; 5404 break; 5405 case LPFC_FC_LA_SPEED_64G: 5406 port_speed = LPFC_LINK_SPEED_64GHZ; 5407 break; 5408 case LPFC_FC_LA_SPEED_128G: 5409 port_speed = LPFC_LINK_SPEED_128GHZ; 5410 break; 5411 case LPFC_FC_LA_SPEED_256G: 5412 port_speed = LPFC_LINK_SPEED_256GHZ; 5413 break; 5414 default: 5415 port_speed = 0; 5416 break; 5417 } 5418 5419 return port_speed; 5420 } 5421 5422 void 5423 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba) 5424 { 5425 struct rxtable_entry *entry; 5426 int cnt = 0, head, tail, last, start; 5427 5428 head = atomic_read(&phba->rxtable_idx_head); 5429 tail = atomic_read(&phba->rxtable_idx_tail); 5430 if (!phba->rxtable || head == tail) { 5431 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 5432 "4411 Rxtable is empty\n"); 5433 return; 5434 } 5435 last = tail; 5436 start = head; 5437 5438 /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */ 5439 while (start != last) { 5440 if (start) 5441 start--; 5442 else 5443 start = LPFC_MAX_RXMONITOR_ENTRY - 1; 5444 entry = &phba->rxtable[start]; 5445 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5446 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld " 5447 "Lat %lld ASz %lld Info %02d BWUtil %d " 5448 "Int %d slot %d\n", 5449 cnt, entry->max_bytes_per_interval, 5450 entry->total_bytes, entry->rcv_bytes, 5451 entry->avg_io_latency, entry->avg_io_size, 5452 entry->cmf_info,
entry->timer_utilization, 5453 entry->timer_interval, start); 5454 cnt++; 5455 if (cnt >= LPFC_MAX_RXMONITOR_DUMP) 5456 return; 5457 } 5458 } 5459 5460 /** 5461 * lpfc_cgn_update_stat - Save data into congestion stats buffer 5462 * @phba: pointer to lpfc hba data structure. 5463 * @dtag: FPIN descriptor received 5464 * 5465 * Increment the FPIN received counter/time when it happens. 5466 */ 5467 void 5468 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) 5469 { 5470 struct lpfc_cgn_info *cp; 5471 struct tm broken; 5472 struct timespec64 cur_time; 5473 u32 cnt; 5474 u16 value; 5475 5476 /* Make sure we have a congestion info buffer */ 5477 if (!phba->cgn_i) 5478 return; 5479 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5480 ktime_get_real_ts64(&cur_time); 5481 time64_to_tm(cur_time.tv_sec, 0, &broken); 5482 5483 /* Update congestion statistics */ 5484 switch (dtag) { 5485 case ELS_DTAG_LNK_INTEGRITY: 5486 cnt = le32_to_cpu(cp->link_integ_notification); 5487 cnt++; 5488 cp->link_integ_notification = cpu_to_le32(cnt); 5489 5490 cp->cgn_stat_lnk_month = broken.tm_mon + 1; 5491 cp->cgn_stat_lnk_day = broken.tm_mday; 5492 cp->cgn_stat_lnk_year = broken.tm_year - 100; 5493 cp->cgn_stat_lnk_hour = broken.tm_hour; 5494 cp->cgn_stat_lnk_min = broken.tm_min; 5495 cp->cgn_stat_lnk_sec = broken.tm_sec; 5496 break; 5497 case ELS_DTAG_DELIVERY: 5498 cnt = le32_to_cpu(cp->delivery_notification); 5499 cnt++; 5500 cp->delivery_notification = cpu_to_le32(cnt); 5501 5502 cp->cgn_stat_del_month = broken.tm_mon + 1; 5503 cp->cgn_stat_del_day = broken.tm_mday; 5504 cp->cgn_stat_del_year = broken.tm_year - 100; 5505 cp->cgn_stat_del_hour = broken.tm_hour; 5506 cp->cgn_stat_del_min = broken.tm_min; 5507 cp->cgn_stat_del_sec = broken.tm_sec; 5508 break; 5509 case ELS_DTAG_PEER_CONGEST: 5510 cnt = le32_to_cpu(cp->cgn_peer_notification); 5511 cnt++; 5512 cp->cgn_peer_notification = cpu_to_le32(cnt); 5513 5514 cp->cgn_stat_peer_month = broken.tm_mon + 1; 5515 cp->cgn_stat_peer_day = broken.tm_mday; 5516 cp->cgn_stat_peer_year = broken.tm_year - 100; 5517 cp->cgn_stat_peer_hour = broken.tm_hour; 5518 cp->cgn_stat_peer_min = broken.tm_min; 5519 cp->cgn_stat_peer_sec = broken.tm_sec; 5520 break; 5521 case ELS_DTAG_CONGESTION: 5522 cnt = le32_to_cpu(cp->cgn_notification); 5523 cnt++; 5524 cp->cgn_notification = cpu_to_le32(cnt); 5525 5526 cp->cgn_stat_cgn_month = broken.tm_mon + 1; 5527 cp->cgn_stat_cgn_day = broken.tm_mday; 5528 cp->cgn_stat_cgn_year = broken.tm_year - 100; 5529 cp->cgn_stat_cgn_hour = broken.tm_hour; 5530 cp->cgn_stat_cgn_min = broken.tm_min; 5531 cp->cgn_stat_cgn_sec = broken.tm_sec; 5532 } 5533 if (phba->cgn_fpin_frequency && 5534 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5535 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5536 cp->cgn_stat_npm = value; 5537 } 5538 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5539 LPFC_CGN_CRC32_SEED); 5540 cp->cgn_info_crc = cpu_to_le32(value); 5541 } 5542 5543 /** 5544 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer 5545 * @phba: pointer to lpfc hba data structure. 5546 * 5547 * Save the congestion event data every minute. 5548 * On the hour collapse all the minute data into hour data. Every day 5549 * collapse all the hour data into daily data. Driver and fabric 5550 * congestion event counters are kept separately and saved out 5551 * to the registered congestion buffer every minute.
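 *
 * Illustrative cadence (an editorial sketch; assumes the conventional
 * definitions of LPFC_MIN_HOUR as 60 minute slots and LPFC_HOUR_DAY as 24
 * hour slots): the routine runs once per minute and fills one minute slot;
 * every 60th run it also collapses the minute slots into one hour slot, and
 * once per day (every LPFC_MIN_DAY minutes) it collapses the hour slots into
 * one day slot, with day slots rotating after LPFC_MAX_CGN_DAYS.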
5552 */ 5553 static void 5554 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) 5555 { 5556 struct lpfc_cgn_info *cp; 5557 struct tm broken; 5558 struct timespec64 cur_time; 5559 uint32_t i, index; 5560 uint16_t value, mvalue; 5561 uint64_t bps; 5562 uint32_t mbps; 5563 uint32_t dvalue, wvalue, lvalue, avalue; 5564 uint64_t latsum; 5565 __le16 *ptr; 5566 __le32 *lptr; 5567 __le16 *mptr; 5568 5569 /* Make sure we have a congestion info buffer */ 5570 if (!phba->cgn_i) 5571 return; 5572 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5573 5574 if (time_before(jiffies, phba->cgn_evt_timestamp)) 5575 return; 5576 phba->cgn_evt_timestamp = jiffies + 5577 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 5578 phba->cgn_evt_minute++; 5579 5580 /* We should get to this point in the routine on 1 minute intervals */ 5581 5582 ktime_get_real_ts64(&cur_time); 5583 time64_to_tm(cur_time.tv_sec, 0, &broken); 5584 5585 if (phba->cgn_fpin_frequency && 5586 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5587 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5588 cp->cgn_stat_npm = value; 5589 } 5590 5591 /* Read and clear the latency counters for this minute */ 5592 lvalue = atomic_read(&phba->cgn_latency_evt_cnt); 5593 latsum = atomic64_read(&phba->cgn_latency_evt); 5594 atomic_set(&phba->cgn_latency_evt_cnt, 0); 5595 atomic64_set(&phba->cgn_latency_evt, 0); 5596 5597 /* We need to store MB/sec bandwidth in the congestion information. 5598 * block_cnt is count of 512 byte blocks for the entire minute, 5599 * bps will get bytes per sec before finally converting to MB/sec. 5600 */ 5601 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; 5602 phba->rx_block_cnt = 0; 5603 mvalue = bps / (1024 * 1024); /* convert to MB/sec */ 5604 5605 /* Every minute */ 5606 /* cgn parameters */ 5607 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 5608 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 5609 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 5610 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 5611 5612 /* Fill in default LUN qdepth */ 5613 value = (uint16_t)(phba->pport->cfg_lun_queue_depth); 5614 cp->cgn_lunq = cpu_to_le16(value); 5615 5616 /* Record congestion buffer info - every minute 5617 * cgn_driver_evt_cnt (Driver events) 5618 * cgn_fabric_warn_cnt (Congestion Warnings) 5619 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) 5620 * cgn_fabric_alarm_cnt (Congestion Alarms) 5621 */ 5622 index = ++cp->cgn_index_minute; 5623 if (cp->cgn_index_minute == LPFC_MIN_HOUR) { 5624 cp->cgn_index_minute = 0; 5625 index = 0; 5626 } 5627 5628 /* Get the number of driver events in this sample and reset counter */ 5629 dvalue = atomic_read(&phba->cgn_driver_evt_cnt); 5630 atomic_set(&phba->cgn_driver_evt_cnt, 0); 5631 5632 /* Get the number of warning events - FPIN and Signal for this minute */ 5633 wvalue = 0; 5634 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || 5635 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5636 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5637 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); 5638 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 5639 5640 /* Get the number of alarm events - FPIN and Signal for this minute */ 5641 avalue = 0; 5642 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || 5643 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5644 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); 5645 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 5646 5647 /* Collect the driver, warning, alarm and latency counts for this 5648 * minute into the driver congestion buffer. 
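 * A hedged worked example of the bandwidth figure stored below (numbers are
 * illustrative and assume LPFC_SEC_MIN is 60): if rx_block_cnt counted
 * 12,582,912 512-byte blocks in the minute, bps = (12,582,912 / 60) * 512 =
 * 107,374,080 bytes/sec and mvalue = 107,374,080 / (1024 * 1024) = 102
 * MB/sec, which is what lands in cgn_bw_min[index].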
5649 */ 5650 ptr = &cp->cgn_drvr_min[index]; 5651 value = (uint16_t)dvalue; 5652 *ptr = cpu_to_le16(value); 5653 5654 ptr = &cp->cgn_warn_min[index]; 5655 value = (uint16_t)wvalue; 5656 *ptr = cpu_to_le16(value); 5657 5658 ptr = &cp->cgn_alarm_min[index]; 5659 value = (uint16_t)avalue; 5660 *ptr = cpu_to_le16(value); 5661 5662 lptr = &cp->cgn_latency_min[index]; 5663 if (lvalue) { 5664 lvalue = (uint32_t)div_u64(latsum, lvalue); 5665 *lptr = cpu_to_le32(lvalue); 5666 } else { 5667 *lptr = 0; 5668 } 5669 5670 /* Collect the bandwidth value into the driver's congestion buffer. */ 5671 mptr = &cp->cgn_bw_min[index]; 5672 *mptr = cpu_to_le16(mvalue); 5673 5674 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5675 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n", 5676 index, dvalue, wvalue, *lptr, mvalue, avalue); 5677 5678 /* Every hour */ 5679 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) { 5680 /* Record congestion buffer info - every hour 5681 * Collapse all minutes into an hour 5682 */ 5683 index = ++cp->cgn_index_hour; 5684 if (cp->cgn_index_hour == LPFC_HOUR_DAY) { 5685 cp->cgn_index_hour = 0; 5686 index = 0; 5687 } 5688 5689 dvalue = 0; 5690 wvalue = 0; 5691 lvalue = 0; 5692 avalue = 0; 5693 mvalue = 0; 5694 mbps = 0; 5695 for (i = 0; i < LPFC_MIN_HOUR; i++) { 5696 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]); 5697 wvalue += le16_to_cpu(cp->cgn_warn_min[i]); 5698 lvalue += le32_to_cpu(cp->cgn_latency_min[i]); 5699 mbps += le16_to_cpu(cp->cgn_bw_min[i]); 5700 avalue += le16_to_cpu(cp->cgn_alarm_min[i]); 5701 } 5702 if (lvalue) /* Avg of latency averages */ 5703 lvalue /= LPFC_MIN_HOUR; 5704 if (mbps) /* Avg of Bandwidth averages */ 5705 mvalue = mbps / LPFC_MIN_HOUR; 5706 5707 lptr = &cp->cgn_drvr_hr[index]; 5708 *lptr = cpu_to_le32(dvalue); 5709 lptr = &cp->cgn_warn_hr[index]; 5710 *lptr = cpu_to_le32(wvalue); 5711 lptr = &cp->cgn_latency_hr[index]; 5712 *lptr = cpu_to_le32(lvalue); 5713 mptr = &cp->cgn_bw_hr[index]; 5714 *mptr = cpu_to_le16(mvalue); 5715 lptr = &cp->cgn_alarm_hr[index]; 5716 *lptr = cpu_to_le32(avalue); 5717 5718 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5719 "2419 Congestion Info - hour " 5720 "(%d): %d %d %d %d %d\n", 5721 index, dvalue, wvalue, lvalue, mvalue, avalue); 5722 } 5723 5724 /* Every day */ 5725 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) { 5726 /* Record congestion buffer info - every day 5727 * Collapse all hours into a day. Rotate days 5728 * after LPFC_MAX_CGN_DAYS. 5729 */ 5730 index = ++cp->cgn_index_day; 5731 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { 5732 cp->cgn_index_day = 0; 5733 index = 0; 5734 } 5735 5736 /* Anytime we overwrite daily index 0, after we wrap, 5737 * we will be overwriting the oldest day, so we must 5738 * update the congestion data start time for that day. 5739 * That start time should have previously been saved after 5740 * we wrote the last day's worth of data.
5741 */ 5742 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { 5743 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); 5744 5745 cp->cgn_info_month = broken.tm_mon + 1; 5746 cp->cgn_info_day = broken.tm_mday; 5747 cp->cgn_info_year = broken.tm_year - 100; 5748 cp->cgn_info_hour = broken.tm_hour; 5749 cp->cgn_info_minute = broken.tm_min; 5750 cp->cgn_info_second = broken.tm_sec; 5751 5752 lpfc_printf_log 5753 (phba, KERN_INFO, LOG_CGN_MGMT, 5754 "2646 CGNInfo idx0 Start Time: " 5755 "%d/%d/%d %d:%d:%d\n", 5756 cp->cgn_info_day, cp->cgn_info_month, 5757 cp->cgn_info_year, cp->cgn_info_hour, 5758 cp->cgn_info_minute, cp->cgn_info_second); 5759 } 5760 5761 dvalue = 0; 5762 wvalue = 0; 5763 lvalue = 0; 5764 mvalue = 0; 5765 mbps = 0; 5766 avalue = 0; 5767 for (i = 0; i < LPFC_HOUR_DAY; i++) { 5768 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); 5769 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); 5770 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); 5771 mbps += le16_to_cpu(cp->cgn_bw_hr[i]); 5772 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); 5773 } 5774 if (lvalue) /* Avg of latency averages */ 5775 lvalue /= LPFC_HOUR_DAY; 5776 if (mbps) /* Avg of Bandwidth averages */ 5777 mvalue = mbps / LPFC_HOUR_DAY; 5778 5779 lptr = &cp->cgn_drvr_day[index]; 5780 *lptr = cpu_to_le32(dvalue); 5781 lptr = &cp->cgn_warn_day[index]; 5782 *lptr = cpu_to_le32(wvalue); 5783 lptr = &cp->cgn_latency_day[index]; 5784 *lptr = cpu_to_le32(lvalue); 5785 mptr = &cp->cgn_bw_day[index]; 5786 *mptr = cpu_to_le16(mvalue); 5787 lptr = &cp->cgn_alarm_day[index]; 5788 *lptr = cpu_to_le32(avalue); 5789 5790 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5791 "2420 Congestion Info - daily (%d): " 5792 "%d %d %d %d %d\n", 5793 index, dvalue, wvalue, lvalue, mvalue, avalue); 5794 5795 /* We just wrote LPFC_MAX_CGN_DAYS of data, 5796 * so we are wrapped on any data after this. 5797 * Save this as the start time for the next day. 5798 */ 5799 if (index == (LPFC_MAX_CGN_DAYS - 1)) { 5800 phba->hba_flag |= HBA_CGN_DAY_WRAP; 5801 ktime_get_real_ts64(&phba->cgn_daily_ts); 5802 } 5803 } 5804 5805 /* Use the frequency found in the last rcv'ed FPIN */ 5806 value = phba->cgn_fpin_frequency; 5807 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) 5808 cp->cgn_warn_freq = cpu_to_le16(value); 5809 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) 5810 cp->cgn_alarm_freq = cpu_to_le16(value); 5811 5812 /* Frequency (in ms) Signal Warning/Signal Congestion Notifications 5813 * are received by the HBA 5814 */ 5815 value = phba->cgn_sig_freq; 5816 5817 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5818 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5819 cp->cgn_warn_freq = cpu_to_le16(value); 5820 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5821 cp->cgn_alarm_freq = cpu_to_le16(value); 5822 5823 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5824 LPFC_CGN_CRC32_SEED); 5825 cp->cgn_info_crc = cpu_to_le32(lvalue); 5826 } 5827 5828 /** 5829 * lpfc_calc_cmf_latency - latency from start of rxate timer interval 5830 * @phba: The Hba for which this call is being executed. 5831 * 5832 * The routine calculates the latency from the beginning of the CMF timer 5833 * interval to the current point in time. It is called from IO completion 5834 * when we exceed our Bandwidth limitation for the time interval. 
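 *
 * A hedged worked example (illustrative values only): if the interval began
 * at cmf_latency = 10 s + 900,000,000 ns and the completion occurs at
 * 11 s + 50,000,000 ns, the seconds differ and the nanoseconds have wrapped,
 * so msec = (11 - 10 - 1) * 1000 + ((1,000,000,000 - 900,000,000) +
 * 50,000,000) / 1,000,000 = 150 ms.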
5835 */ 5836 uint32_t 5837 lpfc_calc_cmf_latency(struct lpfc_hba *phba) 5838 { 5839 struct timespec64 cmpl_time; 5840 uint32_t msec = 0; 5841 5842 ktime_get_real_ts64(&cmpl_time); 5843 5844 /* This routine works on a ms granularity so sec and usec are 5845 * converted accordingly. 5846 */ 5847 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { 5848 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / 5849 NSEC_PER_MSEC; 5850 } else { 5851 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { 5852 msec = (cmpl_time.tv_sec - 5853 phba->cmf_latency.tv_sec) * MSEC_PER_SEC; 5854 msec += ((cmpl_time.tv_nsec - 5855 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); 5856 } else { 5857 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - 5858 1) * MSEC_PER_SEC; 5859 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + 5860 cmpl_time.tv_nsec) / NSEC_PER_MSEC); 5861 } 5862 } 5863 return msec; 5864 } 5865 5866 /** 5867 * lpfc_cmf_timer - This is the timer function for one congestion 5868 * rate interval. 5869 * @timer: Pointer to the high resolution timer that expired 5870 */ 5871 static enum hrtimer_restart 5872 lpfc_cmf_timer(struct hrtimer *timer) 5873 { 5874 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, 5875 cmf_timer); 5876 struct rxtable_entry *entry; 5877 uint32_t io_cnt; 5878 uint32_t head, tail; 5879 uint32_t busy, max_read; 5880 uint64_t total, rcv, lat, mbpi, extra; 5881 int timer_interval = LPFC_CMF_INTERVAL; 5882 uint32_t ms; 5883 struct lpfc_cgn_stat *cgs; 5884 int cpu; 5885 5886 /* Only restart the timer if congestion mgmt is on */ 5887 if (phba->cmf_active_mode == LPFC_CFG_OFF || 5888 !phba->cmf_latency.tv_sec) { 5889 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5890 "6224 CMF timer exit: %d %lld\n", 5891 phba->cmf_active_mode, 5892 (uint64_t)phba->cmf_latency.tv_sec); 5893 return HRTIMER_NORESTART; 5894 } 5895 5896 /* If pport is not ready yet, just exit and wait for 5897 * the next timer cycle to hit. 5898 */ 5899 if (!phba->pport) 5900 goto skip; 5901 5902 /* Do not block SCSI IO while in the timer routine since 5903 * total_bytes will be cleared 5904 */ 5905 atomic_set(&phba->cmf_stop_io, 1); 5906 5907 /* First we need to calculate the actual ms between 5908 * the last timer interrupt and this one. We ask for 5909 * LPFC_CMF_INTERVAL, however the actual time may 5910 * vary depending on system overhead. 5911 */ 5912 ms = lpfc_calc_cmf_latency(phba); 5913 5914 5915 /* Immediately after we calculate the time since the last 5916 * timer interrupt, set the start time for the next 5917 * interrupt 5918 */ 5919 ktime_get_real_ts64(&phba->cmf_latency); 5920 5921 phba->cmf_link_byte_count = 5922 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); 5923 5924 /* Collect all the stats from the prior timer interval */ 5925 total = 0; 5926 io_cnt = 0; 5927 lat = 0; 5928 rcv = 0; 5929 for_each_present_cpu(cpu) { 5930 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 5931 total += atomic64_xchg(&cgs->total_bytes, 0); 5932 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); 5933 lat += atomic64_xchg(&cgs->rx_latency, 0); 5934 rcv += atomic64_xchg(&cgs->rcv_bytes, 0); 5935 } 5936 5937 /* Before we issue another CMF_SYNC_WQE, retrieve the BW 5938 * returned from the last CMF_SYNC_WQE issued, from 5939 * cmf_last_sync_bw. This will be the target BW for 5940 * this next timer interval. 
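 * As a hedged illustration of the slop factor applied below (numbers are
 * illustrative only): with total = 1,000,000 bytes, extra = 1,000,000 / 50 =
 * 20,000 bytes (2%) when ms equals LPFC_CMF_INTERVAL, or 1,000,000 / 33 =
 * 30,303 bytes (~3%) when the interval came in short.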
5941 */ 5942 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 5943 phba->link_state != LPFC_LINK_DOWN && 5944 phba->hba_flag & HBA_SETUP) { 5945 mbpi = phba->cmf_last_sync_bw; 5946 phba->cmf_last_sync_bw = 0; 5947 extra = 0; 5948 5949 /* Calculate any extra bytes needed to account for the 5950 * timer accuracy. If we are less than LPFC_CMF_INTERVAL 5951 * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL 5952 * add an extra 2%. The goal is to equalize total with a 5953 * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1 5954 */ 5955 if (ms == LPFC_CMF_INTERVAL) 5956 extra = div_u64(total, 50); 5957 else if (ms < LPFC_CMF_INTERVAL) 5958 extra = div_u64(total, 33); 5959 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); 5960 } else { 5961 /* For Monitor mode or link down we want mbpi 5962 * to be the full link speed 5963 */ 5964 mbpi = phba->cmf_link_byte_count; 5965 } 5966 phba->cmf_timer_cnt++; 5967 5968 if (io_cnt) { 5969 /* Update congestion info buffer latency in us */ 5970 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); 5971 atomic64_add(lat, &phba->cgn_latency_evt); 5972 } 5973 busy = atomic_xchg(&phba->cmf_busy, 0); 5974 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); 5975 5976 /* Calculate MBPI for the next timer interval */ 5977 if (mbpi) { 5978 if (mbpi > phba->cmf_link_byte_count || 5979 phba->cmf_active_mode == LPFC_CFG_MONITOR) 5980 mbpi = phba->cmf_link_byte_count; 5981 5982 /* Change max_bytes_per_interval to what the prior 5983 * CMF_SYNC_WQE cmpl indicated. 5984 */ 5985 if (mbpi != phba->cmf_max_bytes_per_interval) 5986 phba->cmf_max_bytes_per_interval = mbpi; 5987 } 5988 5989 /* Save rxmonitor information for debug */ 5990 if (phba->rxtable) { 5991 head = atomic_xchg(&phba->rxtable_idx_head, 5992 LPFC_RXMONITOR_TABLE_IN_USE); 5993 entry = &phba->rxtable[head]; 5994 entry->total_bytes = total; 5995 entry->rcv_bytes = rcv; 5996 entry->cmf_busy = busy; 5997 entry->cmf_info = phba->cmf_active_info; 5998 if (io_cnt) { 5999 entry->avg_io_latency = div_u64(lat, io_cnt); 6000 entry->avg_io_size = div_u64(rcv, io_cnt); 6001 } else { 6002 entry->avg_io_latency = 0; 6003 entry->avg_io_size = 0; 6004 } 6005 entry->max_read_cnt = max_read; 6006 entry->io_cnt = io_cnt; 6007 entry->max_bytes_per_interval = mbpi; 6008 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 6009 entry->timer_utilization = phba->cmf_last_ts; 6010 else 6011 entry->timer_utilization = ms; 6012 entry->timer_interval = ms; 6013 phba->cmf_last_ts = 0; 6014 6015 /* Increment rxtable index */ 6016 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY; 6017 tail = atomic_read(&phba->rxtable_idx_tail); 6018 if (head == tail) { 6019 tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY; 6020 atomic_set(&phba->rxtable_idx_tail, tail); 6021 } 6022 atomic_set(&phba->rxtable_idx_head, head); 6023 } 6024 6025 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { 6026 /* If Monitor mode, check if we are oversubscribed 6027 * against the full line rate. 6028 */ 6029 if (mbpi && total > mbpi) 6030 atomic_inc(&phba->cgn_driver_evt_cnt); 6031 } 6032 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ 6033 6034 /* Each minute save Fabric and Driver congestion information */ 6035 lpfc_cgn_save_evt_cnt(phba); 6036 6037 /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the 6038 * minute, adjust our next timer interval, if needed, to ensure a 6039 * 1 minute granularity when we get the next timer interrupt. 
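 * For example (editorial, hypothetical numbers): if the next one-minute
 * boundary (cgn_evt_timestamp) is only 40 ms away, timer_interval is shrunk
 * from LPFC_CMF_INTERVAL to 40 and cmf_link_byte_count is rescaled to
 * cmf_max_line_rate * 40 / 1000, so the shorter interval keeps the same
 * byte budget per unit time.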
6040 */ 6041 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL), 6042 phba->cgn_evt_timestamp)) { 6043 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp - 6044 jiffies); 6045 if (timer_interval <= 0) 6046 timer_interval = LPFC_CMF_INTERVAL; 6047 6048 /* If we adjust timer_interval, max_bytes_per_interval 6049 * needs to be adjusted as well. 6050 */ 6051 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 6052 timer_interval, 1000); 6053 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) 6054 phba->cmf_max_bytes_per_interval = 6055 phba->cmf_link_byte_count; 6056 } 6057 6058 /* Since total_bytes has already been zero'ed, it's okay to unblock 6059 * after max_bytes_per_interval is set up. 6060 */ 6061 if (atomic_xchg(&phba->cmf_bw_wait, 0)) 6062 queue_work(phba->wq, &phba->unblock_request_work); 6063 6064 /* SCSI IO is now unblocked */ 6065 atomic_set(&phba->cmf_stop_io, 0); 6066 6067 skip: 6068 hrtimer_forward_now(timer, 6069 ktime_set(0, timer_interval * NSEC_PER_MSEC)); 6070 return HRTIMER_RESTART; 6071 } 6072 6073 #define trunk_link_status(__idx)\ 6074 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6075 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ 6076 "Link up" : "Link down") : "NA" 6077 /* Did port __idx report an error? */ 6078 #define trunk_port_fault(__idx)\ 6079 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6080 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA" 6081 6082 static void 6083 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 6084 struct lpfc_acqe_fc_la *acqe_fc) 6085 { 6086 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 6087 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 6088 6089 phba->sli4_hba.link_state.speed = 6090 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6091 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6092 6093 phba->sli4_hba.link_state.logical_speed = 6094 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6095 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 6096 phba->fc_linkspeed = 6097 lpfc_async_link_speed_to_read_top( 6098 phba, 6099 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6100 6101 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 6102 phba->trunk_link.link0.state = 6103 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 6104 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6105 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 6106 } 6107 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 6108 phba->trunk_link.link1.state = 6109 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 6110 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6111 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 6112 } 6113 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 6114 phba->trunk_link.link2.state = 6115 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 6116 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6117 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 6118 } 6119 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 6120 phba->trunk_link.link3.state = 6121 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 6122 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6123 phba->trunk_link.link3.fault = port_fault & 0x8 ?
err : 0; 6124 } 6125 6126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6127 "2910 Async FC Trunking Event - Speed:%d\n" 6128 "\tLogical speed:%d " 6129 "port0: %s port1: %s port2: %s port3: %s\n", 6130 phba->sli4_hba.link_state.speed, 6131 phba->sli4_hba.link_state.logical_speed, 6132 trunk_link_status(0), trunk_link_status(1), 6133 trunk_link_status(2), trunk_link_status(3)); 6134 6135 if (phba->cmf_active_mode != LPFC_CFG_OFF) 6136 lpfc_cmf_signal_init(phba); 6137 6138 if (port_fault) 6139 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6140 "3202 trunk error:0x%x (%s) seen on port0:%s " 6141 /* 6142 * SLI-4: We have only 0xA error codes 6143 * defined as of now. print an appropriate 6144 * message in case driver needs to be updated. 6145 */ 6146 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 6147 "UNDEFINED. update driver." : trunk_errmsg[err], 6148 trunk_port_fault(0), trunk_port_fault(1), 6149 trunk_port_fault(2), trunk_port_fault(3)); 6150 } 6151 6152 6153 /** 6154 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 6155 * @phba: pointer to lpfc hba data structure. 6156 * @acqe_fc: pointer to the async fc completion queue entry. 6157 * 6158 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 6159 * that the event was received and then issue a read_topology mailbox command so 6160 * that the rest of the driver will treat it the same as SLI3. 6161 **/ 6162 static void 6163 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 6164 { 6165 struct lpfc_dmabuf *mp; 6166 LPFC_MBOXQ_t *pmb; 6167 MAILBOX_t *mb; 6168 struct lpfc_mbx_read_top *la; 6169 int rc; 6170 6171 if (bf_get(lpfc_trailer_type, acqe_fc) != 6172 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 6173 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6174 "2895 Non FC link Event detected.(%d)\n", 6175 bf_get(lpfc_trailer_type, acqe_fc)); 6176 return; 6177 } 6178 6179 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6180 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 6181 lpfc_update_trunk_link_status(phba, acqe_fc); 6182 return; 6183 } 6184 6185 /* Keep the link status for extra SLI4 state machine reference */ 6186 phba->sli4_hba.link_state.speed = 6187 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6188 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6189 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 6190 phba->sli4_hba.link_state.topology = 6191 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 6192 phba->sli4_hba.link_state.status = 6193 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 6194 phba->sli4_hba.link_state.type = 6195 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 6196 phba->sli4_hba.link_state.number = 6197 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 6198 phba->sli4_hba.link_state.fault = 6199 bf_get(lpfc_acqe_link_fault, acqe_fc); 6200 6201 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6202 LPFC_FC_LA_TYPE_LINK_DOWN) 6203 phba->sli4_hba.link_state.logical_speed = 0; 6204 else if (!phba->sli4_hba.conf_trunk) 6205 phba->sli4_hba.link_state.logical_speed = 6206 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6207 6208 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6209 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 6210 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 6211 "%dMbps Fault:%d\n", 6212 phba->sli4_hba.link_state.speed, 6213 phba->sli4_hba.link_state.topology, 6214 phba->sli4_hba.link_state.status, 6215 phba->sli4_hba.link_state.type, 6216 phba->sli4_hba.link_state.number, 6217 phba->sli4_hba.link_state.logical_speed, 6218 
phba->sli4_hba.link_state.fault); 6219 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6220 if (!pmb) { 6221 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6222 "2897 The mboxq allocation failed\n"); 6223 return; 6224 } 6225 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6226 if (!mp) { 6227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6228 "2898 The lpfc_dmabuf allocation failed\n"); 6229 goto out_free_pmb; 6230 } 6231 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 6232 if (!mp->virt) { 6233 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6234 "2899 The mbuf allocation failed\n"); 6235 goto out_free_dmabuf; 6236 } 6237 6238 /* Cleanup any outstanding ELS commands */ 6239 lpfc_els_flush_all_cmd(phba); 6240 6241 /* Block ELS IOCBs until we have done process link event */ 6242 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 6243 6244 /* Update link event statistics */ 6245 phba->sli.slistat.link_event++; 6246 6247 /* Create lpfc_handle_latt mailbox command from link ACQE */ 6248 lpfc_read_topology(phba, pmb, mp); 6249 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 6250 pmb->vport = phba->pport; 6251 6252 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 6253 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 6254 6255 switch (phba->sli4_hba.link_state.status) { 6256 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 6257 phba->link_flag |= LS_MDS_LINK_DOWN; 6258 break; 6259 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 6260 phba->link_flag |= LS_MDS_LOOPBACK; 6261 break; 6262 default: 6263 break; 6264 } 6265 6266 /* Initialize completion status */ 6267 mb = &pmb->u.mb; 6268 mb->mbxStatus = MBX_SUCCESS; 6269 6270 /* Parse port fault information field */ 6271 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 6272 6273 /* Parse and translate link attention fields */ 6274 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 6275 la->eventTag = acqe_fc->event_tag; 6276 6277 if (phba->sli4_hba.link_state.status == 6278 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 6279 bf_set(lpfc_mbx_read_top_att_type, la, 6280 LPFC_FC_LA_TYPE_UNEXP_WWPN); 6281 } else { 6282 bf_set(lpfc_mbx_read_top_att_type, la, 6283 LPFC_FC_LA_TYPE_LINK_DOWN); 6284 } 6285 /* Invoke the mailbox command callback function */ 6286 lpfc_mbx_cmpl_read_topology(phba, pmb); 6287 6288 return; 6289 } 6290 6291 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 6292 if (rc == MBX_NOT_FINISHED) 6293 goto out_free_dmabuf; 6294 return; 6295 6296 out_free_dmabuf: 6297 kfree(mp); 6298 out_free_pmb: 6299 mempool_free(pmb, phba->mbox_mem_pool); 6300 } 6301 6302 /** 6303 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 6304 * @phba: pointer to lpfc hba data structure. 6305 * @acqe_sli: pointer to the async SLI completion queue entry. 6306 * 6307 * This routine is to handle the SLI4 asynchronous SLI events. 
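 *
 * Editorial summary of the dispatch below (not in the original comment): the
 * ACQE trailer type selects among over/normal temperature events,
 * misconfigured-optics reports, remote D_Port diagnostics, port parameter
 * (congestion) changes, FA-WWN misconfiguration, EEPROM failure and
 * congestion-signal counts; unrecognized types are simply logged.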
6308 **/ 6309 static void 6310 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 6311 { 6312 char port_name; 6313 char message[128]; 6314 uint8_t status; 6315 uint8_t evt_type; 6316 uint8_t operational = 0; 6317 struct temp_event temp_event_data; 6318 struct lpfc_acqe_misconfigured_event *misconfigured; 6319 struct lpfc_acqe_cgn_signal *cgn_signal; 6320 struct Scsi_Host *shost; 6321 struct lpfc_vport **vports; 6322 int rc, i, cnt; 6323 6324 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 6325 6326 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6327 "2901 Async SLI event - Type:%d, Event Data: x%08x " 6328 "x%08x x%08x x%08x\n", evt_type, 6329 acqe_sli->event_data1, acqe_sli->event_data2, 6330 acqe_sli->reserved, acqe_sli->trailer); 6331 6332 port_name = phba->Port[0]; 6333 if (port_name == 0x00) 6334 port_name = '?'; /* get port name is empty */ 6335 6336 switch (evt_type) { 6337 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 6338 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6339 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 6340 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6341 6342 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6343 "3190 Over Temperature:%d Celsius- Port Name %c\n", 6344 acqe_sli->event_data1, port_name); 6345 6346 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 6347 shost = lpfc_shost_from_vport(phba->pport); 6348 fc_host_post_vendor_event(shost, fc_get_event_number(), 6349 sizeof(temp_event_data), 6350 (char *)&temp_event_data, 6351 SCSI_NL_VID_TYPE_PCI 6352 | PCI_VENDOR_ID_EMULEX); 6353 break; 6354 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 6355 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6356 temp_event_data.event_code = LPFC_NORMAL_TEMP; 6357 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6358 6359 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6360 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 6361 acqe_sli->event_data1, port_name); 6362 6363 shost = lpfc_shost_from_vport(phba->pport); 6364 fc_host_post_vendor_event(shost, fc_get_event_number(), 6365 sizeof(temp_event_data), 6366 (char *)&temp_event_data, 6367 SCSI_NL_VID_TYPE_PCI 6368 | PCI_VENDOR_ID_EMULEX); 6369 break; 6370 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 6371 misconfigured = (struct lpfc_acqe_misconfigured_event *) 6372 &acqe_sli->event_data1; 6373 6374 /* fetch the status for this port */ 6375 switch (phba->sli4_hba.lnk_info.lnk_no) { 6376 case LPFC_LINK_NUMBER_0: 6377 status = bf_get(lpfc_sli_misconfigured_port0_state, 6378 &misconfigured->theEvent); 6379 operational = bf_get(lpfc_sli_misconfigured_port0_op, 6380 &misconfigured->theEvent); 6381 break; 6382 case LPFC_LINK_NUMBER_1: 6383 status = bf_get(lpfc_sli_misconfigured_port1_state, 6384 &misconfigured->theEvent); 6385 operational = bf_get(lpfc_sli_misconfigured_port1_op, 6386 &misconfigured->theEvent); 6387 break; 6388 case LPFC_LINK_NUMBER_2: 6389 status = bf_get(lpfc_sli_misconfigured_port2_state, 6390 &misconfigured->theEvent); 6391 operational = bf_get(lpfc_sli_misconfigured_port2_op, 6392 &misconfigured->theEvent); 6393 break; 6394 case LPFC_LINK_NUMBER_3: 6395 status = bf_get(lpfc_sli_misconfigured_port3_state, 6396 &misconfigured->theEvent); 6397 operational = bf_get(lpfc_sli_misconfigured_port3_op, 6398 &misconfigured->theEvent); 6399 break; 6400 default: 6401 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6402 "3296 " 6403 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 6404 "event: Invalid link %d", 6405 phba->sli4_hba.lnk_info.lnk_no); 6406 return; 6407 } 6408 6409 /* Skip if optic 
state unchanged */ 6410 if (phba->sli4_hba.lnk_info.optic_state == status) 6411 return; 6412 6413 switch (status) { 6414 case LPFC_SLI_EVENT_STATUS_VALID: 6415 sprintf(message, "Physical Link is functional"); 6416 break; 6417 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 6418 sprintf(message, "Optics faulted/incorrectly " 6419 "installed/not installed - Reseat optics, " 6420 "if issue not resolved, replace."); 6421 break; 6422 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 6423 sprintf(message, 6424 "Optics of two types installed - Remove one " 6425 "optic or install matching pair of optics."); 6426 break; 6427 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 6428 sprintf(message, "Incompatible optics - Replace with " 6429 "compatible optics for card to function."); 6430 break; 6431 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 6432 sprintf(message, "Unqualified optics - Replace with " 6433 "Avago optics for Warranty and Technical " 6434 "Support - Link is%s operational", 6435 (operational) ? " not" : ""); 6436 break; 6437 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 6438 sprintf(message, "Uncertified optics - Replace with " 6439 "Avago-certified optics to enable link " 6440 "operation - Link is%s operational", 6441 (operational) ? " not" : ""); 6442 break; 6443 default: 6444 /* firmware is reporting a status we don't know about */ 6445 sprintf(message, "Unknown event status x%02x", status); 6446 break; 6447 } 6448 6449 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 6450 rc = lpfc_sli4_read_config(phba); 6451 if (rc) { 6452 phba->lmt = 0; 6453 lpfc_printf_log(phba, KERN_ERR, 6454 LOG_TRACE_EVENT, 6455 "3194 Unable to retrieve supported " 6456 "speeds, rc = 0x%x\n", rc); 6457 } 6458 vports = lpfc_create_vport_work_array(phba); 6459 if (vports != NULL) { 6460 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6461 i++) { 6462 shost = lpfc_shost_from_vport(vports[i]); 6463 lpfc_host_supported_speeds_set(shost); 6464 } 6465 } 6466 lpfc_destroy_vport_work_array(phba, vports); 6467 6468 phba->sli4_hba.lnk_info.optic_state = status; 6469 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6470 "3176 Port Name %c %s\n", port_name, message); 6471 break; 6472 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 6473 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6474 "3192 Remote DPort Test Initiated - " 6475 "Event Data1:x%08x Event Data2: x%08x\n", 6476 acqe_sli->event_data1, acqe_sli->event_data2); 6477 break; 6478 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: 6479 /* Call FW to obtain active parms */ 6480 lpfc_sli4_cgn_parm_chg_evt(phba); 6481 break; 6482 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 6483 /* Misconfigured WWN. Reports that the SLI Port is configured 6484 * to use FA-WWN, but the attached device doesn’t support it. 6485 * No driver action is required. 6486 * Event Data1 - N.A, Event Data2 - N.A 6487 */ 6488 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, 6489 "2699 Misconfigured FA-WWN - Attached device does " 6490 "not support FA-WWN\n"); 6491 break; 6492 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 6493 /* EEPROM failure. 
No driver action is required */ 6494 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6495 "2518 EEPROM failure - " 6496 "Event Data1: x%08x Event Data2: x%08x\n", 6497 acqe_sli->event_data1, acqe_sli->event_data2); 6498 break; 6499 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: 6500 if (phba->cmf_active_mode == LPFC_CFG_OFF) 6501 break; 6502 cgn_signal = (struct lpfc_acqe_cgn_signal *) 6503 &acqe_sli->event_data1; 6504 phba->cgn_acqe_cnt++; 6505 6506 cnt = bf_get(lpfc_warn_acqe, cgn_signal); 6507 atomic64_add(cnt, &phba->cgn_acqe_stat.warn); 6508 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); 6509 6510 /* no threshold for CMF, even 1 signal will trigger an event */ 6511 6512 /* Alarm overrides warning, so check that first */ 6513 if (cgn_signal->alarm_cnt) { 6514 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6515 /* Keep track of alarm cnt for cgn_info */ 6516 atomic_add(cgn_signal->alarm_cnt, 6517 &phba->cgn_fabric_alarm_cnt); 6518 /* Keep track of alarm cnt for CMF_SYNC_WQE */ 6519 atomic_add(cgn_signal->alarm_cnt, 6520 &phba->cgn_sync_alarm_cnt); 6521 } 6522 } else if (cnt) { 6523 /* signal action needs to be taken */ 6524 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 6525 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6526 /* Keep track of warning cnt for cgn_info */ 6527 atomic_add(cnt, &phba->cgn_fabric_warn_cnt); 6528 /* Keep track of warning cnt for CMF_SYNC_WQE */ 6529 atomic_add(cnt, &phba->cgn_sync_warn_cnt); 6530 } 6531 } 6532 break; 6533 default: 6534 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6535 "3193 Unrecognized SLI event, type: 0x%x", 6536 evt_type); 6537 break; 6538 } 6539 } 6540 6541 /** 6542 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 6543 * @vport: pointer to vport data structure. 6544 * 6545 * This routine is to perform Clear Virtual Link (CVL) on a vport in 6546 * response to a CVL event. 6547 * 6548 * Return the pointer to the ndlp with the vport if successful, otherwise 6549 * return NULL. 6550 **/ 6551 static struct lpfc_nodelist * 6552 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 6553 { 6554 struct lpfc_nodelist *ndlp; 6555 struct Scsi_Host *shost; 6556 struct lpfc_hba *phba; 6557 6558 if (!vport) 6559 return NULL; 6560 phba = vport->phba; 6561 if (!phba) 6562 return NULL; 6563 ndlp = lpfc_findnode_did(vport, Fabric_DID); 6564 if (!ndlp) { 6565 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6566 ndlp = lpfc_nlp_init(vport, Fabric_DID); 6567 if (!ndlp) 6568 return NULL; 6569 /* Set the node type */ 6570 ndlp->nlp_type |= NLP_FABRIC; 6571 /* Put ndlp onto node list */ 6572 lpfc_enqueue_node(vport, ndlp); 6573 } 6574 if ((phba->pport->port_state < LPFC_FLOGI) && 6575 (phba->pport->port_state != LPFC_VPORT_FAILED)) 6576 return NULL; 6577 /* If virtual link is not yet instantiated ignore CVL */ 6578 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 6579 && (vport->port_state != LPFC_VPORT_FAILED)) 6580 return NULL; 6581 shost = lpfc_shost_from_vport(vport); 6582 if (!shost) 6583 return NULL; 6584 lpfc_linkdown_port(vport); 6585 lpfc_cleanup_pending_mbox(vport); 6586 spin_lock_irq(shost->host_lock); 6587 vport->fc_flag |= FC_VPORT_CVL_RCVD; 6588 spin_unlock_irq(shost->host_lock); 6589 6590 return ndlp; 6591 } 6592 6593 /** 6594 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 6595 * @phba: pointer to lpfc hba data structure. 6596 * 6597 * This routine is to perform Clear Virtual Link (CVL) on all vports in 6598 * response to a FCF dead event. 
6599 **/ 6600 static void 6601 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 6602 { 6603 struct lpfc_vport **vports; 6604 int i; 6605 6606 vports = lpfc_create_vport_work_array(phba); 6607 if (vports) 6608 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 6609 lpfc_sli4_perform_vport_cvl(vports[i]); 6610 lpfc_destroy_vport_work_array(phba, vports); 6611 } 6612 6613 /** 6614 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 6615 * @phba: pointer to lpfc hba data structure. 6616 * @acqe_fip: pointer to the async fcoe completion queue entry. 6617 * 6618 * This routine is to handle the SLI4 asynchronous fcoe event. 6619 **/ 6620 static void 6621 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 6622 struct lpfc_acqe_fip *acqe_fip) 6623 { 6624 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 6625 int rc; 6626 struct lpfc_vport *vport; 6627 struct lpfc_nodelist *ndlp; 6628 int active_vlink_present; 6629 struct lpfc_vport **vports; 6630 int i; 6631 6632 phba->fc_eventTag = acqe_fip->event_tag; 6633 phba->fcoe_eventtag = acqe_fip->event_tag; 6634 switch (event_type) { 6635 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 6636 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 6637 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 6638 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6639 "2546 New FCF event, evt_tag:x%x, " 6640 "index:x%x\n", 6641 acqe_fip->event_tag, 6642 acqe_fip->index); 6643 else 6644 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 6645 LOG_DISCOVERY, 6646 "2788 FCF param modified event, " 6647 "evt_tag:x%x, index:x%x\n", 6648 acqe_fip->event_tag, 6649 acqe_fip->index); 6650 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6651 /* 6652 * During period of FCF discovery, read the FCF 6653 * table record indexed by the event to update 6654 * FCF roundrobin failover eligible FCF bmask. 6655 */ 6656 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6657 LOG_DISCOVERY, 6658 "2779 Read FCF (x%x) for updating " 6659 "roundrobin FCF failover bmask\n", 6660 acqe_fip->index); 6661 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 6662 } 6663 6664 /* If the FCF discovery is in progress, do nothing. */ 6665 spin_lock_irq(&phba->hbalock); 6666 if (phba->hba_flag & FCF_TS_INPROG) { 6667 spin_unlock_irq(&phba->hbalock); 6668 break; 6669 } 6670 /* If fast FCF failover rescan event is pending, do nothing */ 6671 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 6672 spin_unlock_irq(&phba->hbalock); 6673 break; 6674 } 6675 6676 /* If the FCF has been in discovered state, do nothing. 
*/ 6677 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 6678 spin_unlock_irq(&phba->hbalock); 6679 break; 6680 } 6681 spin_unlock_irq(&phba->hbalock); 6682 6683 /* Otherwise, scan the entire FCF table and re-discover SAN */ 6684 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6685 "2770 Start FCF table scan per async FCF " 6686 "event, evt_tag:x%x, index:x%x\n", 6687 acqe_fip->event_tag, acqe_fip->index); 6688 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 6689 LPFC_FCOE_FCF_GET_FIRST); 6690 if (rc) 6691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6692 "2547 Issue FCF scan read FCF mailbox " 6693 "command failed (x%x)\n", rc); 6694 break; 6695 6696 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 6697 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6698 "2548 FCF Table full count 0x%x tag 0x%x\n", 6699 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 6700 acqe_fip->event_tag); 6701 break; 6702 6703 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 6704 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6705 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6706 "2549 FCF (x%x) disconnected from network, " 6707 "tag:x%x\n", acqe_fip->index, 6708 acqe_fip->event_tag); 6709 /* 6710 * If we are in the middle of FCF failover process, clear 6711 * the corresponding FCF bit in the roundrobin bitmap. 6712 */ 6713 spin_lock_irq(&phba->hbalock); 6714 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 6715 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 6716 spin_unlock_irq(&phba->hbalock); 6717 /* Update FLOGI FCF failover eligible FCF bmask */ 6718 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 6719 break; 6720 } 6721 spin_unlock_irq(&phba->hbalock); 6722 6723 /* If the event is not for currently used fcf do nothing */ 6724 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 6725 break; 6726 6727 /* 6728 * Otherwise, request the port to rediscover the entire FCF 6729 * table for a fast recovery from the case that the current FCF 6730 * is no longer valid as we are not in the middle of FCF 6731 * failover process already. 6732 */ 6733 spin_lock_irq(&phba->hbalock); 6734 /* Mark the fast failover process in progress */ 6735 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 6736 spin_unlock_irq(&phba->hbalock); 6737 6738 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6739 "2771 Start FCF fast failover process due to " 6740 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 6741 "\n", acqe_fip->event_tag, acqe_fip->index); 6742 rc = lpfc_sli4_redisc_fcf_table(phba); 6743 if (rc) { 6744 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6745 LOG_TRACE_EVENT, 6746 "2772 Issue FCF rediscover mailbox " 6747 "command failed, fail through to FCF " 6748 "dead event\n"); 6749 spin_lock_irq(&phba->hbalock); 6750 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 6751 spin_unlock_irq(&phba->hbalock); 6752 /* 6753 * Last resort will fail over by treating this 6754 * as a link down to FCF registration. 6755 */ 6756 lpfc_sli4_fcf_dead_failthrough(phba); 6757 } else { 6758 /* Reset FCF roundrobin bmask for new discovery */ 6759 lpfc_sli4_clear_fcf_rr_bmask(phba); 6760 /* 6761 * Handling fast FCF failover to a DEAD FCF event is 6762 * considered equivalent to receiving CVL to all vports.
6763 */ 6764 lpfc_sli4_perform_all_vport_cvl(phba); 6765 } 6766 break; 6767 case LPFC_FIP_EVENT_TYPE_CVL: 6768 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6769 lpfc_printf_log(phba, KERN_ERR, 6770 LOG_TRACE_EVENT, 6771 "2718 Clear Virtual Link Received for VPI 0x%x" 6772 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 6773 6774 vport = lpfc_find_vport_by_vpid(phba, 6775 acqe_fip->index); 6776 ndlp = lpfc_sli4_perform_vport_cvl(vport); 6777 if (!ndlp) 6778 break; 6779 active_vlink_present = 0; 6780 6781 vports = lpfc_create_vport_work_array(phba); 6782 if (vports) { 6783 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6784 i++) { 6785 if ((!(vports[i]->fc_flag & 6786 FC_VPORT_CVL_RCVD)) && 6787 (vports[i]->port_state > LPFC_FDISC)) { 6788 active_vlink_present = 1; 6789 break; 6790 } 6791 } 6792 lpfc_destroy_vport_work_array(phba, vports); 6793 } 6794 6795 /* 6796 * Don't re-instantiate if vport is marked for deletion. 6797 * If we are here first then vport_delete is going to wait 6798 * for discovery to complete. 6799 */ 6800 if (!(vport->load_flag & FC_UNLOADING) && 6801 active_vlink_present) { 6802 /* 6803 * If there are other active VLinks present, 6804 * re-instantiate the Vlink using FDISC. 6805 */ 6806 mod_timer(&ndlp->nlp_delayfunc, 6807 jiffies + msecs_to_jiffies(1000)); 6808 spin_lock_irq(&ndlp->lock); 6809 ndlp->nlp_flag |= NLP_DELAY_TMO; 6810 spin_unlock_irq(&ndlp->lock); 6811 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 6812 vport->port_state = LPFC_FDISC; 6813 } else { 6814 /* 6815 * Otherwise, we request the port to rediscover 6816 * the entire FCF table for a fast recovery 6817 * from the possible case that the current FCF 6818 * is no longer valid if we are not already 6819 * in the FCF failover process. 6820 */ 6821 spin_lock_irq(&phba->hbalock); 6822 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6823 spin_unlock_irq(&phba->hbalock); 6824 break; 6825 } 6826 /* Mark the fast failover process in progress */ 6827 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 6828 spin_unlock_irq(&phba->hbalock); 6829 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6830 LOG_DISCOVERY, 6831 "2773 Start FCF failover per CVL, " 6832 "evt_tag:x%x\n", acqe_fip->event_tag); 6833 rc = lpfc_sli4_redisc_fcf_table(phba); 6834 if (rc) { 6835 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6836 LOG_TRACE_EVENT, 6837 "2774 Issue FCF rediscover " 6838 "mailbox command failed, fail " 6839 "through to CVL event\n"); 6840 spin_lock_irq(&phba->hbalock); 6841 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 6842 spin_unlock_irq(&phba->hbalock); 6843 /* 6844 * Last resort will be re-try on 6845 * the current registered FCF entry. 6846 */ 6847 lpfc_retry_pport_discovery(phba); 6848 } else 6849 /* 6850 * Reset FCF roundrobin bmask for new 6851 * discovery. 6852 */ 6853 lpfc_sli4_clear_fcf_rr_bmask(phba); 6854 } 6855 break; 6856 default: 6857 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6858 "0288 Unknown FCoE event type 0x%x event tag " 6859 "0x%x\n", event_type, acqe_fip->event_tag); 6860 break; 6861 } 6862 } 6863 6864 /** 6865 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 6866 * @phba: pointer to lpfc hba data structure. 6867 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 6868 * 6869 * This routine is to handle the SLI4 asynchronous dcbx event.
6870 **/ 6871 static void 6872 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 6873 struct lpfc_acqe_dcbx *acqe_dcbx) 6874 { 6875 phba->fc_eventTag = acqe_dcbx->event_tag; 6876 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6877 "0290 The SLI4 DCBX asynchronous event is not " 6878 "handled yet\n"); 6879 } 6880 6881 /** 6882 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 6883 * @phba: pointer to lpfc hba data structure. 6884 * @acqe_grp5: pointer to the async grp5 completion queue entry. 6885 * 6886 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 6887 * is an asynchronous notification of a logical link speed change. The Port 6888 * reports the logical link speed in units of 10Mbps. 6889 **/ 6890 static void 6891 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 6892 struct lpfc_acqe_grp5 *acqe_grp5) 6893 { 6894 uint16_t prev_ll_spd; 6895 6896 phba->fc_eventTag = acqe_grp5->event_tag; 6897 phba->fcoe_eventtag = acqe_grp5->event_tag; 6898 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 6899 phba->sli4_hba.link_state.logical_speed = 6900 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 6901 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6902 "2789 GRP5 Async Event: Updating logical link speed " 6903 "from %dMbps to %dMbps\n", prev_ll_spd, 6904 phba->sli4_hba.link_state.logical_speed); 6905 } 6906 6907 /** 6908 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event 6909 * @phba: pointer to lpfc hba data structure. 6910 * 6911 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event 6912 * is an asynchronous notification of a request to reset CM stats. 6913 **/ 6914 static void 6915 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) 6916 { 6917 if (!phba->cgn_i) 6918 return; 6919 lpfc_init_congestion_stat(phba); 6920 } 6921 6922 /** 6923 * lpfc_cgn_params_val - Validate FW congestion parameters. 6924 * @phba: pointer to lpfc hba data structure. 6925 * @p_cfg_param: pointer to FW provided congestion parameters. 6926 * 6927 * This routine validates the congestion parameters passed 6928 * by the FW to the driver via an ACQE event. 6929 **/ 6930 static void 6931 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) 6932 { 6933 spin_lock_irq(&phba->hbalock); 6934 6935 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, 6936 LPFC_CFG_MONITOR)) { 6937 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 6938 "6225 CMF mode param out of range: %d\n", 6939 p_cfg_param->cgn_param_mode); 6940 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; 6941 } 6942 6943 spin_unlock_irq(&phba->hbalock); 6944 } 6945 6946 /** 6947 * lpfc_cgn_params_parse - Process a FW cong parm change event 6948 * @phba: pointer to lpfc hba data structure. 6949 * @p_cgn_param: pointer to a data buffer with the FW cong params. 6950 * @len: the size of pdata in bytes. 6951 * 6952 * This routine validates the congestion management buffer signature 6953 * from the FW and validates the contents, correcting any 6954 * out-of-range values. If the signature magic is correct and 6955 * after parameter validation, the contents are copied to the driver's 6956 * @phba structure. If the magic is incorrect, an error message is 6957 * logged.
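 *
 * Editorial note on the mode transitions handled below (not in the original
 * comment): OFF to MANAGED or MONITOR starts CMF and, if the link is up,
 * issues an EDC ELS; MANAGED or MONITOR to OFF stops CMF; MANAGED to MONITOR
 * opens max_bytes_per_interval to the full link byte count and unblocks
 * waiting IO; MONITOR to MANAGED re-initializes the congestion signal
 * exchange.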
6958 **/ 6959 static void 6960 lpfc_cgn_params_parse(struct lpfc_hba *phba, 6961 struct lpfc_cgn_param *p_cgn_param, uint32_t len) 6962 { 6963 struct lpfc_cgn_info *cp; 6964 uint32_t crc, oldmode; 6965 6966 /* Make sure the FW has encoded the correct magic number to 6967 * validate the congestion parameter in FW memory. 6968 */ 6969 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { 6970 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 6971 "4668 FW cgn parm buffer data: " 6972 "magic 0x%x version %d mode %d " 6973 "level0 %d level1 %d " 6974 "level2 %d byte13 %d " 6975 "byte14 %d byte15 %d " 6976 "byte11 %d byte12 %d activeMode %d\n", 6977 p_cgn_param->cgn_param_magic, 6978 p_cgn_param->cgn_param_version, 6979 p_cgn_param->cgn_param_mode, 6980 p_cgn_param->cgn_param_level0, 6981 p_cgn_param->cgn_param_level1, 6982 p_cgn_param->cgn_param_level2, 6983 p_cgn_param->byte13, 6984 p_cgn_param->byte14, 6985 p_cgn_param->byte15, 6986 p_cgn_param->byte11, 6987 p_cgn_param->byte12, 6988 phba->cmf_active_mode); 6989 6990 oldmode = phba->cmf_active_mode; 6991 6992 /* Any parameters out of range are corrected to defaults 6993 * by this routine. No need to fail. 6994 */ 6995 lpfc_cgn_params_val(phba, p_cgn_param); 6996 6997 /* Parameters are verified, move them into driver storage */ 6998 spin_lock_irq(&phba->hbalock); 6999 memcpy(&phba->cgn_p, p_cgn_param, 7000 sizeof(struct lpfc_cgn_param)); 7001 7002 /* Update parameters in congestion info buffer now */ 7003 if (phba->cgn_i) { 7004 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 7005 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 7006 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 7007 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 7008 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 7009 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 7010 LPFC_CGN_CRC32_SEED); 7011 cp->cgn_info_crc = cpu_to_le32(crc); 7012 } 7013 spin_unlock_irq(&phba->hbalock); 7014 7015 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; 7016 7017 switch (oldmode) { 7018 case LPFC_CFG_OFF: 7019 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { 7020 /* Turning CMF on */ 7021 lpfc_cmf_start(phba); 7022 7023 if (phba->link_state >= LPFC_LINK_UP) { 7024 phba->cgn_reg_fpin = 7025 phba->cgn_init_reg_fpin; 7026 phba->cgn_reg_signal = 7027 phba->cgn_init_reg_signal; 7028 lpfc_issue_els_edc(phba->pport, 0); 7029 } 7030 } 7031 break; 7032 case LPFC_CFG_MANAGED: 7033 switch (phba->cgn_p.cgn_param_mode) { 7034 case LPFC_CFG_OFF: 7035 /* Turning CMF off */ 7036 lpfc_cmf_stop(phba); 7037 if (phba->link_state >= LPFC_LINK_UP) 7038 lpfc_issue_els_edc(phba->pport, 0); 7039 break; 7040 case LPFC_CFG_MONITOR: 7041 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 7042 "4661 Switch from MANAGED to " 7043 "`MONITOR mode\n"); 7044 phba->cmf_max_bytes_per_interval = 7045 phba->cmf_link_byte_count; 7046 7047 /* Resume blocked IO - unblock on workqueue */ 7048 queue_work(phba->wq, 7049 &phba->unblock_request_work); 7050 break; 7051 } 7052 break; 7053 case LPFC_CFG_MONITOR: 7054 switch (phba->cgn_p.cgn_param_mode) { 7055 case LPFC_CFG_OFF: 7056 /* Turning CMF off */ 7057 lpfc_cmf_stop(phba); 7058 if (phba->link_state >= LPFC_LINK_UP) 7059 lpfc_issue_els_edc(phba->pport, 0); 7060 break; 7061 case LPFC_CFG_MANAGED: 7062 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 7063 "4662 Switch from MONITOR to " 7064 "MANAGED mode\n"); 7065 lpfc_cmf_signal_init(phba); 7066 break; 7067 } 7068 break; 7069 } 7070 } else { 7071 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 
7072 "4669 FW cgn parm buf wrong magic 0x%x " 7073 "version %d\n", p_cgn_param->cgn_param_magic, 7074 p_cgn_param->cgn_param_version); 7075 } 7076 } 7077 7078 /** 7079 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters. 7080 * @phba: pointer to lpfc hba data structure. 7081 * 7082 * This routine issues a read_object mailbox command to 7083 * get the congestion management parameters from the FW 7084 * parses it and updates the driver maintained values. 7085 * 7086 * Returns 7087 * 0 if the object was empty 7088 * -Eval if an error was encountered 7089 * Count if bytes were read from object 7090 **/ 7091 int 7092 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba) 7093 { 7094 int ret = 0; 7095 struct lpfc_cgn_param *p_cgn_param = NULL; 7096 u32 *pdata = NULL; 7097 u32 len = 0; 7098 7099 /* Find out if the FW has a new set of congestion parameters. */ 7100 len = sizeof(struct lpfc_cgn_param); 7101 pdata = kzalloc(len, GFP_KERNEL); 7102 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME, 7103 pdata, len); 7104 7105 /* 0 means no data. A negative means error. A positive means 7106 * bytes were copied. 7107 */ 7108 if (!ret) { 7109 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7110 "4670 CGN RD OBJ returns no data\n"); 7111 goto rd_obj_err; 7112 } else if (ret < 0) { 7113 /* Some error. Just exit and return it to the caller.*/ 7114 goto rd_obj_err; 7115 } 7116 7117 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7118 "6234 READ CGN PARAMS Successful %d\n", len); 7119 7120 /* Parse data pointer over len and update the phba congestion 7121 * parameters with values passed back. The receive rate values 7122 * may have been altered in FW, but take no action here. 7123 */ 7124 p_cgn_param = (struct lpfc_cgn_param *)pdata; 7125 lpfc_cgn_params_parse(phba, p_cgn_param, len); 7126 7127 rd_obj_err: 7128 kfree(pdata); 7129 return ret; 7130 } 7131 7132 /** 7133 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event 7134 * @phba: pointer to lpfc hba data structure. 7135 * 7136 * The FW generated Async ACQE SLI event calls this routine when 7137 * the event type is an SLI Internal Port Event and the Event Code 7138 * indicates a change to the FW maintained congestion parameters. 7139 * 7140 * This routine executes a Read_Object mailbox call to obtain the 7141 * current congestion parameters maintained in FW and corrects 7142 * the driver's active congestion parameters. 7143 * 7144 * The acqe event is not passed because there is no further data 7145 * required. 7146 * 7147 * Returns nonzero error if event processing encountered an error. 7148 * Zero otherwise for success. 7149 **/ 7150 static int 7151 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba) 7152 { 7153 int ret = 0; 7154 7155 if (!phba->sli4_hba.pc_sli4_params.cmf) { 7156 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7157 "4664 Cgn Evt when E2E off. Drop event\n"); 7158 return -EACCES; 7159 } 7160 7161 /* If the event is claiming an empty object, it's ok. A write 7162 * could have cleared it. Only error is a negative return 7163 * status. 
7164 */ 7165 ret = lpfc_sli4_cgn_params_read(phba); 7166 if (ret < 0) { 7167 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7168 "4667 Error reading Cgn Params (%d)\n", 7169 ret); 7170 } else if (!ret) { 7171 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7172 "4673 CGN Event empty object.\n"); 7173 } 7174 return ret; 7175 } 7176 7177 /** 7178 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 7179 * @phba: pointer to lpfc hba data structure. 7180 * 7181 * This routine is invoked by the worker thread to process all the pending 7182 * SLI4 asynchronous events. 7183 **/ 7184 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 7185 { 7186 struct lpfc_cq_event *cq_event; 7187 unsigned long iflags; 7188 7189 /* First, declare the async event has been handled */ 7190 spin_lock_irqsave(&phba->hbalock, iflags); 7191 phba->hba_flag &= ~ASYNC_EVENT; 7192 spin_unlock_irqrestore(&phba->hbalock, iflags); 7193 7194 /* Now, handle all the async events */ 7195 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7196 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 7197 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 7198 cq_event, struct lpfc_cq_event, list); 7199 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, 7200 iflags); 7201 7202 /* Process the asynchronous event */ 7203 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 7204 case LPFC_TRAILER_CODE_LINK: 7205 lpfc_sli4_async_link_evt(phba, 7206 &cq_event->cqe.acqe_link); 7207 break; 7208 case LPFC_TRAILER_CODE_FCOE: 7209 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 7210 break; 7211 case LPFC_TRAILER_CODE_DCBX: 7212 lpfc_sli4_async_dcbx_evt(phba, 7213 &cq_event->cqe.acqe_dcbx); 7214 break; 7215 case LPFC_TRAILER_CODE_GRP5: 7216 lpfc_sli4_async_grp5_evt(phba, 7217 &cq_event->cqe.acqe_grp5); 7218 break; 7219 case LPFC_TRAILER_CODE_FC: 7220 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 7221 break; 7222 case LPFC_TRAILER_CODE_SLI: 7223 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 7224 break; 7225 case LPFC_TRAILER_CODE_CMSTAT: 7226 lpfc_sli4_async_cmstat_evt(phba); 7227 break; 7228 default: 7229 lpfc_printf_log(phba, KERN_ERR, 7230 LOG_TRACE_EVENT, 7231 "1804 Invalid asynchronous event code: " 7232 "x%x\n", bf_get(lpfc_trailer_code, 7233 &cq_event->cqe.mcqe_cmpl)); 7234 break; 7235 } 7236 7237 /* Free the completion event processed to the free pool */ 7238 lpfc_sli4_cq_event_release(phba, cq_event); 7239 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7240 } 7241 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 7242 } 7243 7244 /** 7245 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 7246 * @phba: pointer to lpfc hba data structure. 7247 * 7248 * This routine is invoked by the worker thread to process FCF table 7249 * rediscovery pending completion event. 
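 *
 * Dispatch sketch (simplified; the actual worker-thread dispatch lives
 * outside this excerpt, so the check shown here is inferred from the
 * FCF_REDISC_EVT flag handling in the body below):
 *
 *   if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
 *           lpfc_sli4_fcf_redisc_event_proc(phba);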
7250 **/ 7251 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 7252 { 7253 int rc; 7254 7255 spin_lock_irq(&phba->hbalock); 7256 /* Clear FCF rediscovery timeout event */ 7257 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 7258 /* Clear driver fast failover FCF record flag */ 7259 phba->fcf.failover_rec.flag = 0; 7260 /* Set state for FCF fast failover */ 7261 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 7262 spin_unlock_irq(&phba->hbalock); 7263 7264 /* Scan FCF table from the first entry to re-discover SAN */ 7265 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 7266 "2777 Start post-quiescent FCF table scan\n"); 7267 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 7268 if (rc) 7269 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7270 "2747 Issue FCF scan read FCF mailbox " 7271 "command failed 0x%x\n", rc); 7272 } 7273 7274 /** 7275 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 7276 * @phba: pointer to lpfc hba data structure. 7277 * @dev_grp: The HBA PCI-Device group number. 7278 * 7279 * This routine is invoked to set up the per HBA PCI-Device group function 7280 * API jump table entries. 7281 * 7282 * Return: 0 if success, otherwise -ENODEV 7283 **/ 7284 int 7285 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7286 { 7287 int rc; 7288 7289 /* Set up lpfc PCI-device group */ 7290 phba->pci_dev_grp = dev_grp; 7291 7292 /* The LPFC_PCI_DEV_OC uses SLI4 */ 7293 if (dev_grp == LPFC_PCI_DEV_OC) 7294 phba->sli_rev = LPFC_SLI_REV4; 7295 7296 /* Set up device INIT API function jump table */ 7297 rc = lpfc_init_api_table_setup(phba, dev_grp); 7298 if (rc) 7299 return -ENODEV; 7300 /* Set up SCSI API function jump table */ 7301 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 7302 if (rc) 7303 return -ENODEV; 7304 /* Set up SLI API function jump table */ 7305 rc = lpfc_sli_api_table_setup(phba, dev_grp); 7306 if (rc) 7307 return -ENODEV; 7308 /* Set up MBOX API function jump table */ 7309 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 7310 if (rc) 7311 return -ENODEV; 7312 7313 return 0; 7314 } 7315 7316 /** 7317 * lpfc_log_intr_mode - Log the active interrupt mode 7318 * @phba: pointer to lpfc hba data structure. 7319 * @intr_mode: active interrupt mode adopted. 7320 * 7321 * This routine it invoked to log the currently used active interrupt mode 7322 * to the device. 7323 **/ 7324 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 7325 { 7326 switch (intr_mode) { 7327 case 0: 7328 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7329 "0470 Enable INTx interrupt mode.\n"); 7330 break; 7331 case 1: 7332 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7333 "0481 Enabled MSI interrupt mode.\n"); 7334 break; 7335 case 2: 7336 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7337 "0480 Enabled MSI-X interrupt mode.\n"); 7338 break; 7339 default: 7340 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7341 "0482 Illegal interrupt mode.\n"); 7342 break; 7343 } 7344 return; 7345 } 7346 7347 /** 7348 * lpfc_enable_pci_dev - Enable a generic PCI device. 7349 * @phba: pointer to lpfc hba data structure. 7350 * 7351 * This routine is invoked to enable the PCI device that is common to all 7352 * PCI devices. 
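 *
 * A caller is expected to pair this with lpfc_disable_pci_dev() on a failed
 * probe or on device removal; a minimal sketch (probe details elided):
 *
 *   if (lpfc_enable_pci_dev(phba))
 *           return -ENODEV;
 *   ...
 *   lpfc_disable_pci_dev(phba);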
7353 * 7354 * Return codes 7355 * 0 - successful 7356 * other values - error 7357 **/ 7358 static int 7359 lpfc_enable_pci_dev(struct lpfc_hba *phba) 7360 { 7361 struct pci_dev *pdev; 7362 7363 /* Obtain PCI device reference */ 7364 if (!phba->pcidev) 7365 goto out_error; 7366 else 7367 pdev = phba->pcidev; 7368 /* Enable PCI device */ 7369 if (pci_enable_device_mem(pdev)) 7370 goto out_error; 7371 /* Request PCI resource for the device */ 7372 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 7373 goto out_disable_device; 7374 /* Set up device as PCI master and save state for EEH */ 7375 pci_set_master(pdev); 7376 pci_try_set_mwi(pdev); 7377 pci_save_state(pdev); 7378 7379 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 7380 if (pci_is_pcie(pdev)) 7381 pdev->needs_freset = 1; 7382 7383 return 0; 7384 7385 out_disable_device: 7386 pci_disable_device(pdev); 7387 out_error: 7388 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7389 "1401 Failed to enable pci device\n"); 7390 return -ENODEV; 7391 } 7392 7393 /** 7394 * lpfc_disable_pci_dev - Disable a generic PCI device. 7395 * @phba: pointer to lpfc hba data structure. 7396 * 7397 * This routine is invoked to disable the PCI device that is common to all 7398 * PCI devices. 7399 **/ 7400 static void 7401 lpfc_disable_pci_dev(struct lpfc_hba *phba) 7402 { 7403 struct pci_dev *pdev; 7404 7405 /* Obtain PCI device reference */ 7406 if (!phba->pcidev) 7407 return; 7408 else 7409 pdev = phba->pcidev; 7410 /* Release PCI resource and disable PCI device */ 7411 pci_release_mem_regions(pdev); 7412 pci_disable_device(pdev); 7413 7414 return; 7415 } 7416 7417 /** 7418 * lpfc_reset_hba - Reset a hba 7419 * @phba: pointer to lpfc hba data structure. 7420 * 7421 * This routine is invoked to reset a hba device. It brings the HBA 7422 * offline, performs a board restart, and then brings the board back 7423 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 7424 * on outstanding mailbox commands. 7425 **/ 7426 void 7427 lpfc_reset_hba(struct lpfc_hba *phba) 7428 { 7429 /* If resets are disabled then set error state and return. */ 7430 if (!phba->cfg_enable_hba_reset) { 7431 phba->link_state = LPFC_HBA_ERROR; 7432 return; 7433 } 7434 7435 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */ 7436 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) { 7437 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 7438 } else { 7439 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 7440 lpfc_sli_flush_io_rings(phba); 7441 } 7442 lpfc_offline(phba); 7443 lpfc_sli_brdrestart(phba); 7444 lpfc_online(phba); 7445 lpfc_unblock_mgmt_io(phba); 7446 } 7447 7448 /** 7449 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 7450 * @phba: pointer to lpfc hba data structure. 7451 * 7452 * This function enables the PCI SR-IOV virtual functions to a physical 7453 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 7454 * enable the number of virtual functions to the physical function. As 7455 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 7456 * API call does not considered as an error condition for most of the device. 
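 *
 * Note: despite the description above, this routine only reads the TotalVFs
 * value from the SR-IOV extended capability; the enabling itself is done by
 * lpfc_sli_probe_sriov_nr_virtfn() below, which uses this value as an upper
 * bound, roughly:
 *
 *   max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
 *   if (nr_vfn > max_nr_vfn)
 *           return -EINVAL;
 *   rc = pci_enable_sriov(pdev, nr_vfn);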
7457 **/ 7458 uint16_t 7459 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 7460 { 7461 struct pci_dev *pdev = phba->pcidev; 7462 uint16_t nr_virtfn; 7463 int pos; 7464 7465 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 7466 if (pos == 0) 7467 return 0; 7468 7469 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 7470 return nr_virtfn; 7471 } 7472 7473 /** 7474 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 7475 * @phba: pointer to lpfc hba data structure. 7476 * @nr_vfn: number of virtual functions to be enabled. 7477 * 7478 * This function enables the PCI SR-IOV virtual functions to a physical 7479 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 7480 * enable the number of virtual functions to the physical function. As 7481 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 7482 * API call does not considered as an error condition for most of the device. 7483 **/ 7484 int 7485 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 7486 { 7487 struct pci_dev *pdev = phba->pcidev; 7488 uint16_t max_nr_vfn; 7489 int rc; 7490 7491 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 7492 if (nr_vfn > max_nr_vfn) { 7493 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7494 "3057 Requested vfs (%d) greater than " 7495 "supported vfs (%d)", nr_vfn, max_nr_vfn); 7496 return -EINVAL; 7497 } 7498 7499 rc = pci_enable_sriov(pdev, nr_vfn); 7500 if (rc) { 7501 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7502 "2806 Failed to enable sriov on this device " 7503 "with vfn number nr_vf:%d, rc:%d\n", 7504 nr_vfn, rc); 7505 } else 7506 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7507 "2807 Successful enable sriov on this device " 7508 "with vfn number nr_vf:%d\n", nr_vfn); 7509 return rc; 7510 } 7511 7512 static void 7513 lpfc_unblock_requests_work(struct work_struct *work) 7514 { 7515 struct lpfc_hba *phba = container_of(work, struct lpfc_hba, 7516 unblock_request_work); 7517 7518 lpfc_unblock_requests(phba); 7519 } 7520 7521 /** 7522 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 7523 * @phba: pointer to lpfc hba data structure. 7524 * 7525 * This routine is invoked to set up the driver internal resources before the 7526 * device specific resource setup to support the HBA device it attached to. 7527 * 7528 * Return codes 7529 * 0 - successful 7530 * other values - error 7531 **/ 7532 static int 7533 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 7534 { 7535 struct lpfc_sli *psli = &phba->sli; 7536 7537 /* 7538 * Driver resources common to all SLI revisions 7539 */ 7540 atomic_set(&phba->fast_event_count, 0); 7541 atomic_set(&phba->dbg_log_idx, 0); 7542 atomic_set(&phba->dbg_log_cnt, 0); 7543 atomic_set(&phba->dbg_log_dmping, 0); 7544 spin_lock_init(&phba->hbalock); 7545 7546 /* Initialize port_list spinlock */ 7547 spin_lock_init(&phba->port_list_lock); 7548 INIT_LIST_HEAD(&phba->port_list); 7549 7550 INIT_LIST_HEAD(&phba->work_list); 7551 init_waitqueue_head(&phba->wait_4_mlo_m_q); 7552 7553 /* Initialize the wait queue head for the kernel thread */ 7554 init_waitqueue_head(&phba->work_waitq); 7555 7556 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7557 "1403 Protocols supported %s %s %s\n", 7558 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 7559 "SCSI" : " "), 7560 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 7561 "NVME" : " "), 7562 (phba->nvmet_support ? 
"NVMET" : " ")); 7563 7564 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 7565 spin_lock_init(&phba->scsi_buf_list_get_lock); 7566 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 7567 spin_lock_init(&phba->scsi_buf_list_put_lock); 7568 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 7569 7570 /* Initialize the fabric iocb list */ 7571 INIT_LIST_HEAD(&phba->fabric_iocb_list); 7572 7573 /* Initialize list to save ELS buffers */ 7574 INIT_LIST_HEAD(&phba->elsbuf); 7575 7576 /* Initialize FCF connection rec list */ 7577 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 7578 7579 /* Initialize OAS configuration list */ 7580 spin_lock_init(&phba->devicelock); 7581 INIT_LIST_HEAD(&phba->luns); 7582 7583 /* MBOX heartbeat timer */ 7584 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 7585 /* Fabric block timer */ 7586 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 7587 /* EA polling mode timer */ 7588 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 7589 /* Heartbeat timer */ 7590 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 7591 7592 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 7593 7594 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, 7595 lpfc_idle_stat_delay_work); 7596 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); 7597 return 0; 7598 } 7599 7600 /** 7601 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 7602 * @phba: pointer to lpfc hba data structure. 7603 * 7604 * This routine is invoked to set up the driver internal resources specific to 7605 * support the SLI-3 HBA device it attached to. 7606 * 7607 * Return codes 7608 * 0 - successful 7609 * other values - error 7610 **/ 7611 static int 7612 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 7613 { 7614 int rc, entry_sz; 7615 7616 /* 7617 * Initialize timers used by driver 7618 */ 7619 7620 /* FCP polling mode timer */ 7621 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 7622 7623 /* Host attention work mask setup */ 7624 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 7625 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 7626 7627 /* Get all the module params for configuring this host */ 7628 lpfc_get_cfgparam(phba); 7629 /* Set up phase-1 common device driver resources */ 7630 7631 rc = lpfc_setup_driver_resource_phase1(phba); 7632 if (rc) 7633 return -ENODEV; 7634 7635 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 7636 phba->menlo_flag |= HBA_MENLO_SUPPORT; 7637 /* check for menlo minimum sg count */ 7638 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 7639 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 7640 } 7641 7642 if (!phba->sli.sli3_ring) 7643 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 7644 sizeof(struct lpfc_sli_ring), 7645 GFP_KERNEL); 7646 if (!phba->sli.sli3_ring) 7647 return -ENOMEM; 7648 7649 /* 7650 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 7651 * used to create the sg_dma_buf_pool must be dynamically calculated. 7652 */ 7653 7654 if (phba->sli_rev == LPFC_SLI_REV4) 7655 entry_sz = sizeof(struct sli4_sge); 7656 else 7657 entry_sz = sizeof(struct ulp_bde64); 7658 7659 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 7660 if (phba->cfg_enable_bg) { 7661 /* 7662 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 7663 * the FCP rsp, and a BDE for each. 
Since we have no control 7664 * over how many protection data segments the SCSI Layer 7665 * will hand us (i.e., there could be one for every block 7666 * in the IO), we just allocate enough BDEs to accommodate 7667 * our max amount and we need to limit lpfc_sg_seg_cnt to 7668 * minimize the risk of running out. 7669 */ 7670 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7671 sizeof(struct fcp_rsp) + 7672 (LPFC_MAX_SG_SEG_CNT * entry_sz); 7673 7674 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 7675 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 7676 7677 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 7678 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 7679 } else { 7680 /* 7681 * The scsi_buf for a regular I/O will hold the FCP cmnd, 7682 * the FCP rsp, a BDE for each, and a BDE for up to 7683 * cfg_sg_seg_cnt data segments. 7684 */ 7685 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7686 sizeof(struct fcp_rsp) + 7687 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 7688 7689 /* Total BDEs in BPL for scsi_sg_list */ 7690 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 7691 } 7692 7693 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 7694 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 7695 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 7696 phba->cfg_total_seg_cnt); 7697 7698 phba->max_vpi = LPFC_MAX_VPI; 7699 /* This will be set to correct value after config_port mbox */ 7700 phba->max_vports = 0; 7701 7702 /* 7703 * Initialize the SLI Layer to run with lpfc HBAs. 7704 */ 7705 lpfc_sli_setup(phba); 7706 lpfc_sli_queue_init(phba); 7707 7708 /* Allocate device driver memory */ 7709 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 7710 return -ENOMEM; 7711 7712 phba->lpfc_sg_dma_buf_pool = 7713 dma_pool_create("lpfc_sg_dma_buf_pool", 7714 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, 7715 BPL_ALIGN_SZ, 0); 7716 7717 if (!phba->lpfc_sg_dma_buf_pool) 7718 goto fail_free_mem; 7719 7720 phba->lpfc_cmd_rsp_buf_pool = 7721 dma_pool_create("lpfc_cmd_rsp_buf_pool", 7722 &phba->pcidev->dev, 7723 sizeof(struct fcp_cmnd) + 7724 sizeof(struct fcp_rsp), 7725 BPL_ALIGN_SZ, 0); 7726 7727 if (!phba->lpfc_cmd_rsp_buf_pool) 7728 goto fail_free_dma_buf_pool; 7729 7730 /* 7731 * Enable sr-iov virtual functions if supported and configured 7732 * through the module parameter. 7733 */ 7734 if (phba->cfg_sriov_nr_virtfn > 0) { 7735 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 7736 phba->cfg_sriov_nr_virtfn); 7737 if (rc) { 7738 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7739 "2808 Requested number of SR-IOV " 7740 "virtual functions (%d) is not " 7741 "supported\n", 7742 phba->cfg_sriov_nr_virtfn); 7743 phba->cfg_sriov_nr_virtfn = 0; 7744 } 7745 } 7746 7747 return 0; 7748 7749 fail_free_dma_buf_pool: 7750 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 7751 phba->lpfc_sg_dma_buf_pool = NULL; 7752 fail_free_mem: 7753 lpfc_mem_free(phba); 7754 return -ENOMEM; 7755 } 7756 7757 /** 7758 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 7759 * @phba: pointer to lpfc hba data structure. 7760 * 7761 * This routine is invoked to unset the driver internal resources set up 7762 * specific for supporting the SLI-3 HBA device it attached to.
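 *
 * A simplified pairing sketch (the label is hypothetical; the real probe
 * unwind path is outside this excerpt):
 *
 *   rc = lpfc_sli_driver_resource_setup(phba);
 *   if (rc)
 *           goto out_free_phba;
 *   ...
 *   lpfc_sli_driver_resource_unset(phba);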
7763 **/ 7764 static void 7765 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 7766 { 7767 /* Free device driver memory allocated */ 7768 lpfc_mem_free_all(phba); 7769 7770 return; 7771 } 7772 7773 /** 7774 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 7775 * @phba: pointer to lpfc hba data structure. 7776 * 7777 * This routine is invoked to set up the driver internal resources specific to 7778 * support the SLI-4 HBA device it attached to. 7779 * 7780 * Return codes 7781 * 0 - successful 7782 * other values - error 7783 **/ 7784 static int 7785 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 7786 { 7787 LPFC_MBOXQ_t *mboxq; 7788 MAILBOX_t *mb; 7789 int rc, i, max_buf_size; 7790 int longs; 7791 int extra; 7792 uint64_t wwn; 7793 u32 if_type; 7794 u32 if_fam; 7795 7796 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 7797 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 7798 phba->sli4_hba.curr_disp_cpu = 0; 7799 7800 /* Get all the module params for configuring this host */ 7801 lpfc_get_cfgparam(phba); 7802 7803 /* Set up phase-1 common device driver resources */ 7804 rc = lpfc_setup_driver_resource_phase1(phba); 7805 if (rc) 7806 return -ENODEV; 7807 7808 /* Before proceed, wait for POST done and device ready */ 7809 rc = lpfc_sli4_post_status_check(phba); 7810 if (rc) 7811 return -ENODEV; 7812 7813 /* Allocate all driver workqueues here */ 7814 7815 /* The lpfc_wq workqueue for deferred irq use */ 7816 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 7817 7818 /* 7819 * Initialize timers used by driver 7820 */ 7821 7822 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 7823 7824 /* FCF rediscover timer */ 7825 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 7826 7827 /* CMF congestion timer */ 7828 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7829 phba->cmf_timer.function = lpfc_cmf_timer; 7830 7831 /* 7832 * Control structure for handling external multi-buffer mailbox 7833 * command pass-through. 7834 */ 7835 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 7836 sizeof(struct lpfc_mbox_ext_buf_ctx)); 7837 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 7838 7839 phba->max_vpi = LPFC_MAX_VPI; 7840 7841 /* This will be set to correct value after the read_config mbox */ 7842 phba->max_vports = 0; 7843 7844 /* Program the default value of vlan_id and fc_map */ 7845 phba->valid_vlan = 0; 7846 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 7847 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 7848 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 7849 7850 /* 7851 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 7852 * we will associate a new ring, for each EQ/CQ/WQ tuple. 7853 * The WQ create will allocate the ring. 7854 */ 7855 7856 /* Initialize buffer queue management fields */ 7857 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 7858 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 7859 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 7860 7861 /* for VMID idle timeout if VMID is enabled */ 7862 if (lpfc_is_vmid_enabled(phba)) 7863 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); 7864 7865 /* 7866 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
7867 */ 7868 /* Initialize the Abort buffer list used by driver */ 7869 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 7870 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 7871 7872 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 7873 /* Initialize the Abort nvme buffer list used by driver */ 7874 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 7875 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7876 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 7877 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 7878 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 7879 } 7880 7881 /* This abort list used by worker thread */ 7882 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 7883 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 7884 spin_lock_init(&phba->sli4_hba.asynce_list_lock); 7885 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); 7886 7887 /* 7888 * Initialize driver internal slow-path work queues 7889 */ 7890 7891 /* Driver internel slow-path CQ Event pool */ 7892 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 7893 /* Response IOCB work queue list */ 7894 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 7895 /* Asynchronous event CQ Event work queue list */ 7896 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 7897 /* Slow-path XRI aborted CQ Event work queue list */ 7898 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 7899 /* Receive queue CQ Event work queue list */ 7900 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 7901 7902 /* Initialize extent block lists. */ 7903 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 7904 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 7905 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 7906 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 7907 7908 /* Initialize mboxq lists. If the early init routines fail 7909 * these lists need to be correctly initialized. 7910 */ 7911 INIT_LIST_HEAD(&phba->sli.mboxq); 7912 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 7913 7914 /* initialize optic_state to 0xFF */ 7915 phba->sli4_hba.lnk_info.optic_state = 0xff; 7916 7917 /* Allocate device driver memory */ 7918 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 7919 if (rc) 7920 return -ENOMEM; 7921 7922 /* IF Type 2 ports get initialized now. */ 7923 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 7924 LPFC_SLI_INTF_IF_TYPE_2) { 7925 rc = lpfc_pci_function_reset(phba); 7926 if (unlikely(rc)) { 7927 rc = -ENODEV; 7928 goto out_free_mem; 7929 } 7930 phba->temp_sensor_support = 1; 7931 } 7932 7933 /* Create the bootstrap mailbox command */ 7934 rc = lpfc_create_bootstrap_mbox(phba); 7935 if (unlikely(rc)) 7936 goto out_free_mem; 7937 7938 /* Set up the host's endian order with the device. */ 7939 rc = lpfc_setup_endian_order(phba); 7940 if (unlikely(rc)) 7941 goto out_free_bsmbx; 7942 7943 /* Set up the hba's configuration parameters. */ 7944 rc = lpfc_sli4_read_config(phba); 7945 if (unlikely(rc)) 7946 goto out_free_bsmbx; 7947 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 7948 if (unlikely(rc)) 7949 goto out_free_bsmbx; 7950 7951 /* IF Type 0 ports get initialized now. 
*/ 7952 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7953 LPFC_SLI_INTF_IF_TYPE_0) { 7954 rc = lpfc_pci_function_reset(phba); 7955 if (unlikely(rc)) 7956 goto out_free_bsmbx; 7957 } 7958 7959 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 7960 GFP_KERNEL); 7961 if (!mboxq) { 7962 rc = -ENOMEM; 7963 goto out_free_bsmbx; 7964 } 7965 7966 /* Check for NVMET being configured */ 7967 phba->nvmet_support = 0; 7968 if (lpfc_enable_nvmet_cnt) { 7969 7970 /* First get WWN of HBA instance */ 7971 lpfc_read_nv(phba, mboxq); 7972 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7973 if (rc != MBX_SUCCESS) { 7974 lpfc_printf_log(phba, KERN_ERR, 7975 LOG_TRACE_EVENT, 7976 "6016 Mailbox failed , mbxCmd x%x " 7977 "READ_NV, mbxStatus x%x\n", 7978 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 7979 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 7980 mempool_free(mboxq, phba->mbox_mem_pool); 7981 rc = -EIO; 7982 goto out_free_bsmbx; 7983 } 7984 mb = &mboxq->u.mb; 7985 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 7986 sizeof(uint64_t)); 7987 wwn = cpu_to_be64(wwn); 7988 phba->sli4_hba.wwnn.u.name = wwn; 7989 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 7990 sizeof(uint64_t)); 7991 /* wwn is WWPN of HBA instance */ 7992 wwn = cpu_to_be64(wwn); 7993 phba->sli4_hba.wwpn.u.name = wwn; 7994 7995 /* Check to see if it matches any module parameter */ 7996 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 7997 if (wwn == lpfc_enable_nvmet[i]) { 7998 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 7999 if (lpfc_nvmet_mem_alloc(phba)) 8000 break; 8001 8002 phba->nvmet_support = 1; /* a match */ 8003 8004 lpfc_printf_log(phba, KERN_ERR, 8005 LOG_TRACE_EVENT, 8006 "6017 NVME Target %016llx\n", 8007 wwn); 8008 #else 8009 lpfc_printf_log(phba, KERN_ERR, 8010 LOG_TRACE_EVENT, 8011 "6021 Can't enable NVME Target." 8012 " NVME_TARGET_FC infrastructure" 8013 " is not in kernel\n"); 8014 #endif 8015 /* Not supported for NVMET */ 8016 phba->cfg_xri_rebalancing = 0; 8017 if (phba->irq_chann_mode == NHT_MODE) { 8018 phba->cfg_irq_chann = 8019 phba->sli4_hba.num_present_cpu; 8020 phba->cfg_hdw_queue = 8021 phba->sli4_hba.num_present_cpu; 8022 phba->irq_chann_mode = NORMAL_MODE; 8023 } 8024 break; 8025 } 8026 } 8027 } 8028 8029 lpfc_nvme_mod_param_dep(phba); 8030 8031 /* 8032 * Get sli4 parameters that override parameters from Port capabilities. 8033 * If this call fails, it isn't critical unless the SLI4 parameters come 8034 * back in conflict. 8035 */ 8036 rc = lpfc_get_sli4_parameters(phba, mboxq); 8037 if (rc) { 8038 if_type = bf_get(lpfc_sli_intf_if_type, 8039 &phba->sli4_hba.sli_intf); 8040 if_fam = bf_get(lpfc_sli_intf_sli_family, 8041 &phba->sli4_hba.sli_intf); 8042 if (phba->sli4_hba.extents_in_use && 8043 phba->sli4_hba.rpi_hdrs_in_use) { 8044 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8045 "2999 Unsupported SLI4 Parameters " 8046 "Extents and RPI headers enabled.\n"); 8047 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8048 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 8049 mempool_free(mboxq, phba->mbox_mem_pool); 8050 rc = -EIO; 8051 goto out_free_bsmbx; 8052 } 8053 } 8054 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8055 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 8056 mempool_free(mboxq, phba->mbox_mem_pool); 8057 rc = -EIO; 8058 goto out_free_bsmbx; 8059 } 8060 } 8061 8062 /* 8063 * 1 for cmd, 1 for rsp, NVME adds an extra one 8064 * for boundary conditions in its max_sgl_segment template. 
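 *
 * Worked example (sizes assumed for illustration only): when NVME is among
 * the enabled FC4 types, extra ends up as 3 (2 otherwise), so a
 * cfg_sg_seg_cnt of 64 reserves 64 + 3 = 67 SGEs per IO buffer in the
 * non-T10-DIF path below; assuming a 16-byte struct sli4_sge, that is
 * 67 * 16 = 1072 bytes on top of the FCP cmnd and rsp.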
8065 */ 8066 extra = 2; 8067 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 8068 extra++; 8069 8070 /* 8071 * It doesn't matter what family our adapter is in, we are 8072 * limited to 2 Pages, 512 SGEs, for our SGL. 8073 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 8074 */ 8075 max_buf_size = (2 * SLI4_PAGE_SIZE); 8076 8077 /* 8078 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 8079 * used to create the sg_dma_buf_pool must be calculated. 8080 */ 8081 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 8082 /* Both cfg_enable_bg and cfg_external_dif code paths */ 8083 8084 /* 8085 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 8086 * the FCP rsp, and a SGE. Sice we have no control 8087 * over how many protection segments the SCSI Layer 8088 * will hand us (ie: there could be one for every block 8089 * in the IO), just allocate enough SGEs to accomidate 8090 * our max amount and we need to limit lpfc_sg_seg_cnt 8091 * to minimize the risk of running out. 8092 */ 8093 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 8094 sizeof(struct fcp_rsp) + max_buf_size; 8095 8096 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 8097 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 8098 8099 /* 8100 * If supporting DIF, reduce the seg count for scsi to 8101 * allow room for the DIF sges. 8102 */ 8103 if (phba->cfg_enable_bg && 8104 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 8105 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 8106 else 8107 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 8108 8109 } else { 8110 /* 8111 * The scsi_buf for a regular I/O holds the FCP cmnd, 8112 * the FCP rsp, a SGE for each, and a SGE for up to 8113 * cfg_sg_seg_cnt data segments. 8114 */ 8115 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 8116 sizeof(struct fcp_rsp) + 8117 ((phba->cfg_sg_seg_cnt + extra) * 8118 sizeof(struct sli4_sge)); 8119 8120 /* Total SGEs for scsi_sg_list */ 8121 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 8122 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 8123 8124 /* 8125 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 8126 * need to post 1 page for the SGL. 8127 */ 8128 } 8129 8130 if (phba->cfg_xpsgl && !phba->nvmet_support) 8131 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 8132 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 8133 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 8134 else 8135 phba->cfg_sg_dma_buf_size = 8136 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 8137 8138 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 8139 sizeof(struct sli4_sge); 8140 8141 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. 
*/ 8142 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8143 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 8144 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 8145 "6300 Reducing NVME sg segment " 8146 "cnt to %d\n", 8147 LPFC_MAX_NVME_SEG_CNT); 8148 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 8149 } else 8150 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 8151 } 8152 8153 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 8154 "9087 sg_seg_cnt:%d dmabuf_size:%d " 8155 "total:%d scsi:%d nvme:%d\n", 8156 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 8157 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 8158 phba->cfg_nvme_seg_cnt); 8159 8160 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 8161 i = phba->cfg_sg_dma_buf_size; 8162 else 8163 i = SLI4_PAGE_SIZE; 8164 8165 phba->lpfc_sg_dma_buf_pool = 8166 dma_pool_create("lpfc_sg_dma_buf_pool", 8167 &phba->pcidev->dev, 8168 phba->cfg_sg_dma_buf_size, 8169 i, 0); 8170 if (!phba->lpfc_sg_dma_buf_pool) 8171 goto out_free_bsmbx; 8172 8173 phba->lpfc_cmd_rsp_buf_pool = 8174 dma_pool_create("lpfc_cmd_rsp_buf_pool", 8175 &phba->pcidev->dev, 8176 sizeof(struct fcp_cmnd) + 8177 sizeof(struct fcp_rsp), 8178 i, 0); 8179 if (!phba->lpfc_cmd_rsp_buf_pool) 8180 goto out_free_sg_dma_buf; 8181 8182 mempool_free(mboxq, phba->mbox_mem_pool); 8183 8184 /* Verify OAS is supported */ 8185 lpfc_sli4_oas_verify(phba); 8186 8187 /* Verify RAS support on adapter */ 8188 lpfc_sli4_ras_init(phba); 8189 8190 /* Verify all the SLI4 queues */ 8191 rc = lpfc_sli4_queue_verify(phba); 8192 if (rc) 8193 goto out_free_cmd_rsp_buf; 8194 8195 /* Create driver internal CQE event pool */ 8196 rc = lpfc_sli4_cq_event_pool_create(phba); 8197 if (rc) 8198 goto out_free_cmd_rsp_buf; 8199 8200 /* Initialize sgl lists per host */ 8201 lpfc_init_sgl_list(phba); 8202 8203 /* Allocate and initialize active sgl array */ 8204 rc = lpfc_init_active_sgl_array(phba); 8205 if (rc) { 8206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8207 "1430 Failed to initialize sgl list.\n"); 8208 goto out_destroy_cq_event_pool; 8209 } 8210 rc = lpfc_sli4_init_rpi_hdrs(phba); 8211 if (rc) { 8212 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8213 "1432 Failed to initialize rpi headers.\n"); 8214 goto out_free_active_sgl; 8215 } 8216 8217 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 8218 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 8219 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 8220 GFP_KERNEL); 8221 if (!phba->fcf.fcf_rr_bmask) { 8222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8223 "2759 Failed allocate memory for FCF round " 8224 "robin failover bmask\n"); 8225 rc = -ENOMEM; 8226 goto out_remove_rpi_hdrs; 8227 } 8228 8229 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 8230 sizeof(struct lpfc_hba_eq_hdl), 8231 GFP_KERNEL); 8232 if (!phba->sli4_hba.hba_eq_hdl) { 8233 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8234 "2572 Failed allocate memory for " 8235 "fast-path per-EQ handle array\n"); 8236 rc = -ENOMEM; 8237 goto out_free_fcf_rr_bmask; 8238 } 8239 8240 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 8241 sizeof(struct lpfc_vector_map_info), 8242 GFP_KERNEL); 8243 if (!phba->sli4_hba.cpu_map) { 8244 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8245 "3327 Failed allocate memory for msi-x " 8246 "interrupt vector mapping\n"); 8247 rc = -ENOMEM; 8248 goto out_free_hba_eq_hdl; 8249 } 8250 8251 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 8252 if 
(!phba->sli4_hba.eq_info) { 8253 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8254 "3321 Failed allocation for per_cpu stats\n"); 8255 rc = -ENOMEM; 8256 goto out_free_hba_cpu_map; 8257 } 8258 8259 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, 8260 sizeof(*phba->sli4_hba.idle_stat), 8261 GFP_KERNEL); 8262 if (!phba->sli4_hba.idle_stat) { 8263 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8264 "3390 Failed allocation for idle_stat\n"); 8265 rc = -ENOMEM; 8266 goto out_free_hba_eq_info; 8267 } 8268 8269 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8270 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 8271 if (!phba->sli4_hba.c_stat) { 8272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8273 "3332 Failed allocating per cpu hdwq stats\n"); 8274 rc = -ENOMEM; 8275 goto out_free_hba_idle_stat; 8276 } 8277 #endif 8278 8279 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); 8280 if (!phba->cmf_stat) { 8281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8282 "3331 Failed allocating per cpu cgn stats\n"); 8283 rc = -ENOMEM; 8284 goto out_free_hba_hdwq_info; 8285 } 8286 8287 /* 8288 * Enable sr-iov virtual functions if supported and configured 8289 * through the module parameter. 8290 */ 8291 if (phba->cfg_sriov_nr_virtfn > 0) { 8292 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 8293 phba->cfg_sriov_nr_virtfn); 8294 if (rc) { 8295 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8296 "3020 Requested number of SR-IOV " 8297 "virtual functions (%d) is not " 8298 "supported\n", 8299 phba->cfg_sriov_nr_virtfn); 8300 phba->cfg_sriov_nr_virtfn = 0; 8301 } 8302 } 8303 8304 return 0; 8305 8306 out_free_hba_hdwq_info: 8307 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8308 free_percpu(phba->sli4_hba.c_stat); 8309 out_free_hba_idle_stat: 8310 #endif 8311 kfree(phba->sli4_hba.idle_stat); 8312 out_free_hba_eq_info: 8313 free_percpu(phba->sli4_hba.eq_info); 8314 out_free_hba_cpu_map: 8315 kfree(phba->sli4_hba.cpu_map); 8316 out_free_hba_eq_hdl: 8317 kfree(phba->sli4_hba.hba_eq_hdl); 8318 out_free_fcf_rr_bmask: 8319 kfree(phba->fcf.fcf_rr_bmask); 8320 out_remove_rpi_hdrs: 8321 lpfc_sli4_remove_rpi_hdrs(phba); 8322 out_free_active_sgl: 8323 lpfc_free_active_sgl(phba); 8324 out_destroy_cq_event_pool: 8325 lpfc_sli4_cq_event_pool_destroy(phba); 8326 out_free_cmd_rsp_buf: 8327 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 8328 phba->lpfc_cmd_rsp_buf_pool = NULL; 8329 out_free_sg_dma_buf: 8330 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 8331 phba->lpfc_sg_dma_buf_pool = NULL; 8332 out_free_bsmbx: 8333 lpfc_destroy_bootstrap_mbox(phba); 8334 out_free_mem: 8335 lpfc_mem_free(phba); 8336 return rc; 8337 } 8338 8339 /** 8340 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 8341 * @phba: pointer to lpfc hba data structure. 8342 * 8343 * This routine is invoked to unset the driver internal resources set up 8344 * specific for supporting the SLI-4 HBA device it attached to. 
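 *
 * This is the teardown counterpart of lpfc_sli4_driver_resource_setup(); it
 * releases the per-CPU stats, CPU map, EQ handles, FCF bmask, RPI headers,
 * sgl lists, bootstrap mailbox and FCF connection table in roughly the
 * reverse order of their allocation.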
8345 **/ 8346 static void 8347 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 8348 { 8349 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 8350 8351 free_percpu(phba->sli4_hba.eq_info); 8352 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8353 free_percpu(phba->sli4_hba.c_stat); 8354 #endif 8355 free_percpu(phba->cmf_stat); 8356 kfree(phba->sli4_hba.idle_stat); 8357 8358 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 8359 kfree(phba->sli4_hba.cpu_map); 8360 phba->sli4_hba.num_possible_cpu = 0; 8361 phba->sli4_hba.num_present_cpu = 0; 8362 phba->sli4_hba.curr_disp_cpu = 0; 8363 cpumask_clear(&phba->sli4_hba.irq_aff_mask); 8364 8365 /* Free memory allocated for fast-path work queue handles */ 8366 kfree(phba->sli4_hba.hba_eq_hdl); 8367 8368 /* Free the allocated rpi headers. */ 8369 lpfc_sli4_remove_rpi_hdrs(phba); 8370 lpfc_sli4_remove_rpis(phba); 8371 8372 /* Free eligible FCF index bmask */ 8373 kfree(phba->fcf.fcf_rr_bmask); 8374 8375 /* Free the ELS sgl list */ 8376 lpfc_free_active_sgl(phba); 8377 lpfc_free_els_sgl_list(phba); 8378 lpfc_free_nvmet_sgl_list(phba); 8379 8380 /* Free the completion queue EQ event pool */ 8381 lpfc_sli4_cq_event_release_all(phba); 8382 lpfc_sli4_cq_event_pool_destroy(phba); 8383 8384 /* Release resource identifiers. */ 8385 lpfc_sli4_dealloc_resource_identifiers(phba); 8386 8387 /* Free the bsmbx region. */ 8388 lpfc_destroy_bootstrap_mbox(phba); 8389 8390 /* Free the SLI Layer memory with SLI4 HBAs */ 8391 lpfc_mem_free_all(phba); 8392 8393 /* Free the current connect table */ 8394 list_for_each_entry_safe(conn_entry, next_conn_entry, 8395 &phba->fcf_conn_rec_list, list) { 8396 list_del_init(&conn_entry->list); 8397 kfree(conn_entry); 8398 } 8399 8400 return; 8401 } 8402 8403 /** 8404 * lpfc_init_api_table_setup - Set up init api function jump table 8405 * @phba: The hba struct for which this call is being executed. 8406 * @dev_grp: The HBA PCI-Device group number. 8407 * 8408 * This routine sets up the device INIT interface API function jump table 8409 * in @phba struct. 8410 * 8411 * Returns: 0 - success, -ENODEV - failure. 8412 **/ 8413 int 8414 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8415 { 8416 phba->lpfc_hba_init_link = lpfc_hba_init_link; 8417 phba->lpfc_hba_down_link = lpfc_hba_down_link; 8418 phba->lpfc_selective_reset = lpfc_selective_reset; 8419 switch (dev_grp) { 8420 case LPFC_PCI_DEV_LP: 8421 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 8422 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 8423 phba->lpfc_stop_port = lpfc_stop_port_s3; 8424 break; 8425 case LPFC_PCI_DEV_OC: 8426 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 8427 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 8428 phba->lpfc_stop_port = lpfc_stop_port_s4; 8429 break; 8430 default: 8431 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8432 "1431 Invalid HBA PCI-device group: 0x%x\n", 8433 dev_grp); 8434 return -ENODEV; 8435 } 8436 return 0; 8437 } 8438 8439 /** 8440 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 8441 * @phba: pointer to lpfc hba data structure. 8442 * 8443 * This routine is invoked to set up the driver internal resources after the 8444 * device specific resource setup to support the HBA device it attached to. 8445 * 8446 * Return codes 8447 * 0 - successful 8448 * other values - error 8449 **/ 8450 static int 8451 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 8452 { 8453 int error; 8454 8455 /* Startup the kernel thread for this host adapter. 
*/ 8456 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8457 "lpfc_worker_%d", phba->brd_no); 8458 if (IS_ERR(phba->worker_thread)) { 8459 error = PTR_ERR(phba->worker_thread); 8460 return error; 8461 } 8462 8463 return 0; 8464 } 8465 8466 /** 8467 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 8468 * @phba: pointer to lpfc hba data structure. 8469 * 8470 * This routine is invoked to unset the driver internal resources set up after 8471 * the device specific resource setup for supporting the HBA device it 8472 * attached to. 8473 **/ 8474 static void 8475 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 8476 { 8477 if (phba->wq) { 8478 flush_workqueue(phba->wq); 8479 destroy_workqueue(phba->wq); 8480 phba->wq = NULL; 8481 } 8482 8483 /* Stop kernel worker thread */ 8484 if (phba->worker_thread) 8485 kthread_stop(phba->worker_thread); 8486 } 8487 8488 /** 8489 * lpfc_free_iocb_list - Free iocb list. 8490 * @phba: pointer to lpfc hba data structure. 8491 * 8492 * This routine is invoked to free the driver's IOCB list and memory. 8493 **/ 8494 void 8495 lpfc_free_iocb_list(struct lpfc_hba *phba) 8496 { 8497 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 8498 8499 spin_lock_irq(&phba->hbalock); 8500 list_for_each_entry_safe(iocbq_entry, iocbq_next, 8501 &phba->lpfc_iocb_list, list) { 8502 list_del(&iocbq_entry->list); 8503 kfree(iocbq_entry); 8504 phba->total_iocbq_bufs--; 8505 } 8506 spin_unlock_irq(&phba->hbalock); 8507 8508 return; 8509 } 8510 8511 /** 8512 * lpfc_init_iocb_list - Allocate and initialize iocb list. 8513 * @phba: pointer to lpfc hba data structure. 8514 * @iocb_count: number of requested iocbs 8515 * 8516 * This routine is invoked to allocate and initizlize the driver's IOCB 8517 * list and set up the IOCB tag array accordingly. 8518 * 8519 * Return codes 8520 * 0 - successful 8521 * other values - error 8522 **/ 8523 int 8524 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 8525 { 8526 struct lpfc_iocbq *iocbq_entry = NULL; 8527 uint16_t iotag; 8528 int i; 8529 8530 /* Initialize and populate the iocb list per host. */ 8531 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 8532 for (i = 0; i < iocb_count; i++) { 8533 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 8534 if (iocbq_entry == NULL) { 8535 printk(KERN_ERR "%s: only allocated %d iocbs of " 8536 "expected %d count. Unloading driver.\n", 8537 __func__, i, iocb_count); 8538 goto out_free_iocbq; 8539 } 8540 8541 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 8542 if (iotag == 0) { 8543 kfree(iocbq_entry); 8544 printk(KERN_ERR "%s: failed to allocate IOTAG. " 8545 "Unloading driver.\n", __func__); 8546 goto out_free_iocbq; 8547 } 8548 iocbq_entry->sli4_lxritag = NO_XRI; 8549 iocbq_entry->sli4_xritag = NO_XRI; 8550 8551 spin_lock_irq(&phba->hbalock); 8552 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 8553 phba->total_iocbq_bufs++; 8554 spin_unlock_irq(&phba->hbalock); 8555 } 8556 8557 return 0; 8558 8559 out_free_iocbq: 8560 lpfc_free_iocb_list(phba); 8561 8562 return -ENOMEM; 8563 } 8564 8565 /** 8566 * lpfc_free_sgl_list - Free a given sgl list. 8567 * @phba: pointer to lpfc hba data structure. 8568 * @sglq_list: pointer to the head of sgl list. 8569 * 8570 * This routine is invoked to free a give sgl list and memory. 
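 *
 * Callers typically splice the sgls onto a private list under
 * sgl_list_lock first and then hand that list in, as lpfc_free_els_sgl_list()
 * does below; a condensed sketch:
 *
 *   LIST_HEAD(sglq_list);
 *
 *   spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 *   list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
 *   spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 *   lpfc_free_sgl_list(phba, &sglq_list);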
8571 **/ 8572 void 8573 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 8574 { 8575 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8576 8577 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 8578 list_del(&sglq_entry->list); 8579 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 8580 kfree(sglq_entry); 8581 } 8582 } 8583 8584 /** 8585 * lpfc_free_els_sgl_list - Free els sgl list. 8586 * @phba: pointer to lpfc hba data structure. 8587 * 8588 * This routine is invoked to free the driver's els sgl list and memory. 8589 **/ 8590 static void 8591 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 8592 { 8593 LIST_HEAD(sglq_list); 8594 8595 /* Retrieve all els sgls from driver list */ 8596 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 8597 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 8598 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 8599 8600 /* Now free the sgl list */ 8601 lpfc_free_sgl_list(phba, &sglq_list); 8602 } 8603 8604 /** 8605 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 8606 * @phba: pointer to lpfc hba data structure. 8607 * 8608 * This routine is invoked to free the driver's nvmet sgl list and memory. 8609 **/ 8610 static void 8611 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 8612 { 8613 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8614 LIST_HEAD(sglq_list); 8615 8616 /* Retrieve all nvmet sgls from driver list */ 8617 spin_lock_irq(&phba->hbalock); 8618 spin_lock(&phba->sli4_hba.sgl_list_lock); 8619 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 8620 spin_unlock(&phba->sli4_hba.sgl_list_lock); 8621 spin_unlock_irq(&phba->hbalock); 8622 8623 /* Now free the sgl list */ 8624 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 8625 list_del(&sglq_entry->list); 8626 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 8627 kfree(sglq_entry); 8628 } 8629 8630 /* Update the nvmet_xri_cnt to reflect no current sgls. 8631 * The next initialization cycle sets the count and allocates 8632 * the sgls over again. 8633 */ 8634 phba->sli4_hba.nvmet_xri_cnt = 0; 8635 } 8636 8637 /** 8638 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 8639 * @phba: pointer to lpfc hba data structure. 8640 * 8641 * This routine is invoked to allocate the driver's active sgl memory. 8642 * This array will hold the sglq_entry's for active IOs. 8643 **/ 8644 static int 8645 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 8646 { 8647 int size; 8648 size = sizeof(struct lpfc_sglq *); 8649 size *= phba->sli4_hba.max_cfg_param.max_xri; 8650 8651 phba->sli4_hba.lpfc_sglq_active_list = 8652 kzalloc(size, GFP_KERNEL); 8653 if (!phba->sli4_hba.lpfc_sglq_active_list) 8654 return -ENOMEM; 8655 return 0; 8656 } 8657 8658 /** 8659 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 8660 * @phba: pointer to lpfc hba data structure. 8661 * 8662 * This routine is invoked to walk through the array of active sglq entries 8663 * and free all of the resources. 8664 * This is just a place holder for now. 8665 **/ 8666 static void 8667 lpfc_free_active_sgl(struct lpfc_hba *phba) 8668 { 8669 kfree(phba->sli4_hba.lpfc_sglq_active_list); 8670 } 8671 8672 /** 8673 * lpfc_init_sgl_list - Allocate and initialize sgl list. 8674 * @phba: pointer to lpfc hba data structure. 8675 * 8676 * This routine is invoked to allocate and initizlize the driver's sgl 8677 * list and set up the sgl xritag tag array accordingly. 
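 *
 * Note: only the list heads and the els/io xri counters are initialized
 * here; the sgl entries themselves are presumably allocated later in the
 * SLI-4 bring-up path, which is outside this excerpt.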
8678 * 8679 **/ 8680 static void 8681 lpfc_init_sgl_list(struct lpfc_hba *phba) 8682 { 8683 /* Initialize and populate the sglq list per host/VF. */ 8684 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 8685 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 8686 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 8687 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 8688 8689 /* els xri-sgl book keeping */ 8690 phba->sli4_hba.els_xri_cnt = 0; 8691 8692 /* nvme xri-buffer book keeping */ 8693 phba->sli4_hba.io_xri_cnt = 0; 8694 } 8695 8696 /** 8697 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 8698 * @phba: pointer to lpfc hba data structure. 8699 * 8700 * This routine is invoked to post rpi header templates to the 8701 * port for those SLI4 ports that do not support extents. This routine 8702 * posts a PAGE_SIZE memory region to the port to hold up to 8703 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 8704 * and should be called only when interrupts are disabled. 8705 * 8706 * Return codes 8707 * 0 - successful 8708 * -ERROR - otherwise. 8709 **/ 8710 int 8711 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 8712 { 8713 int rc = 0; 8714 struct lpfc_rpi_hdr *rpi_hdr; 8715 8716 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 8717 if (!phba->sli4_hba.rpi_hdrs_in_use) 8718 return rc; 8719 if (phba->sli4_hba.extents_in_use) 8720 return -EIO; 8721 8722 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 8723 if (!rpi_hdr) { 8724 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8725 "0391 Error during rpi post operation\n"); 8726 lpfc_sli4_remove_rpis(phba); 8727 rc = -ENODEV; 8728 } 8729 8730 return rc; 8731 } 8732 8733 /** 8734 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 8735 * @phba: pointer to lpfc hba data structure. 8736 * 8737 * This routine is invoked to allocate a single 4KB memory region to 8738 * support rpis and stores them in the phba. This single region 8739 * provides support for up to 64 rpis. The region is used globally 8740 * by the device. 8741 * 8742 * Returns: 8743 * A valid rpi hdr on success. 8744 * A NULL pointer on any failure. 8745 **/ 8746 struct lpfc_rpi_hdr * 8747 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 8748 { 8749 uint16_t rpi_limit, curr_rpi_range; 8750 struct lpfc_dmabuf *dmabuf; 8751 struct lpfc_rpi_hdr *rpi_hdr; 8752 8753 /* 8754 * If the SLI4 port supports extents, posting the rpi header isn't 8755 * required. Set the expected maximum count and let the actual value 8756 * get set when extents are fully allocated. 8757 */ 8758 if (!phba->sli4_hba.rpi_hdrs_in_use) 8759 return NULL; 8760 if (phba->sli4_hba.extents_in_use) 8761 return NULL; 8762 8763 /* The limit on the logical index is just the max_rpi count. */ 8764 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 8765 8766 spin_lock_irq(&phba->hbalock); 8767 /* 8768 * Establish the starting RPI in this header block. The starting 8769 * rpi is normalized to a zero base because the physical rpi is 8770 * port based. 8771 */ 8772 curr_rpi_range = phba->sli4_hba.next_rpi; 8773 spin_unlock_irq(&phba->hbalock); 8774 8775 /* Reached full RPI range */ 8776 if (curr_rpi_range == rpi_limit) 8777 return NULL; 8778 8779 /* 8780 * First allocate the protocol header region for the port. The 8781 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
8782 */ 8783 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8784 if (!dmabuf) 8785 return NULL; 8786 8787 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 8788 LPFC_HDR_TEMPLATE_SIZE, 8789 &dmabuf->phys, GFP_KERNEL); 8790 if (!dmabuf->virt) { 8791 rpi_hdr = NULL; 8792 goto err_free_dmabuf; 8793 } 8794 8795 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 8796 rpi_hdr = NULL; 8797 goto err_free_coherent; 8798 } 8799 8800 /* Save the rpi header data for cleanup later. */ 8801 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 8802 if (!rpi_hdr) 8803 goto err_free_coherent; 8804 8805 rpi_hdr->dmabuf = dmabuf; 8806 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 8807 rpi_hdr->page_count = 1; 8808 spin_lock_irq(&phba->hbalock); 8809 8810 /* The rpi_hdr stores the logical index only. */ 8811 rpi_hdr->start_rpi = curr_rpi_range; 8812 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 8813 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 8814 8815 spin_unlock_irq(&phba->hbalock); 8816 return rpi_hdr; 8817 8818 err_free_coherent: 8819 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 8820 dmabuf->virt, dmabuf->phys); 8821 err_free_dmabuf: 8822 kfree(dmabuf); 8823 return NULL; 8824 } 8825 8826 /** 8827 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 8828 * @phba: pointer to lpfc hba data structure. 8829 * 8830 * This routine is invoked to remove all memory resources allocated 8831 * to support rpis for SLI4 ports not supporting extents. This routine 8832 * presumes the caller has released all rpis consumed by fabric or port 8833 * logins and is prepared to have the header pages removed. 8834 **/ 8835 void 8836 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 8837 { 8838 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 8839 8840 if (!phba->sli4_hba.rpi_hdrs_in_use) 8841 goto exit; 8842 8843 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 8844 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 8845 list_del(&rpi_hdr->list); 8846 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 8847 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 8848 kfree(rpi_hdr->dmabuf); 8849 kfree(rpi_hdr); 8850 } 8851 exit: 8852 /* There are no rpis available to the port now. */ 8853 phba->sli4_hba.next_rpi = 0; 8854 } 8855 8856 /** 8857 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 8858 * @pdev: pointer to pci device data structure. 8859 * 8860 * This routine is invoked to allocate the driver hba data structure for an 8861 * HBA device. If the allocation is successful, the phba reference to the 8862 * PCI device data structure is set. 
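 *
 * The PCI probe path would typically pair this with lpfc_hba_free() on a
 * later probe failure or on device removal; a minimal sketch (intermediate
 * probe steps elided):
 *
 *   phba = lpfc_hba_alloc(pdev);
 *   if (!phba)
 *           return -ENOMEM;
 *   ...
 *   lpfc_hba_free(phba);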
8863 * 8864 * Return codes 8865 * pointer to @phba - successful 8866 * NULL - error 8867 **/ 8868 static struct lpfc_hba * 8869 lpfc_hba_alloc(struct pci_dev *pdev) 8870 { 8871 struct lpfc_hba *phba; 8872 8873 /* Allocate memory for HBA structure */ 8874 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 8875 if (!phba) { 8876 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 8877 return NULL; 8878 } 8879 8880 /* Set reference to PCI device in HBA structure */ 8881 phba->pcidev = pdev; 8882 8883 /* Assign an unused board number */ 8884 phba->brd_no = lpfc_get_instance(); 8885 if (phba->brd_no < 0) { 8886 kfree(phba); 8887 return NULL; 8888 } 8889 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 8890 8891 spin_lock_init(&phba->ct_ev_lock); 8892 INIT_LIST_HEAD(&phba->ct_ev_waiters); 8893 8894 return phba; 8895 } 8896 8897 /** 8898 * lpfc_hba_free - Free driver hba data structure with a device. 8899 * @phba: pointer to lpfc hba data structure. 8900 * 8901 * This routine is invoked to free the driver hba data structure with an 8902 * HBA device. 8903 **/ 8904 static void 8905 lpfc_hba_free(struct lpfc_hba *phba) 8906 { 8907 if (phba->sli_rev == LPFC_SLI_REV4) 8908 kfree(phba->sli4_hba.hdwq); 8909 8910 /* Release the driver assigned board number */ 8911 idr_remove(&lpfc_hba_index, phba->brd_no); 8912 8913 /* Free memory allocated with sli3 rings */ 8914 kfree(phba->sli.sli3_ring); 8915 phba->sli.sli3_ring = NULL; 8916 8917 kfree(phba); 8918 return; 8919 } 8920 8921 /** 8922 * lpfc_create_shost - Create hba physical port with associated scsi host. 8923 * @phba: pointer to lpfc hba data structure. 8924 * 8925 * This routine is invoked to create HBA physical port and associate a SCSI 8926 * host with it. 8927 * 8928 * Return codes 8929 * 0 - successful 8930 * other values - error 8931 **/ 8932 static int 8933 lpfc_create_shost(struct lpfc_hba *phba) 8934 { 8935 struct lpfc_vport *vport; 8936 struct Scsi_Host *shost; 8937 8938 /* Initialize HBA FC structure */ 8939 phba->fc_edtov = FF_DEF_EDTOV; 8940 phba->fc_ratov = FF_DEF_RATOV; 8941 phba->fc_altov = FF_DEF_ALTOV; 8942 phba->fc_arbtov = FF_DEF_ARBTOV; 8943 8944 atomic_set(&phba->sdev_cnt, 0); 8945 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 8946 if (!vport) 8947 return -ENODEV; 8948 8949 shost = lpfc_shost_from_vport(vport); 8950 phba->pport = vport; 8951 8952 if (phba->nvmet_support) { 8953 /* Only 1 vport (pport) will support NVME target */ 8954 phba->targetport = NULL; 8955 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 8956 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 8957 "6076 NVME Target Found\n"); 8958 } 8959 8960 lpfc_debugfs_initialize(vport); 8961 /* Put reference to SCSI host to driver's device private data */ 8962 pci_set_drvdata(phba->pcidev, shost); 8963 8964 /* 8965 * At this point we are fully registered with PSA. In addition, 8966 * any initial discovery should be completed. 8967 */ 8968 vport->load_flag |= FC_ALLOW_FDMI; 8969 if (phba->cfg_enable_SmartSAN || 8970 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 8971 8972 /* Setup appropriate attribute masks */ 8973 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 8974 if (phba->cfg_enable_SmartSAN) 8975 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 8976 else 8977 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 8978 } 8979 return 0; 8980 } 8981 8982 /** 8983 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 8984 * @phba: pointer to lpfc hba data structure. 
8985 * 8986 * This routine is invoked to destroy HBA physical port and the associated 8987 * SCSI host. 8988 **/ 8989 static void 8990 lpfc_destroy_shost(struct lpfc_hba *phba) 8991 { 8992 struct lpfc_vport *vport = phba->pport; 8993 8994 /* Destroy physical port that associated with the SCSI host */ 8995 destroy_port(vport); 8996 8997 return; 8998 } 8999 9000 /** 9001 * lpfc_setup_bg - Setup Block guard structures and debug areas. 9002 * @phba: pointer to lpfc hba data structure. 9003 * @shost: the shost to be used to detect Block guard settings. 9004 * 9005 * This routine sets up the local Block guard protocol settings for @shost. 9006 * This routine also allocates memory for debugging bg buffers. 9007 **/ 9008 static void 9009 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 9010 { 9011 uint32_t old_mask; 9012 uint32_t old_guard; 9013 9014 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9016 "1478 Registering BlockGuard with the " 9017 "SCSI layer\n"); 9018 9019 old_mask = phba->cfg_prot_mask; 9020 old_guard = phba->cfg_prot_guard; 9021 9022 /* Only allow supported values */ 9023 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 9024 SHOST_DIX_TYPE0_PROTECTION | 9025 SHOST_DIX_TYPE1_PROTECTION); 9026 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 9027 SHOST_DIX_GUARD_CRC); 9028 9029 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 9030 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 9031 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 9032 9033 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9034 if ((old_mask != phba->cfg_prot_mask) || 9035 (old_guard != phba->cfg_prot_guard)) 9036 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9037 "1475 Registering BlockGuard with the " 9038 "SCSI layer: mask %d guard %d\n", 9039 phba->cfg_prot_mask, 9040 phba->cfg_prot_guard); 9041 9042 scsi_host_set_prot(shost, phba->cfg_prot_mask); 9043 scsi_host_set_guard(shost, phba->cfg_prot_guard); 9044 } else 9045 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9046 "1479 Not Registering BlockGuard with the SCSI " 9047 "layer, Bad protection parameters: %d %d\n", 9048 old_mask, old_guard); 9049 } 9050 } 9051 9052 /** 9053 * lpfc_post_init_setup - Perform necessary device post initialization setup. 9054 * @phba: pointer to lpfc hba data structure. 9055 * 9056 * This routine is invoked to perform all the necessary post initialization 9057 * setup for the device. 9058 **/ 9059 static void 9060 lpfc_post_init_setup(struct lpfc_hba *phba) 9061 { 9062 struct Scsi_Host *shost; 9063 struct lpfc_adapter_event_header adapter_event; 9064 9065 /* Get the default values for Model Name and Description */ 9066 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9067 9068 /* 9069 * hba setup may have changed the hba_queue_depth so we need to 9070 * adjust the value of can_queue. 
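 * The value exposed to the midlayer is 10 below the configured depth,
 * presumably to keep a few slots free for driver-internal commands;
 * e.g. a configured depth of 2000 yields a can_queue of 1990.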
9071 */ 9072 shost = pci_get_drvdata(phba->pcidev); 9073 shost->can_queue = phba->cfg_hba_queue_depth - 10; 9074 9075 lpfc_host_attrib_init(shost); 9076 9077 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9078 spin_lock_irq(shost->host_lock); 9079 lpfc_poll_start_timer(phba); 9080 spin_unlock_irq(shost->host_lock); 9081 } 9082 9083 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9084 "0428 Perform SCSI scan\n"); 9085 /* Send board arrival event to upper layer */ 9086 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 9087 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 9088 fc_host_post_vendor_event(shost, fc_get_event_number(), 9089 sizeof(adapter_event), 9090 (char *) &adapter_event, 9091 LPFC_NL_VENDOR_ID); 9092 return; 9093 } 9094 9095 /** 9096 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 9097 * @phba: pointer to lpfc hba data structure. 9098 * 9099 * This routine is invoked to set up the PCI device memory space for device 9100 * with SLI-3 interface spec. 9101 * 9102 * Return codes 9103 * 0 - successful 9104 * other values - error 9105 **/ 9106 static int 9107 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 9108 { 9109 struct pci_dev *pdev = phba->pcidev; 9110 unsigned long bar0map_len, bar2map_len; 9111 int i, hbq_count; 9112 void *ptr; 9113 int error; 9114 9115 if (!pdev) 9116 return -ENODEV; 9117 9118 /* Set the device DMA mask size */ 9119 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9120 if (error) 9121 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 9122 if (error) 9123 return error; 9124 error = -ENODEV; 9125 9126 /* Get the bus address of Bar0 and Bar2 and the number of bytes 9127 * required by each mapping. 9128 */ 9129 phba->pci_bar0_map = pci_resource_start(pdev, 0); 9130 bar0map_len = pci_resource_len(pdev, 0); 9131 9132 phba->pci_bar2_map = pci_resource_start(pdev, 2); 9133 bar2map_len = pci_resource_len(pdev, 2); 9134 9135 /* Map HBA SLIM to a kernel virtual address. */ 9136 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 9137 if (!phba->slim_memmap_p) { 9138 dev_printk(KERN_ERR, &pdev->dev, 9139 "ioremap failed for SLIM memory.\n"); 9140 goto out; 9141 } 9142 9143 /* Map HBA Control Registers to a kernel virtual address. 
*/ 9144 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 9145 if (!phba->ctrl_regs_memmap_p) { 9146 dev_printk(KERN_ERR, &pdev->dev, 9147 "ioremap failed for HBA control registers.\n"); 9148 goto out_iounmap_slim; 9149 } 9150 9151 /* Allocate memory for SLI-2 structures */ 9152 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9153 &phba->slim2p.phys, GFP_KERNEL); 9154 if (!phba->slim2p.virt) 9155 goto out_iounmap; 9156 9157 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 9158 phba->mbox_ext = (phba->slim2p.virt + 9159 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 9160 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 9161 phba->IOCBs = (phba->slim2p.virt + 9162 offsetof(struct lpfc_sli2_slim, IOCBs)); 9163 9164 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 9165 lpfc_sli_hbq_size(), 9166 &phba->hbqslimp.phys, 9167 GFP_KERNEL); 9168 if (!phba->hbqslimp.virt) 9169 goto out_free_slim; 9170 9171 hbq_count = lpfc_sli_hbq_count(); 9172 ptr = phba->hbqslimp.virt; 9173 for (i = 0; i < hbq_count; ++i) { 9174 phba->hbqs[i].hbq_virt = ptr; 9175 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 9176 ptr += (lpfc_hbq_defs[i]->entry_count * 9177 sizeof(struct lpfc_hbq_entry)); 9178 } 9179 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 9180 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 9181 9182 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 9183 9184 phba->MBslimaddr = phba->slim_memmap_p; 9185 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 9186 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 9187 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 9188 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 9189 9190 return 0; 9191 9192 out_free_slim: 9193 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9194 phba->slim2p.virt, phba->slim2p.phys); 9195 out_iounmap: 9196 iounmap(phba->ctrl_regs_memmap_p); 9197 out_iounmap_slim: 9198 iounmap(phba->slim_memmap_p); 9199 out: 9200 return error; 9201 } 9202 9203 /** 9204 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 9205 * @phba: pointer to lpfc hba data structure. 9206 * 9207 * This routine is invoked to unset the PCI device memory space for device 9208 * with SLI-3 interface spec. 9209 **/ 9210 static void 9211 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 9212 { 9213 struct pci_dev *pdev; 9214 9215 /* Obtain PCI device reference */ 9216 if (!phba->pcidev) 9217 return; 9218 else 9219 pdev = phba->pcidev; 9220 9221 /* Free coherent DMA memory allocated */ 9222 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 9223 phba->hbqslimp.virt, phba->hbqslimp.phys); 9224 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9225 phba->slim2p.virt, phba->slim2p.phys); 9226 9227 /* I/O memory unmap */ 9228 iounmap(phba->ctrl_regs_memmap_p); 9229 iounmap(phba->slim_memmap_p); 9230 9231 return; 9232 } 9233 9234 /** 9235 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 9236 * @phba: pointer to lpfc hba data structure. 9237 * 9238 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 9239 * done and check status. 9240 * 9241 * Return 0 if successful, otherwise -ENODEV. 
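 * The port semaphore register is polled every 10ms for up to 3000
 * iterations (roughly 30 seconds) before the wait is abandoned.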
9242 **/
9243 int
9244 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9245 {
9246 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9247 struct lpfc_register reg_data;
9248 int i, port_error = 0;
9249 uint32_t if_type;
9250
9251 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9252 memset(&reg_data, 0, sizeof(reg_data));
9253 if (!phba->sli4_hba.PSMPHRregaddr)
9254 return -ENODEV;
9255
9256 /* Wait up to 30 seconds for the SLI Port POST done and ready */
9257 for (i = 0; i < 3000; i++) {
9258 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9259 &portsmphr_reg.word0) ||
9260 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9261 /* Port has a fatal POST error, break out */
9262 port_error = -ENODEV;
9263 break;
9264 }
9265 if (LPFC_POST_STAGE_PORT_READY ==
9266 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9267 break;
9268 msleep(10);
9269 }
9270
9271 /*
9272 * If there was a port error during POST, then don't proceed with
9273 * other register reads as the data may not be valid. Just exit.
9274 */
9275 if (port_error) {
9276 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9277 "1408 Port Failed POST - portsmphr=0x%x, "
9278 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9279 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9280 portsmphr_reg.word0,
9281 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9282 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9283 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9284 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9285 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9286 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9287 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9288 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9289 } else {
9290 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9291 "2534 Device Info: SLIFamily=0x%x, "
9292 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9293 "SLIHint_2=0x%x, FT=0x%x\n",
9294 bf_get(lpfc_sli_intf_sli_family,
9295 &phba->sli4_hba.sli_intf),
9296 bf_get(lpfc_sli_intf_slirev,
9297 &phba->sli4_hba.sli_intf),
9298 bf_get(lpfc_sli_intf_if_type,
9299 &phba->sli4_hba.sli_intf),
9300 bf_get(lpfc_sli_intf_sli_hint1,
9301 &phba->sli4_hba.sli_intf),
9302 bf_get(lpfc_sli_intf_sli_hint2,
9303 &phba->sli4_hba.sli_intf),
9304 bf_get(lpfc_sli_intf_func_type,
9305 &phba->sli4_hba.sli_intf));
9306 /*
9307 * Check for other Port errors during the initialization
9308 * process. Fail the load if the port did not come up
9309 * correctly.
9310 */
9311 if_type = bf_get(lpfc_sli_intf_if_type,
9312 &phba->sli4_hba.sli_intf);
9313 switch (if_type) {
9314 case LPFC_SLI_INTF_IF_TYPE_0:
9315 phba->sli4_hba.ue_mask_lo =
9316 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9317 phba->sli4_hba.ue_mask_hi =
9318 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9319 uerrlo_reg.word0 =
9320 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9321 uerrhi_reg.word0 =
9322 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9323 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9324 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9325 lpfc_printf_log(phba, KERN_ERR,
9326 LOG_TRACE_EVENT,
9327 "1422 Unrecoverable Error "
9328 "Detected during POST "
9329 "uerr_lo_reg=0x%x, "
9330 "uerr_hi_reg=0x%x, "
9331 "ue_mask_lo_reg=0x%x, "
9332 "ue_mask_hi_reg=0x%x\n",
9333 uerrlo_reg.word0,
9334 uerrhi_reg.word0,
9335 phba->sli4_hba.ue_mask_lo,
9336 phba->sli4_hba.ue_mask_hi);
9337 port_error = -ENODEV;
9338 }
9339 break;
9340 case LPFC_SLI_INTF_IF_TYPE_2:
9341 case LPFC_SLI_INTF_IF_TYPE_6:
9342 /* Final checks. The port status should be clean.
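 * An error indication is treated as fatal here only when the
 * reset-needed (RN) bit is not also set.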
*/
9343 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9344 &reg_data.word0) ||
9345 (bf_get(lpfc_sliport_status_err, &reg_data) &&
9346 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9347 phba->work_status[0] =
9348 readl(phba->sli4_hba.u.if_type2.
9349 ERR1regaddr);
9350 phba->work_status[1] =
9351 readl(phba->sli4_hba.u.if_type2.
9352 ERR2regaddr);
9353 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9354 "2888 Unrecoverable port error "
9355 "following POST: port status reg "
9356 "0x%x, port_smphr reg 0x%x, "
9357 "error 1=0x%x, error 2=0x%x\n",
9358 reg_data.word0,
9359 portsmphr_reg.word0,
9360 phba->work_status[0],
9361 phba->work_status[1]);
9362 port_error = -ENODEV;
9363 break;
9364 }
9365
9366 if (lpfc_pldv_detect &&
9367 bf_get(lpfc_sli_intf_sli_family,
9368 &phba->sli4_hba.sli_intf) ==
9369 LPFC_SLI_INTF_FAMILY_G6)
9370 pci_write_config_byte(phba->pcidev,
9371 LPFC_SLI_INTF, CFG_PLD);
9372 break;
9373 case LPFC_SLI_INTF_IF_TYPE_1:
9374 default:
9375 break;
9376 }
9377 }
9378 return port_error;
9379 }
9380
9381 /**
9382 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9383 * @phba: pointer to lpfc hba data structure.
9384 * @if_type: The SLI4 interface type getting configured.
9385 *
9386 * This routine is invoked to set up SLI4 BAR0 PCI config space register
9387 * memory map.
9388 **/
9389 static void
9390 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9391 {
9392 switch (if_type) {
9393 case LPFC_SLI_INTF_IF_TYPE_0:
9394 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9395 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9396 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9397 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9398 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9399 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9400 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9401 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9402 phba->sli4_hba.SLIINTFregaddr =
9403 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9404 break;
9405 case LPFC_SLI_INTF_IF_TYPE_2:
9406 phba->sli4_hba.u.if_type2.EQDregaddr =
9407 phba->sli4_hba.conf_regs_memmap_p +
9408 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9409 phba->sli4_hba.u.if_type2.ERR1regaddr =
9410 phba->sli4_hba.conf_regs_memmap_p +
9411 LPFC_CTL_PORT_ER1_OFFSET;
9412 phba->sli4_hba.u.if_type2.ERR2regaddr =
9413 phba->sli4_hba.conf_regs_memmap_p +
9414 LPFC_CTL_PORT_ER2_OFFSET;
9415 phba->sli4_hba.u.if_type2.CTRLregaddr =
9416 phba->sli4_hba.conf_regs_memmap_p +
9417 LPFC_CTL_PORT_CTL_OFFSET;
9418 phba->sli4_hba.u.if_type2.STATUSregaddr =
9419 phba->sli4_hba.conf_regs_memmap_p +
9420 LPFC_CTL_PORT_STA_OFFSET;
9421 phba->sli4_hba.SLIINTFregaddr =
9422 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9423 phba->sli4_hba.PSMPHRregaddr =
9424 phba->sli4_hba.conf_regs_memmap_p +
9425 LPFC_CTL_PORT_SEM_OFFSET;
9426 phba->sli4_hba.RQDBregaddr =
9427 phba->sli4_hba.conf_regs_memmap_p +
9428 LPFC_ULP0_RQ_DOORBELL;
9429 phba->sli4_hba.WQDBregaddr =
9430 phba->sli4_hba.conf_regs_memmap_p +
9431 LPFC_ULP0_WQ_DOORBELL;
9432 phba->sli4_hba.CQDBregaddr =
9433 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9434 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9435 phba->sli4_hba.MQDBregaddr =
9436 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9437 phba->sli4_hba.BMBXregaddr =
9438 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9439 break;
9440 case LPFC_SLI_INTF_IF_TYPE_6:
9441 phba->sli4_hba.u.if_type2.EQDregaddr =
9442 phba->sli4_hba.conf_regs_memmap_p +
9443
LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9444 phba->sli4_hba.u.if_type2.ERR1regaddr =
9445 phba->sli4_hba.conf_regs_memmap_p +
9446 LPFC_CTL_PORT_ER1_OFFSET;
9447 phba->sli4_hba.u.if_type2.ERR2regaddr =
9448 phba->sli4_hba.conf_regs_memmap_p +
9449 LPFC_CTL_PORT_ER2_OFFSET;
9450 phba->sli4_hba.u.if_type2.CTRLregaddr =
9451 phba->sli4_hba.conf_regs_memmap_p +
9452 LPFC_CTL_PORT_CTL_OFFSET;
9453 phba->sli4_hba.u.if_type2.STATUSregaddr =
9454 phba->sli4_hba.conf_regs_memmap_p +
9455 LPFC_CTL_PORT_STA_OFFSET;
9456 phba->sli4_hba.PSMPHRregaddr =
9457 phba->sli4_hba.conf_regs_memmap_p +
9458 LPFC_CTL_PORT_SEM_OFFSET;
9459 phba->sli4_hba.BMBXregaddr =
9460 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9461 break;
9462 case LPFC_SLI_INTF_IF_TYPE_1:
9463 default:
9464 dev_printk(KERN_ERR, &phba->pcidev->dev,
9465 "FATAL - unsupported SLI4 interface type - %d\n",
9466 if_type);
9467 break;
9468 }
9469 }
9470
9471 /**
9472 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9473 * @phba: pointer to lpfc hba data structure.
9474 * @if_type: sli if type to operate on.
9475 *
9476 * This routine is invoked to set up SLI4 BAR1 register memory map.
9477 **/
9478 static void
9479 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9480 {
9481 switch (if_type) {
9482 case LPFC_SLI_INTF_IF_TYPE_0:
9483 phba->sli4_hba.PSMPHRregaddr =
9484 phba->sli4_hba.ctrl_regs_memmap_p +
9485 LPFC_SLIPORT_IF0_SMPHR;
9486 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9487 LPFC_HST_ISR0;
9488 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9489 LPFC_HST_IMR0;
9490 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9491 LPFC_HST_ISCR0;
9492 break;
9493 case LPFC_SLI_INTF_IF_TYPE_6:
9494 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9495 LPFC_IF6_RQ_DOORBELL;
9496 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9497 LPFC_IF6_WQ_DOORBELL;
9498 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9499 LPFC_IF6_CQ_DOORBELL;
9500 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9501 LPFC_IF6_EQ_DOORBELL;
9502 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9503 LPFC_IF6_MQ_DOORBELL;
9504 break;
9505 case LPFC_SLI_INTF_IF_TYPE_2:
9506 case LPFC_SLI_INTF_IF_TYPE_1:
9507 default:
9508 dev_err(&phba->pcidev->dev,
9509 "FATAL - unsupported SLI4 interface type - %d\n",
9510 if_type);
9511 break;
9512 }
9513 }
9514
9515 /**
9516 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9517 * @phba: pointer to lpfc hba data structure.
9518 * @vf: virtual function number
9519 *
9520 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9521 * based on the given virtual function number, @vf.
9522 *
9523 * Return 0 if successful, otherwise -ENODEV.
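 * Each virtual function owns its own LPFC_VFR_PAGE_SIZE-sized page of
 * doorbells in BAR2, so every doorbell address below is computed as
 * doorbell base + (vf * LPFC_VFR_PAGE_SIZE) + register offset.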
9524 **/
9525 static int
9526 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9527 {
9528 if (vf > LPFC_VIR_FUNC_MAX)
9529 return -ENODEV;
9530
9531 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9532 vf * LPFC_VFR_PAGE_SIZE +
9533 LPFC_ULP0_RQ_DOORBELL);
9534 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9535 vf * LPFC_VFR_PAGE_SIZE +
9536 LPFC_ULP0_WQ_DOORBELL);
9537 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9538 vf * LPFC_VFR_PAGE_SIZE +
9539 LPFC_EQCQ_DOORBELL);
9540 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9541 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9542 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9543 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9544 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9545 return 0;
9546 }
9547
9548 /**
9549 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9550 * @phba: pointer to lpfc hba data structure.
9551 *
9552 * This routine is invoked to create the bootstrap mailbox
9553 * region consistent with the SLI-4 interface spec. This
9554 * routine allocates all memory necessary to communicate
9555 * mailbox commands to the port and sets up all alignment
9556 * needs. No locks are expected to be held when calling
9557 * this routine.
9558 *
9559 * Return codes
9560 * 0 - successful
9561 * -ENOMEM - could not allocate memory.
9562 **/
9563 static int
9564 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9565 {
9566 uint32_t bmbx_size;
9567 struct lpfc_dmabuf *dmabuf;
9568 struct dma_address *dma_address;
9569 uint32_t pa_addr;
9570 uint64_t phys_addr;
9571
9572 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9573 if (!dmabuf)
9574 return -ENOMEM;
9575
9576 /*
9577 * The bootstrap mailbox region is comprised of 2 parts
9578 * plus an alignment restriction of 16 bytes.
9579 */
9580 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9581 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9582 &dmabuf->phys, GFP_KERNEL);
9583 if (!dmabuf->virt) {
9584 kfree(dmabuf);
9585 return -ENOMEM;
9586 }
9587
9588 /*
9589 * Initialize the bootstrap mailbox pointers now so that the register
9590 * operations are simple later. The mailbox dma address is required
9591 * to be 16-byte aligned. Also align the virtual memory as each
9592 * mailbox is copied into the bmbx mailbox region before issuing the
9593 * command to the port.
9594 */
9595 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9596 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9597
9598 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9599 LPFC_ALIGN_16_BYTE);
9600 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9601 LPFC_ALIGN_16_BYTE);
9602
9603 /*
9604 * Set the high and low physical addresses now. The SLI4 alignment
9605 * requirement is 16 bytes and the mailbox is posted to the port
9606 * as two 30-bit addresses. The other data is a bit marking whether
9607 * the 30-bit address is the high or low address.
9608 * Upcast bmbx aphys to 64bits so shift instruction compiles
9609 * clean on 32 bit machines.
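 * In effect addr_hi carries physical address bits 63:34 and addr_lo
 * carries bits 33:4, each shifted into bits 31:2 of the posted word
 * alongside its high/low marker bit.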
9610 */ 9611 dma_address = &phba->sli4_hba.bmbx.dma_address; 9612 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 9613 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 9614 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 9615 LPFC_BMBX_BIT1_ADDR_HI); 9616 9617 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 9618 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 9619 LPFC_BMBX_BIT1_ADDR_LO); 9620 return 0; 9621 } 9622 9623 /** 9624 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 9625 * @phba: pointer to lpfc hba data structure. 9626 * 9627 * This routine is invoked to teardown the bootstrap mailbox 9628 * region and release all host resources. This routine requires 9629 * the caller to ensure all mailbox commands recovered, no 9630 * additional mailbox comands are sent, and interrupts are disabled 9631 * before calling this routine. 9632 * 9633 **/ 9634 static void 9635 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 9636 { 9637 dma_free_coherent(&phba->pcidev->dev, 9638 phba->sli4_hba.bmbx.bmbx_size, 9639 phba->sli4_hba.bmbx.dmabuf->virt, 9640 phba->sli4_hba.bmbx.dmabuf->phys); 9641 9642 kfree(phba->sli4_hba.bmbx.dmabuf); 9643 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 9644 } 9645 9646 static const char * const lpfc_topo_to_str[] = { 9647 "Loop then P2P", 9648 "Loopback", 9649 "P2P Only", 9650 "Unsupported", 9651 "Loop Only", 9652 "Unsupported", 9653 "P2P then Loop", 9654 }; 9655 9656 #define LINK_FLAGS_DEF 0x0 9657 #define LINK_FLAGS_P2P 0x1 9658 #define LINK_FLAGS_LOOP 0x2 9659 /** 9660 * lpfc_map_topology - Map the topology read from READ_CONFIG 9661 * @phba: pointer to lpfc hba data structure. 9662 * @rd_config: pointer to read config data 9663 * 9664 * This routine is invoked to map the topology values as read 9665 * from the read config mailbox command. If the persistent 9666 * topology feature is supported, the firmware will provide the 9667 * saved topology information to be used in INIT_LINK 9668 **/ 9669 static void 9670 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 9671 { 9672 u8 ptv, tf, pt; 9673 9674 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 9675 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 9676 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 9677 9678 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9679 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 9680 ptv, tf, pt); 9681 if (!ptv) { 9682 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9683 "2019 FW does not support persistent topology " 9684 "Using driver parameter defined value [%s]", 9685 lpfc_topo_to_str[phba->cfg_topology]); 9686 return; 9687 } 9688 /* FW supports persistent topology - override module parameter value */ 9689 phba->hba_flag |= HBA_PERSISTENT_TOPO; 9690 9691 /* if ASIC_GEN_NUM >= 0xC) */ 9692 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9693 LPFC_SLI_INTF_IF_TYPE_6) || 9694 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 9695 LPFC_SLI_INTF_FAMILY_G6)) { 9696 if (!tf) { 9697 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 9698 ? FLAGS_TOPOLOGY_MODE_LOOP 9699 : FLAGS_TOPOLOGY_MODE_PT_PT); 9700 } else { 9701 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 9702 } 9703 } else { /* G5 */ 9704 if (tf) { 9705 /* If topology failover set - pt is '0' or '1' */ 9706 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 9707 FLAGS_TOPOLOGY_MODE_LOOP_PT); 9708 } else { 9709 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 9710 ? 
FLAGS_TOPOLOGY_MODE_PT_PT 9711 : FLAGS_TOPOLOGY_MODE_LOOP); 9712 } 9713 } 9714 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 9715 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9716 "2020 Using persistent topology value [%s]", 9717 lpfc_topo_to_str[phba->cfg_topology]); 9718 } else { 9719 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9720 "2021 Invalid topology values from FW " 9721 "Using driver parameter defined value [%s]", 9722 lpfc_topo_to_str[phba->cfg_topology]); 9723 } 9724 } 9725 9726 /** 9727 * lpfc_sli4_read_config - Get the config parameters. 9728 * @phba: pointer to lpfc hba data structure. 9729 * 9730 * This routine is invoked to read the configuration parameters from the HBA. 9731 * The configuration parameters are used to set the base and maximum values 9732 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 9733 * allocation for the port. 9734 * 9735 * Return codes 9736 * 0 - successful 9737 * -ENOMEM - No available memory 9738 * -EIO - The mailbox failed to complete successfully. 9739 **/ 9740 int 9741 lpfc_sli4_read_config(struct lpfc_hba *phba) 9742 { 9743 LPFC_MBOXQ_t *pmb; 9744 struct lpfc_mbx_read_config *rd_config; 9745 union lpfc_sli4_cfg_shdr *shdr; 9746 uint32_t shdr_status, shdr_add_status; 9747 struct lpfc_mbx_get_func_cfg *get_func_cfg; 9748 struct lpfc_rsrc_desc_fcfcoe *desc; 9749 char *pdesc_0; 9750 uint16_t forced_link_speed; 9751 uint32_t if_type, qmin; 9752 int length, i, rc = 0, rc2; 9753 9754 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9755 if (!pmb) { 9756 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9757 "2011 Unable to allocate memory for issuing " 9758 "SLI_CONFIG_SPECIAL mailbox command\n"); 9759 return -ENOMEM; 9760 } 9761 9762 lpfc_read_config(phba, pmb); 9763 9764 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 9765 if (rc != MBX_SUCCESS) { 9766 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9767 "2012 Mailbox failed , mbxCmd x%x " 9768 "READ_CONFIG, mbxStatus x%x\n", 9769 bf_get(lpfc_mqe_command, &pmb->u.mqe), 9770 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 9771 rc = -EIO; 9772 } else { 9773 rd_config = &pmb->u.mqe.un.rd_config; 9774 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 9775 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 9776 phba->sli4_hba.lnk_info.lnk_tp = 9777 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 9778 phba->sli4_hba.lnk_info.lnk_no = 9779 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 9780 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9781 "3081 lnk_type:%d, lnk_numb:%d\n", 9782 phba->sli4_hba.lnk_info.lnk_tp, 9783 phba->sli4_hba.lnk_info.lnk_no); 9784 } else 9785 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9786 "3082 Mailbox (x%x) returned ldv:x0\n", 9787 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 9788 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 9789 phba->bbcredit_support = 1; 9790 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 9791 } 9792 9793 phba->sli4_hba.conf_trunk = 9794 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 9795 phba->sli4_hba.extents_in_use = 9796 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 9797 phba->sli4_hba.max_cfg_param.max_xri = 9798 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 9799 /* Reduce resource usage in kdump environment */ 9800 if (is_kdump_kernel() && 9801 phba->sli4_hba.max_cfg_param.max_xri > 512) 9802 phba->sli4_hba.max_cfg_param.max_xri = 512; 9803 phba->sli4_hba.max_cfg_param.xri_base = 9804 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 9805 phba->sli4_hba.max_cfg_param.max_vpi = 9806 
bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 9807 /* Limit the max we support */ 9808 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 9809 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 9810 phba->sli4_hba.max_cfg_param.vpi_base = 9811 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 9812 phba->sli4_hba.max_cfg_param.max_rpi = 9813 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 9814 phba->sli4_hba.max_cfg_param.rpi_base = 9815 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 9816 phba->sli4_hba.max_cfg_param.max_vfi = 9817 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 9818 phba->sli4_hba.max_cfg_param.vfi_base = 9819 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 9820 phba->sli4_hba.max_cfg_param.max_fcfi = 9821 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 9822 phba->sli4_hba.max_cfg_param.max_eq = 9823 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 9824 phba->sli4_hba.max_cfg_param.max_rq = 9825 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 9826 phba->sli4_hba.max_cfg_param.max_wq = 9827 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 9828 phba->sli4_hba.max_cfg_param.max_cq = 9829 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 9830 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 9831 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 9832 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 9833 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 9834 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 9835 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 9836 phba->max_vports = phba->max_vpi; 9837 9838 /* Next decide on FPIN or Signal E2E CGN support 9839 * For congestion alarms and warnings valid combination are: 9840 * 1. FPIN alarms / FPIN warnings 9841 * 2. Signal alarms / Signal warnings 9842 * 3. FPIN alarms / Signal warnings 9843 * 4. Signal alarms / FPIN warnings 9844 * 9845 * Initialize the adapter frequency to 100 mSecs 9846 */ 9847 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 9848 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9849 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9850 9851 if (lpfc_use_cgn_signal) { 9852 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { 9853 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 9854 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 9855 } 9856 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { 9857 /* MUST support both alarm and warning 9858 * because EDC does not support alarm alone. 9859 */ 9860 if (phba->cgn_reg_signal != 9861 EDC_CG_SIG_WARN_ONLY) { 9862 /* Must support both or none */ 9863 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 9864 phba->cgn_reg_signal = 9865 EDC_CG_SIG_NOTSUPPORTED; 9866 } else { 9867 phba->cgn_reg_signal = 9868 EDC_CG_SIG_WARN_ALARM; 9869 phba->cgn_reg_fpin = 9870 LPFC_CGN_FPIN_NONE; 9871 } 9872 } 9873 } 9874 9875 /* Set the congestion initial signal and fpin values. */ 9876 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; 9877 phba->cgn_init_reg_signal = phba->cgn_reg_signal; 9878 9879 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 9880 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", 9881 phba->cgn_reg_signal, phba->cgn_reg_fpin); 9882 9883 lpfc_map_topology(phba, rd_config); 9884 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9885 "2003 cfg params Extents? 
%d " 9886 "XRI(B:%d M:%d), " 9887 "VPI(B:%d M:%d) " 9888 "VFI(B:%d M:%d) " 9889 "RPI(B:%d M:%d) " 9890 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", 9891 phba->sli4_hba.extents_in_use, 9892 phba->sli4_hba.max_cfg_param.xri_base, 9893 phba->sli4_hba.max_cfg_param.max_xri, 9894 phba->sli4_hba.max_cfg_param.vpi_base, 9895 phba->sli4_hba.max_cfg_param.max_vpi, 9896 phba->sli4_hba.max_cfg_param.vfi_base, 9897 phba->sli4_hba.max_cfg_param.max_vfi, 9898 phba->sli4_hba.max_cfg_param.rpi_base, 9899 phba->sli4_hba.max_cfg_param.max_rpi, 9900 phba->sli4_hba.max_cfg_param.max_fcfi, 9901 phba->sli4_hba.max_cfg_param.max_eq, 9902 phba->sli4_hba.max_cfg_param.max_cq, 9903 phba->sli4_hba.max_cfg_param.max_wq, 9904 phba->sli4_hba.max_cfg_param.max_rq, 9905 phba->lmt); 9906 9907 /* 9908 * Calculate queue resources based on how 9909 * many WQ/CQ/EQs are available. 9910 */ 9911 qmin = phba->sli4_hba.max_cfg_param.max_wq; 9912 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 9913 qmin = phba->sli4_hba.max_cfg_param.max_cq; 9914 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 9915 qmin = phba->sli4_hba.max_cfg_param.max_eq; 9916 /* 9917 * Whats left after this can go toward NVME / FCP. 9918 * The minus 4 accounts for ELS, NVME LS, MBOX 9919 * plus one extra. When configured for 9920 * NVMET, FCP io channel WQs are not created. 9921 */ 9922 qmin -= 4; 9923 9924 /* Check to see if there is enough for NVME */ 9925 if ((phba->cfg_irq_chann > qmin) || 9926 (phba->cfg_hdw_queue > qmin)) { 9927 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9928 "2005 Reducing Queues - " 9929 "FW resource limitation: " 9930 "WQ %d CQ %d EQ %d: min %d: " 9931 "IRQ %d HDWQ %d\n", 9932 phba->sli4_hba.max_cfg_param.max_wq, 9933 phba->sli4_hba.max_cfg_param.max_cq, 9934 phba->sli4_hba.max_cfg_param.max_eq, 9935 qmin, phba->cfg_irq_chann, 9936 phba->cfg_hdw_queue); 9937 9938 if (phba->cfg_irq_chann > qmin) 9939 phba->cfg_irq_chann = qmin; 9940 if (phba->cfg_hdw_queue > qmin) 9941 phba->cfg_hdw_queue = qmin; 9942 } 9943 } 9944 9945 if (rc) 9946 goto read_cfg_out; 9947 9948 /* Update link speed if forced link speed is supported */ 9949 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9950 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9951 forced_link_speed = 9952 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 9953 if (forced_link_speed) { 9954 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 9955 9956 switch (forced_link_speed) { 9957 case LINK_SPEED_1G: 9958 phba->cfg_link_speed = 9959 LPFC_USER_LINK_SPEED_1G; 9960 break; 9961 case LINK_SPEED_2G: 9962 phba->cfg_link_speed = 9963 LPFC_USER_LINK_SPEED_2G; 9964 break; 9965 case LINK_SPEED_4G: 9966 phba->cfg_link_speed = 9967 LPFC_USER_LINK_SPEED_4G; 9968 break; 9969 case LINK_SPEED_8G: 9970 phba->cfg_link_speed = 9971 LPFC_USER_LINK_SPEED_8G; 9972 break; 9973 case LINK_SPEED_10G: 9974 phba->cfg_link_speed = 9975 LPFC_USER_LINK_SPEED_10G; 9976 break; 9977 case LINK_SPEED_16G: 9978 phba->cfg_link_speed = 9979 LPFC_USER_LINK_SPEED_16G; 9980 break; 9981 case LINK_SPEED_32G: 9982 phba->cfg_link_speed = 9983 LPFC_USER_LINK_SPEED_32G; 9984 break; 9985 case LINK_SPEED_64G: 9986 phba->cfg_link_speed = 9987 LPFC_USER_LINK_SPEED_64G; 9988 break; 9989 case 0xffff: 9990 phba->cfg_link_speed = 9991 LPFC_USER_LINK_SPEED_AUTO; 9992 break; 9993 default: 9994 lpfc_printf_log(phba, KERN_ERR, 9995 LOG_TRACE_EVENT, 9996 "0047 Unrecognized link " 9997 "speed : %d\n", 9998 forced_link_speed); 9999 phba->cfg_link_speed = 10000 LPFC_USER_LINK_SPEED_AUTO; 10001 } 10002 } 10003 } 10004 10005 /* Reset the 
DFT_HBA_Q_DEPTH to the max xri */ 10006 length = phba->sli4_hba.max_cfg_param.max_xri - 10007 lpfc_sli4_get_els_iocb_cnt(phba); 10008 if (phba->cfg_hba_queue_depth > length) { 10009 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10010 "3361 HBA queue depth changed from %d to %d\n", 10011 phba->cfg_hba_queue_depth, length); 10012 phba->cfg_hba_queue_depth = length; 10013 } 10014 10015 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 10016 LPFC_SLI_INTF_IF_TYPE_2) 10017 goto read_cfg_out; 10018 10019 /* get the pf# and vf# for SLI4 if_type 2 port */ 10020 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 10021 sizeof(struct lpfc_sli4_cfg_mhdr)); 10022 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 10023 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 10024 length, LPFC_SLI4_MBX_EMBED); 10025 10026 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10027 shdr = (union lpfc_sli4_cfg_shdr *) 10028 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 10029 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10030 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10031 if (rc2 || shdr_status || shdr_add_status) { 10032 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10033 "3026 Mailbox failed , mbxCmd x%x " 10034 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 10035 bf_get(lpfc_mqe_command, &pmb->u.mqe), 10036 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 10037 goto read_cfg_out; 10038 } 10039 10040 /* search for fc_fcoe resrouce descriptor */ 10041 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 10042 10043 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 10044 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 10045 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 10046 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 10047 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 10048 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 10049 goto read_cfg_out; 10050 10051 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 10052 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 10053 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 10054 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 10055 phba->sli4_hba.iov.pf_number = 10056 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 10057 phba->sli4_hba.iov.vf_number = 10058 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 10059 break; 10060 } 10061 } 10062 10063 if (i < LPFC_RSRC_DESC_MAX_NUM) 10064 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10065 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 10066 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 10067 phba->sli4_hba.iov.vf_number); 10068 else 10069 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10070 "3028 GET_FUNCTION_CONFIG: failed to find " 10071 "Resource Descriptor:x%x\n", 10072 LPFC_RSRC_DESC_TYPE_FCFCOE); 10073 10074 read_cfg_out: 10075 mempool_free(pmb, phba->mbox_mem_pool); 10076 return rc; 10077 } 10078 10079 /** 10080 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 10081 * @phba: pointer to lpfc hba data structure. 10082 * 10083 * This routine is invoked to setup the port-side endian order when 10084 * the port if_type is 0. This routine has no function for other 10085 * if_types. 10086 * 10087 * Return codes 10088 * 0 - successful 10089 * -ENOMEM - No available memory 10090 * -EIO - The mailbox failed to complete successfully. 
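 * The two payload words are the well-known HOST_ENDIAN_LOW_WORD0 and
 * HOST_ENDIAN_HIGH_WORD1 patterns, from which the port deduces the
 * host's byte order.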
10091 **/ 10092 static int 10093 lpfc_setup_endian_order(struct lpfc_hba *phba) 10094 { 10095 LPFC_MBOXQ_t *mboxq; 10096 uint32_t if_type, rc = 0; 10097 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 10098 HOST_ENDIAN_HIGH_WORD1}; 10099 10100 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10101 switch (if_type) { 10102 case LPFC_SLI_INTF_IF_TYPE_0: 10103 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10104 GFP_KERNEL); 10105 if (!mboxq) { 10106 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10107 "0492 Unable to allocate memory for " 10108 "issuing SLI_CONFIG_SPECIAL mailbox " 10109 "command\n"); 10110 return -ENOMEM; 10111 } 10112 10113 /* 10114 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 10115 * two words to contain special data values and no other data. 10116 */ 10117 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 10118 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 10119 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10120 if (rc != MBX_SUCCESS) { 10121 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10122 "0493 SLI_CONFIG_SPECIAL mailbox " 10123 "failed with status x%x\n", 10124 rc); 10125 rc = -EIO; 10126 } 10127 mempool_free(mboxq, phba->mbox_mem_pool); 10128 break; 10129 case LPFC_SLI_INTF_IF_TYPE_6: 10130 case LPFC_SLI_INTF_IF_TYPE_2: 10131 case LPFC_SLI_INTF_IF_TYPE_1: 10132 default: 10133 break; 10134 } 10135 return rc; 10136 } 10137 10138 /** 10139 * lpfc_sli4_queue_verify - Verify and update EQ counts 10140 * @phba: pointer to lpfc hba data structure. 10141 * 10142 * This routine is invoked to check the user settable queue counts for EQs. 10143 * After this routine is called the counts will be set to valid values that 10144 * adhere to the constraints of the system's interrupt vectors and the port's 10145 * queue resources. 
10146 * 10147 * Return codes 10148 * 0 - successful 10149 * -ENOMEM - No available memory 10150 **/ 10151 static int 10152 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 10153 { 10154 /* 10155 * Sanity check for configured queue parameters against the run-time 10156 * device parameters 10157 */ 10158 10159 if (phba->nvmet_support) { 10160 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 10161 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 10162 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 10163 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 10164 } 10165 10166 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10167 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 10168 phba->cfg_hdw_queue, phba->cfg_irq_chann, 10169 phba->cfg_nvmet_mrq); 10170 10171 /* Get EQ depth from module parameter, fake the default for now */ 10172 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10173 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10174 10175 /* Get CQ depth from module parameter, fake the default for now */ 10176 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10177 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10178 return 0; 10179 } 10180 10181 static int 10182 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 10183 { 10184 struct lpfc_queue *qdesc; 10185 u32 wqesize; 10186 int cpu; 10187 10188 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 10189 /* Create Fast Path IO CQs */ 10190 if (phba->enab_exp_wqcq_pages) 10191 /* Increase the CQ size when WQEs contain an embedded cdb */ 10192 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10193 phba->sli4_hba.cq_esize, 10194 LPFC_CQE_EXP_COUNT, cpu); 10195 10196 else 10197 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10198 phba->sli4_hba.cq_esize, 10199 phba->sli4_hba.cq_ecount, cpu); 10200 if (!qdesc) { 10201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10202 "0499 Failed allocate fast-path IO CQ (%d)\n", 10203 idx); 10204 return 1; 10205 } 10206 qdesc->qe_valid = 1; 10207 qdesc->hdwq = idx; 10208 qdesc->chann = cpu; 10209 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 10210 10211 /* Create Fast Path IO WQs */ 10212 if (phba->enab_exp_wqcq_pages) { 10213 /* Increase the WQ size when WQEs contain an embedded cdb */ 10214 wqesize = (phba->fcp_embed_io) ? 10215 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 10216 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10217 wqesize, 10218 LPFC_WQE_EXP_COUNT, cpu); 10219 } else 10220 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10221 phba->sli4_hba.wq_esize, 10222 phba->sli4_hba.wq_ecount, cpu); 10223 10224 if (!qdesc) { 10225 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10226 "0503 Failed allocate fast-path IO WQ (%d)\n", 10227 idx); 10228 return 1; 10229 } 10230 qdesc->hdwq = idx; 10231 qdesc->chann = cpu; 10232 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 10233 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10234 return 0; 10235 } 10236 10237 /** 10238 * lpfc_sli4_queue_create - Create all the SLI4 queues 10239 * @phba: pointer to lpfc hba data structure. 10240 * 10241 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 10242 * operation. For each SLI4 queue type, the parameters such as queue entry 10243 * count (queue depth) shall be taken from the module parameter. For now, 10244 * we just use some constant number as place holder. 10245 * 10246 * Return codes 10247 * 0 - successful 10248 * -ENOMEM - No availble memory 10249 * -EIO - The mailbox failed to complete successfully. 
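 * Allocations made here include one EQ per IRQ vector, an IO CQ/WQ pair
 * per hardware queue, the slow-path mailbox and ELS CQ/WQ pairs, the
 * unsolicited receive header/data RQ pair, and, when NVME or NVMET is
 * configured, the NVME LS and NVMET MRQ queues.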
10250 **/ 10251 int 10252 lpfc_sli4_queue_create(struct lpfc_hba *phba) 10253 { 10254 struct lpfc_queue *qdesc; 10255 int idx, cpu, eqcpu; 10256 struct lpfc_sli4_hdw_queue *qp; 10257 struct lpfc_vector_map_info *cpup; 10258 struct lpfc_vector_map_info *eqcpup; 10259 struct lpfc_eq_intr_info *eqi; 10260 10261 /* 10262 * Create HBA Record arrays. 10263 * Both NVME and FCP will share that same vectors / EQs 10264 */ 10265 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 10266 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 10267 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 10268 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 10269 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 10270 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 10271 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10272 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10273 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10274 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10275 10276 if (!phba->sli4_hba.hdwq) { 10277 phba->sli4_hba.hdwq = kcalloc( 10278 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 10279 GFP_KERNEL); 10280 if (!phba->sli4_hba.hdwq) { 10281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10282 "6427 Failed allocate memory for " 10283 "fast-path Hardware Queue array\n"); 10284 goto out_error; 10285 } 10286 /* Prepare hardware queues to take IO buffers */ 10287 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10288 qp = &phba->sli4_hba.hdwq[idx]; 10289 spin_lock_init(&qp->io_buf_list_get_lock); 10290 spin_lock_init(&qp->io_buf_list_put_lock); 10291 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 10292 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 10293 qp->get_io_bufs = 0; 10294 qp->put_io_bufs = 0; 10295 qp->total_io_bufs = 0; 10296 spin_lock_init(&qp->abts_io_buf_list_lock); 10297 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 10298 qp->abts_scsi_io_bufs = 0; 10299 qp->abts_nvme_io_bufs = 0; 10300 INIT_LIST_HEAD(&qp->sgl_list); 10301 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 10302 spin_lock_init(&qp->hdwq_lock); 10303 } 10304 } 10305 10306 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10307 if (phba->nvmet_support) { 10308 phba->sli4_hba.nvmet_cqset = kcalloc( 10309 phba->cfg_nvmet_mrq, 10310 sizeof(struct lpfc_queue *), 10311 GFP_KERNEL); 10312 if (!phba->sli4_hba.nvmet_cqset) { 10313 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10314 "3121 Fail allocate memory for " 10315 "fast-path CQ set array\n"); 10316 goto out_error; 10317 } 10318 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 10319 phba->cfg_nvmet_mrq, 10320 sizeof(struct lpfc_queue *), 10321 GFP_KERNEL); 10322 if (!phba->sli4_hba.nvmet_mrq_hdr) { 10323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10324 "3122 Fail allocate memory for " 10325 "fast-path RQ set hdr array\n"); 10326 goto out_error; 10327 } 10328 phba->sli4_hba.nvmet_mrq_data = kcalloc( 10329 phba->cfg_nvmet_mrq, 10330 sizeof(struct lpfc_queue *), 10331 GFP_KERNEL); 10332 if (!phba->sli4_hba.nvmet_mrq_data) { 10333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10334 "3124 Fail allocate memory for " 10335 "fast-path RQ set data array\n"); 10336 goto out_error; 10337 } 10338 } 10339 } 10340 10341 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10342 10343 /* Create HBA Event Queues (EQs) */ 10344 for_each_present_cpu(cpu) { 10345 /* We only want to create 1 EQ per vector, even though 10346 * multiple CPUs might be using that vector. so only 10347 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 
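 * CPUs that share an IRQ vector are attached to that vector's EQ by the
 * second loop below.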
10348 */ 10349 cpup = &phba->sli4_hba.cpu_map[cpu]; 10350 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10351 continue; 10352 10353 /* Get a ptr to the Hardware Queue associated with this CPU */ 10354 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10355 10356 /* Allocate an EQ */ 10357 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10358 phba->sli4_hba.eq_esize, 10359 phba->sli4_hba.eq_ecount, cpu); 10360 if (!qdesc) { 10361 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10362 "0497 Failed allocate EQ (%d)\n", 10363 cpup->hdwq); 10364 goto out_error; 10365 } 10366 qdesc->qe_valid = 1; 10367 qdesc->hdwq = cpup->hdwq; 10368 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 10369 qdesc->last_cpu = qdesc->chann; 10370 10371 /* Save the allocated EQ in the Hardware Queue */ 10372 qp->hba_eq = qdesc; 10373 10374 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 10375 list_add(&qdesc->cpu_list, &eqi->list); 10376 } 10377 10378 /* Now we need to populate the other Hardware Queues, that share 10379 * an IRQ vector, with the associated EQ ptr. 10380 */ 10381 for_each_present_cpu(cpu) { 10382 cpup = &phba->sli4_hba.cpu_map[cpu]; 10383 10384 /* Check for EQ already allocated in previous loop */ 10385 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 10386 continue; 10387 10388 /* Check for multiple CPUs per hdwq */ 10389 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10390 if (qp->hba_eq) 10391 continue; 10392 10393 /* We need to share an EQ for this hdwq */ 10394 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 10395 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 10396 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 10397 } 10398 10399 /* Allocate IO Path SLI4 CQ/WQs */ 10400 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10401 if (lpfc_alloc_io_wq_cq(phba, idx)) 10402 goto out_error; 10403 } 10404 10405 if (phba->nvmet_support) { 10406 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10407 cpu = lpfc_find_cpu_handle(phba, idx, 10408 LPFC_FIND_BY_HDWQ); 10409 qdesc = lpfc_sli4_queue_alloc(phba, 10410 LPFC_DEFAULT_PAGE_SIZE, 10411 phba->sli4_hba.cq_esize, 10412 phba->sli4_hba.cq_ecount, 10413 cpu); 10414 if (!qdesc) { 10415 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10416 "3142 Failed allocate NVME " 10417 "CQ Set (%d)\n", idx); 10418 goto out_error; 10419 } 10420 qdesc->qe_valid = 1; 10421 qdesc->hdwq = idx; 10422 qdesc->chann = cpu; 10423 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 10424 } 10425 } 10426 10427 /* 10428 * Create Slow Path Completion Queues (CQs) 10429 */ 10430 10431 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 10432 /* Create slow-path Mailbox Command Complete Queue */ 10433 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10434 phba->sli4_hba.cq_esize, 10435 phba->sli4_hba.cq_ecount, cpu); 10436 if (!qdesc) { 10437 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10438 "0500 Failed allocate slow-path mailbox CQ\n"); 10439 goto out_error; 10440 } 10441 qdesc->qe_valid = 1; 10442 phba->sli4_hba.mbx_cq = qdesc; 10443 10444 /* Create slow-path ELS Complete Queue */ 10445 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10446 phba->sli4_hba.cq_esize, 10447 phba->sli4_hba.cq_ecount, cpu); 10448 if (!qdesc) { 10449 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10450 "0501 Failed allocate slow-path ELS CQ\n"); 10451 goto out_error; 10452 } 10453 qdesc->qe_valid = 1; 10454 qdesc->chann = cpu; 10455 phba->sli4_hba.els_cq = qdesc; 10456 10457 10458 /* 10459 * Create Slow Path Work Queues (WQs) 10460 */ 10461 10462 /* Create Mailbox 
Command Queue */ 10463 10464 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10465 phba->sli4_hba.mq_esize, 10466 phba->sli4_hba.mq_ecount, cpu); 10467 if (!qdesc) { 10468 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10469 "0505 Failed allocate slow-path MQ\n"); 10470 goto out_error; 10471 } 10472 qdesc->chann = cpu; 10473 phba->sli4_hba.mbx_wq = qdesc; 10474 10475 /* 10476 * Create ELS Work Queues 10477 */ 10478 10479 /* Create slow-path ELS Work Queue */ 10480 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10481 phba->sli4_hba.wq_esize, 10482 phba->sli4_hba.wq_ecount, cpu); 10483 if (!qdesc) { 10484 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10485 "0504 Failed allocate slow-path ELS WQ\n"); 10486 goto out_error; 10487 } 10488 qdesc->chann = cpu; 10489 phba->sli4_hba.els_wq = qdesc; 10490 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10491 10492 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10493 /* Create NVME LS Complete Queue */ 10494 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10495 phba->sli4_hba.cq_esize, 10496 phba->sli4_hba.cq_ecount, cpu); 10497 if (!qdesc) { 10498 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10499 "6079 Failed allocate NVME LS CQ\n"); 10500 goto out_error; 10501 } 10502 qdesc->chann = cpu; 10503 qdesc->qe_valid = 1; 10504 phba->sli4_hba.nvmels_cq = qdesc; 10505 10506 /* Create NVME LS Work Queue */ 10507 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10508 phba->sli4_hba.wq_esize, 10509 phba->sli4_hba.wq_ecount, cpu); 10510 if (!qdesc) { 10511 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10512 "6080 Failed allocate NVME LS WQ\n"); 10513 goto out_error; 10514 } 10515 qdesc->chann = cpu; 10516 phba->sli4_hba.nvmels_wq = qdesc; 10517 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10518 } 10519 10520 /* 10521 * Create Receive Queue (RQ) 10522 */ 10523 10524 /* Create Receive Queue for header */ 10525 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10526 phba->sli4_hba.rq_esize, 10527 phba->sli4_hba.rq_ecount, cpu); 10528 if (!qdesc) { 10529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10530 "0506 Failed allocate receive HRQ\n"); 10531 goto out_error; 10532 } 10533 phba->sli4_hba.hdr_rq = qdesc; 10534 10535 /* Create Receive Queue for data */ 10536 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10537 phba->sli4_hba.rq_esize, 10538 phba->sli4_hba.rq_ecount, cpu); 10539 if (!qdesc) { 10540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10541 "0507 Failed allocate receive DRQ\n"); 10542 goto out_error; 10543 } 10544 phba->sli4_hba.dat_rq = qdesc; 10545 10546 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 10547 phba->nvmet_support) { 10548 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10549 cpu = lpfc_find_cpu_handle(phba, idx, 10550 LPFC_FIND_BY_HDWQ); 10551 /* Create NVMET Receive Queue for header */ 10552 qdesc = lpfc_sli4_queue_alloc(phba, 10553 LPFC_DEFAULT_PAGE_SIZE, 10554 phba->sli4_hba.rq_esize, 10555 LPFC_NVMET_RQE_DEF_COUNT, 10556 cpu); 10557 if (!qdesc) { 10558 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10559 "3146 Failed allocate " 10560 "receive HRQ\n"); 10561 goto out_error; 10562 } 10563 qdesc->hdwq = idx; 10564 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 10565 10566 /* Only needed for header of RQ pair */ 10567 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 10568 GFP_KERNEL, 10569 cpu_to_node(cpu)); 10570 if (qdesc->rqbp == NULL) { 10571 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10572 "6131 
Failed allocate " 10573 "Header RQBP\n"); 10574 goto out_error; 10575 } 10576 10577 /* Put list in known state in case driver load fails. */ 10578 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 10579 10580 /* Create NVMET Receive Queue for data */ 10581 qdesc = lpfc_sli4_queue_alloc(phba, 10582 LPFC_DEFAULT_PAGE_SIZE, 10583 phba->sli4_hba.rq_esize, 10584 LPFC_NVMET_RQE_DEF_COUNT, 10585 cpu); 10586 if (!qdesc) { 10587 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10588 "3156 Failed allocate " 10589 "receive DRQ\n"); 10590 goto out_error; 10591 } 10592 qdesc->hdwq = idx; 10593 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 10594 } 10595 } 10596 10597 /* Clear NVME stats */ 10598 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10599 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10600 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 10601 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 10602 } 10603 } 10604 10605 /* Clear SCSI stats */ 10606 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 10607 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10608 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 10609 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 10610 } 10611 } 10612 10613 return 0; 10614 10615 out_error: 10616 lpfc_sli4_queue_destroy(phba); 10617 return -ENOMEM; 10618 } 10619 10620 static inline void 10621 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 10622 { 10623 if (*qp != NULL) { 10624 lpfc_sli4_queue_free(*qp); 10625 *qp = NULL; 10626 } 10627 } 10628 10629 static inline void 10630 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 10631 { 10632 int idx; 10633 10634 if (*qs == NULL) 10635 return; 10636 10637 for (idx = 0; idx < max; idx++) 10638 __lpfc_sli4_release_queue(&(*qs)[idx]); 10639 10640 kfree(*qs); 10641 *qs = NULL; 10642 } 10643 10644 static inline void 10645 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 10646 { 10647 struct lpfc_sli4_hdw_queue *hdwq; 10648 struct lpfc_queue *eq; 10649 uint32_t idx; 10650 10651 hdwq = phba->sli4_hba.hdwq; 10652 10653 /* Loop thru all Hardware Queues */ 10654 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10655 /* Free the CQ/WQ corresponding to the Hardware Queue */ 10656 lpfc_sli4_queue_free(hdwq[idx].io_cq); 10657 lpfc_sli4_queue_free(hdwq[idx].io_wq); 10658 hdwq[idx].hba_eq = NULL; 10659 hdwq[idx].io_cq = NULL; 10660 hdwq[idx].io_wq = NULL; 10661 if (phba->cfg_xpsgl && !phba->nvmet_support) 10662 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 10663 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 10664 } 10665 /* Loop thru all IRQ vectors */ 10666 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10667 /* Free the EQ corresponding to the IRQ vector */ 10668 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 10669 lpfc_sli4_queue_free(eq); 10670 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 10671 } 10672 } 10673 10674 /** 10675 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 10676 * @phba: pointer to lpfc hba data structure. 10677 * 10678 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 10679 * operation. 10680 * 10681 * Return codes 10682 * 0 - successful 10683 * -ENOMEM - No available memory 10684 * -EIO - The mailbox failed to complete successfully. 10685 **/ 10686 void 10687 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 10688 { 10689 /* 10690 * Set FREE_INIT before beginning to free the queues. 10691 * Wait until the users of queues to acknowledge to 10692 * release queues by clearing FREE_WAIT. 
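* The loop below polls under hbalock, sleeping 20ms per pass, until no queue
* user still holds LPFC_QUEUE_FREE_WAIT in sli_flag; only then is it safe to
* start freeing queue memory.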
10693 */ 10694 spin_lock_irq(&phba->hbalock); 10695 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 10696 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 10697 spin_unlock_irq(&phba->hbalock); 10698 msleep(20); 10699 spin_lock_irq(&phba->hbalock); 10700 } 10701 spin_unlock_irq(&phba->hbalock); 10702 10703 lpfc_sli4_cleanup_poll_list(phba); 10704 10705 /* Release HBA eqs */ 10706 if (phba->sli4_hba.hdwq) 10707 lpfc_sli4_release_hdwq(phba); 10708 10709 if (phba->nvmet_support) { 10710 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 10711 phba->cfg_nvmet_mrq); 10712 10713 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 10714 phba->cfg_nvmet_mrq); 10715 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 10716 phba->cfg_nvmet_mrq); 10717 } 10718 10719 /* Release mailbox command work queue */ 10720 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 10721 10722 /* Release ELS work queue */ 10723 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 10724 10725 /* Release ELS work queue */ 10726 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 10727 10728 /* Release unsolicited receive queue */ 10729 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 10730 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 10731 10732 /* Release ELS complete queue */ 10733 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 10734 10735 /* Release NVME LS complete queue */ 10736 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 10737 10738 /* Release mailbox command complete queue */ 10739 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 10740 10741 /* Everything on this list has been freed */ 10742 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10743 10744 /* Done with freeing the queues */ 10745 spin_lock_irq(&phba->hbalock); 10746 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 10747 spin_unlock_irq(&phba->hbalock); 10748 } 10749 10750 int 10751 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 10752 { 10753 struct lpfc_rqb *rqbp; 10754 struct lpfc_dmabuf *h_buf; 10755 struct rqb_dmabuf *rqb_buffer; 10756 10757 rqbp = rq->rqbp; 10758 while (!list_empty(&rqbp->rqb_buffer_list)) { 10759 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 10760 struct lpfc_dmabuf, list); 10761 10762 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 10763 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 10764 rqbp->buffer_count--; 10765 } 10766 return 1; 10767 } 10768 10769 static int 10770 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 10771 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 10772 int qidx, uint32_t qtype) 10773 { 10774 struct lpfc_sli_ring *pring; 10775 int rc; 10776 10777 if (!eq || !cq || !wq) { 10778 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10779 "6085 Fast-path %s (%d) not allocated\n", 10780 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 10781 return -ENOMEM; 10782 } 10783 10784 /* create the Cq first */ 10785 rc = lpfc_cq_create(phba, cq, eq, 10786 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 10787 if (rc) { 10788 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10789 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 10790 qidx, (uint32_t)rc); 10791 return rc; 10792 } 10793 10794 if (qtype != LPFC_MBOX) { 10795 /* Setup cq_map for fast lookup */ 10796 if (cq_map) 10797 *cq_map = cq->queue_id; 10798 10799 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10800 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 10801 qidx, cq->queue_id, qidx, eq->queue_id); 10802 10803 /* create the wq */ 10804 rc = lpfc_wq_create(phba, wq, cq, qtype); 10805 if (rc) { 10806 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10807 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 10808 qidx, (uint32_t)rc); 10809 /* no need to tear down cq - caller will do so */ 10810 return rc; 10811 } 10812 10813 /* Bind this CQ/WQ to the NVME ring */ 10814 pring = wq->pring; 10815 pring->sli.sli4.wqp = (void *)wq; 10816 cq->pring = pring; 10817 10818 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10819 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 10820 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 10821 } else { 10822 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 10823 if (rc) { 10824 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10825 "0539 Failed setup of slow-path MQ: " 10826 "rc = 0x%x\n", rc); 10827 /* no need to tear down cq - caller will do so */ 10828 return rc; 10829 } 10830 10831 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10832 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 10833 phba->sli4_hba.mbx_wq->queue_id, 10834 phba->sli4_hba.mbx_cq->queue_id); 10835 } 10836 10837 return 0; 10838 } 10839 10840 /** 10841 * lpfc_setup_cq_lookup - Setup the CQ lookup table 10842 * @phba: pointer to lpfc hba data structure. 10843 * 10844 * This routine will populate the cq_lookup table by all 10845 * available CQ queue_id's. 10846 **/ 10847 static void 10848 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 10849 { 10850 struct lpfc_queue *eq, *childq; 10851 int qidx; 10852 10853 memset(phba->sli4_hba.cq_lookup, 0, 10854 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 10855 /* Loop thru all IRQ vectors */ 10856 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 10857 /* Get the EQ corresponding to the IRQ vector */ 10858 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 10859 if (!eq) 10860 continue; 10861 /* Loop through all CQs associated with that EQ */ 10862 list_for_each_entry(childq, &eq->child_list, list) { 10863 if (childq->queue_id > phba->sli4_hba.cq_max) 10864 continue; 10865 if (childq->subtype == LPFC_IO) 10866 phba->sli4_hba.cq_lookup[childq->queue_id] = 10867 childq; 10868 } 10869 } 10870 } 10871 10872 /** 10873 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 10874 * @phba: pointer to lpfc hba data structure. 10875 * 10876 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 10877 * operation. 10878 * 10879 * Return codes 10880 * 0 - successful 10881 * -ENOMEM - No available memory 10882 * -EIO - The mailbox failed to complete successfully. 
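* In order, the code below issues QUERY_FW_CFG, creates one EQ per IRQ vector
* (on that vector's LPFC_CPU_FIRST_IRQ CPU), creates the per-hdwq IO CQ/WQ
* pairs, then the slow-path mailbox, ELS and (when NVME is enabled) NVME LS
* CQ/WQ pairs, the NVMET CQ set and MRQs when nvmet_support is set, and the
* unsolicited header/data RQs, before programming the EQ delay and building
* the cq_lookup table.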
10883 **/ 10884 int 10885 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 10886 { 10887 uint32_t shdr_status, shdr_add_status; 10888 union lpfc_sli4_cfg_shdr *shdr; 10889 struct lpfc_vector_map_info *cpup; 10890 struct lpfc_sli4_hdw_queue *qp; 10891 LPFC_MBOXQ_t *mboxq; 10892 int qidx, cpu; 10893 uint32_t length, usdelay; 10894 int rc = -ENOMEM; 10895 10896 /* Check for dual-ULP support */ 10897 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10898 if (!mboxq) { 10899 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10900 "3249 Unable to allocate memory for " 10901 "QUERY_FW_CFG mailbox command\n"); 10902 return -ENOMEM; 10903 } 10904 length = (sizeof(struct lpfc_mbx_query_fw_config) - 10905 sizeof(struct lpfc_sli4_cfg_mhdr)); 10906 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10907 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 10908 length, LPFC_SLI4_MBX_EMBED); 10909 10910 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10911 10912 shdr = (union lpfc_sli4_cfg_shdr *) 10913 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 10914 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10915 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10916 if (shdr_status || shdr_add_status || rc) { 10917 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10918 "3250 QUERY_FW_CFG mailbox failed with status " 10919 "x%x add_status x%x, mbx status x%x\n", 10920 shdr_status, shdr_add_status, rc); 10921 mempool_free(mboxq, phba->mbox_mem_pool); 10922 rc = -ENXIO; 10923 goto out_error; 10924 } 10925 10926 phba->sli4_hba.fw_func_mode = 10927 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 10928 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 10929 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 10930 phba->sli4_hba.physical_port = 10931 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 10932 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10933 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 10934 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 10935 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 10936 10937 mempool_free(mboxq, phba->mbox_mem_pool); 10938 10939 /* 10940 * Set up HBA Event Queues (EQs) 10941 */ 10942 qp = phba->sli4_hba.hdwq; 10943 10944 /* Set up HBA event queue */ 10945 if (!qp) { 10946 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10947 "3147 Fast-path EQs not allocated\n"); 10948 rc = -ENOMEM; 10949 goto out_error; 10950 } 10951 10952 /* Loop thru all IRQ vectors */ 10953 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 10954 /* Create HBA Event Queues (EQs) in order */ 10955 for_each_present_cpu(cpu) { 10956 cpup = &phba->sli4_hba.cpu_map[cpu]; 10957 10958 /* Look for the CPU thats using that vector with 10959 * LPFC_CPU_FIRST_IRQ set. 
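* Only the CPU flagged LPFC_CPU_FIRST_IRQ for this vector is used here, so
* exactly one EQ is created per IRQ vector, namely the EQ previously
* allocated against that CPU's hdwq.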
10960 */ 10961 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10962 continue; 10963 if (qidx != cpup->eq) 10964 continue; 10965 10966 /* Create an EQ for that vector */ 10967 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 10968 phba->cfg_fcp_imax); 10969 if (rc) { 10970 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10971 "0523 Failed setup of fast-path" 10972 " EQ (%d), rc = 0x%x\n", 10973 cpup->eq, (uint32_t)rc); 10974 goto out_destroy; 10975 } 10976 10977 /* Save the EQ for that vector in the hba_eq_hdl */ 10978 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 10979 qp[cpup->hdwq].hba_eq; 10980 10981 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10982 "2584 HBA EQ setup: queue[%d]-id=%d\n", 10983 cpup->eq, 10984 qp[cpup->hdwq].hba_eq->queue_id); 10985 } 10986 } 10987 10988 /* Loop thru all Hardware Queues */ 10989 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 10990 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 10991 cpup = &phba->sli4_hba.cpu_map[cpu]; 10992 10993 /* Create the CQ/WQ corresponding to the Hardware Queue */ 10994 rc = lpfc_create_wq_cq(phba, 10995 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 10996 qp[qidx].io_cq, 10997 qp[qidx].io_wq, 10998 &phba->sli4_hba.hdwq[qidx].io_cq_map, 10999 qidx, 11000 LPFC_IO); 11001 if (rc) { 11002 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11003 "0535 Failed to setup fastpath " 11004 "IO WQ/CQ (%d), rc = 0x%x\n", 11005 qidx, (uint32_t)rc); 11006 goto out_destroy; 11007 } 11008 } 11009 11010 /* 11011 * Set up Slow Path Complete Queues (CQs) 11012 */ 11013 11014 /* Set up slow-path MBOX CQ/MQ */ 11015 11016 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 11017 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11018 "0528 %s not allocated\n", 11019 phba->sli4_hba.mbx_cq ? 11020 "Mailbox WQ" : "Mailbox CQ"); 11021 rc = -ENOMEM; 11022 goto out_destroy; 11023 } 11024 11025 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11026 phba->sli4_hba.mbx_cq, 11027 phba->sli4_hba.mbx_wq, 11028 NULL, 0, LPFC_MBOX); 11029 if (rc) { 11030 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11031 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 11032 (uint32_t)rc); 11033 goto out_destroy; 11034 } 11035 if (phba->nvmet_support) { 11036 if (!phba->sli4_hba.nvmet_cqset) { 11037 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11038 "3165 Fast-path NVME CQ Set " 11039 "array not allocated\n"); 11040 rc = -ENOMEM; 11041 goto out_destroy; 11042 } 11043 if (phba->cfg_nvmet_mrq > 1) { 11044 rc = lpfc_cq_create_set(phba, 11045 phba->sli4_hba.nvmet_cqset, 11046 qp, 11047 LPFC_WCQ, LPFC_NVMET); 11048 if (rc) { 11049 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11050 "3164 Failed setup of NVME CQ " 11051 "Set, rc = 0x%x\n", 11052 (uint32_t)rc); 11053 goto out_destroy; 11054 } 11055 } else { 11056 /* Set up NVMET Receive Complete Queue */ 11057 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 11058 qp[0].hba_eq, 11059 LPFC_WCQ, LPFC_NVMET); 11060 if (rc) { 11061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11062 "6089 Failed setup NVMET CQ: " 11063 "rc = 0x%x\n", (uint32_t)rc); 11064 goto out_destroy; 11065 } 11066 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 11067 11068 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11069 "6090 NVMET CQ setup: cq-id=%d, " 11070 "parent eq-id=%d\n", 11071 phba->sli4_hba.nvmet_cqset[0]->queue_id, 11072 qp[0].hba_eq->queue_id); 11073 } 11074 } 11075 11076 /* Set up slow-path ELS WQ/CQ */ 11077 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 11078 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11079 "0530 ELS %s not 
allocated\n", 11080 phba->sli4_hba.els_cq ? "WQ" : "CQ"); 11081 rc = -ENOMEM; 11082 goto out_destroy; 11083 } 11084 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11085 phba->sli4_hba.els_cq, 11086 phba->sli4_hba.els_wq, 11087 NULL, 0, LPFC_ELS); 11088 if (rc) { 11089 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11090 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 11091 (uint32_t)rc); 11092 goto out_destroy; 11093 } 11094 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11095 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 11096 phba->sli4_hba.els_wq->queue_id, 11097 phba->sli4_hba.els_cq->queue_id); 11098 11099 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11100 /* Set up NVME LS Complete Queue */ 11101 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 11102 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11103 "6091 LS %s not allocated\n", 11104 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 11105 rc = -ENOMEM; 11106 goto out_destroy; 11107 } 11108 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11109 phba->sli4_hba.nvmels_cq, 11110 phba->sli4_hba.nvmels_wq, 11111 NULL, 0, LPFC_NVME_LS); 11112 if (rc) { 11113 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11114 "0526 Failed setup of NVVME LS WQ/CQ: " 11115 "rc = 0x%x\n", (uint32_t)rc); 11116 goto out_destroy; 11117 } 11118 11119 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11120 "6096 ELS WQ setup: wq-id=%d, " 11121 "parent cq-id=%d\n", 11122 phba->sli4_hba.nvmels_wq->queue_id, 11123 phba->sli4_hba.nvmels_cq->queue_id); 11124 } 11125 11126 /* 11127 * Create NVMET Receive Queue (RQ) 11128 */ 11129 if (phba->nvmet_support) { 11130 if ((!phba->sli4_hba.nvmet_cqset) || 11131 (!phba->sli4_hba.nvmet_mrq_hdr) || 11132 (!phba->sli4_hba.nvmet_mrq_data)) { 11133 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11134 "6130 MRQ CQ Queues not " 11135 "allocated\n"); 11136 rc = -ENOMEM; 11137 goto out_destroy; 11138 } 11139 if (phba->cfg_nvmet_mrq > 1) { 11140 rc = lpfc_mrq_create(phba, 11141 phba->sli4_hba.nvmet_mrq_hdr, 11142 phba->sli4_hba.nvmet_mrq_data, 11143 phba->sli4_hba.nvmet_cqset, 11144 LPFC_NVMET); 11145 if (rc) { 11146 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11147 "6098 Failed setup of NVMET " 11148 "MRQ: rc = 0x%x\n", 11149 (uint32_t)rc); 11150 goto out_destroy; 11151 } 11152 11153 } else { 11154 rc = lpfc_rq_create(phba, 11155 phba->sli4_hba.nvmet_mrq_hdr[0], 11156 phba->sli4_hba.nvmet_mrq_data[0], 11157 phba->sli4_hba.nvmet_cqset[0], 11158 LPFC_NVMET); 11159 if (rc) { 11160 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11161 "6057 Failed setup of NVMET " 11162 "Receive Queue: rc = 0x%x\n", 11163 (uint32_t)rc); 11164 goto out_destroy; 11165 } 11166 11167 lpfc_printf_log( 11168 phba, KERN_INFO, LOG_INIT, 11169 "6099 NVMET RQ setup: hdr-rq-id=%d, " 11170 "dat-rq-id=%d parent cq-id=%d\n", 11171 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 11172 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 11173 phba->sli4_hba.nvmet_cqset[0]->queue_id); 11174 11175 } 11176 } 11177 11178 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 11179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11180 "0540 Receive Queue not allocated\n"); 11181 rc = -ENOMEM; 11182 goto out_destroy; 11183 } 11184 11185 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 11186 phba->sli4_hba.els_cq, LPFC_USOL); 11187 if (rc) { 11188 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11189 "0541 Failed setup of Receive Queue: " 11190 "rc = 0x%x\n", (uint32_t)rc); 11191 goto out_destroy; 11192 } 11193 11194 lpfc_printf_log(phba, KERN_INFO, 
LOG_INIT, 11195 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 11196 "parent cq-id=%d\n", 11197 phba->sli4_hba.hdr_rq->queue_id, 11198 phba->sli4_hba.dat_rq->queue_id, 11199 phba->sli4_hba.els_cq->queue_id); 11200 11201 if (phba->cfg_fcp_imax) 11202 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 11203 else 11204 usdelay = 0; 11205 11206 for (qidx = 0; qidx < phba->cfg_irq_chann; 11207 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 11208 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 11209 usdelay); 11210 11211 if (phba->sli4_hba.cq_max) { 11212 kfree(phba->sli4_hba.cq_lookup); 11213 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 11214 sizeof(struct lpfc_queue *), GFP_KERNEL); 11215 if (!phba->sli4_hba.cq_lookup) { 11216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11217 "0549 Failed setup of CQ Lookup table: " 11218 "size 0x%x\n", phba->sli4_hba.cq_max); 11219 rc = -ENOMEM; 11220 goto out_destroy; 11221 } 11222 lpfc_setup_cq_lookup(phba); 11223 } 11224 return 0; 11225 11226 out_destroy: 11227 lpfc_sli4_queue_unset(phba); 11228 out_error: 11229 return rc; 11230 } 11231 11232 /** 11233 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 11234 * @phba: pointer to lpfc hba data structure. 11235 * 11236 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 11237 * operation. 11238 * 11239 * Return codes 11240 * 0 - successful 11241 * -ENOMEM - No available memory 11242 * -EIO - The mailbox failed to complete successfully. 11243 **/ 11244 void 11245 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 11246 { 11247 struct lpfc_sli4_hdw_queue *qp; 11248 struct lpfc_queue *eq; 11249 int qidx; 11250 11251 /* Unset mailbox command work queue */ 11252 if (phba->sli4_hba.mbx_wq) 11253 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 11254 11255 /* Unset NVME LS work queue */ 11256 if (phba->sli4_hba.nvmels_wq) 11257 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 11258 11259 /* Unset ELS work queue */ 11260 if (phba->sli4_hba.els_wq) 11261 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 11262 11263 /* Unset unsolicited receive queue */ 11264 if (phba->sli4_hba.hdr_rq) 11265 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 11266 phba->sli4_hba.dat_rq); 11267 11268 /* Unset mailbox command complete queue */ 11269 if (phba->sli4_hba.mbx_cq) 11270 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 11271 11272 /* Unset ELS complete queue */ 11273 if (phba->sli4_hba.els_cq) 11274 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 11275 11276 /* Unset NVME LS complete queue */ 11277 if (phba->sli4_hba.nvmels_cq) 11278 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 11279 11280 if (phba->nvmet_support) { 11281 /* Unset NVMET MRQ queue */ 11282 if (phba->sli4_hba.nvmet_mrq_hdr) { 11283 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11284 lpfc_rq_destroy( 11285 phba, 11286 phba->sli4_hba.nvmet_mrq_hdr[qidx], 11287 phba->sli4_hba.nvmet_mrq_data[qidx]); 11288 } 11289 11290 /* Unset NVMET CQ Set complete queue */ 11291 if (phba->sli4_hba.nvmet_cqset) { 11292 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11293 lpfc_cq_destroy( 11294 phba, phba->sli4_hba.nvmet_cqset[qidx]); 11295 } 11296 } 11297 11298 /* Unset fast-path SLI4 queues */ 11299 if (phba->sli4_hba.hdwq) { 11300 /* Loop thru all Hardware Queues */ 11301 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11302 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 11303 qp = &phba->sli4_hba.hdwq[qidx]; 11304 lpfc_wq_destroy(phba, qp->io_wq); 11305 lpfc_cq_destroy(phba, qp->io_cq); 11306 } 11307 /* Loop thru all IRQ 
vectors */ 11308 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11309 /* Destroy the EQ corresponding to the IRQ vector */ 11310 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 11311 lpfc_eq_destroy(phba, eq); 11312 } 11313 } 11314 11315 kfree(phba->sli4_hba.cq_lookup); 11316 phba->sli4_hba.cq_lookup = NULL; 11317 phba->sli4_hba.cq_max = 0; 11318 } 11319 11320 /** 11321 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 11322 * @phba: pointer to lpfc hba data structure. 11323 * 11324 * This routine is invoked to allocate and set up a pool of completion queue 11325 * events. The body of the completion queue event is a completion queue entry 11326 * CQE. For now, this pool is used for the interrupt service routine to queue 11327 * the following HBA completion queue events for the worker thread to process: 11328 * - Mailbox asynchronous events 11329 * - Receive queue completion unsolicited events 11330 * Later, this can be used for all the slow-path events. 11331 * 11332 * Return codes 11333 * 0 - successful 11334 * -ENOMEM - No available memory 11335 **/ 11336 static int 11337 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 11338 { 11339 struct lpfc_cq_event *cq_event; 11340 int i; 11341 11342 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 11343 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 11344 if (!cq_event) 11345 goto out_pool_create_fail; 11346 list_add_tail(&cq_event->list, 11347 &phba->sli4_hba.sp_cqe_event_pool); 11348 } 11349 return 0; 11350 11351 out_pool_create_fail: 11352 lpfc_sli4_cq_event_pool_destroy(phba); 11353 return -ENOMEM; 11354 } 11355 11356 /** 11357 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 11358 * @phba: pointer to lpfc hba data structure. 11359 * 11360 * This routine is invoked to free the pool of completion queue events at 11361 * driver unload time. Note that, it is the responsibility of the driver 11362 * cleanup routine to free all the outstanding completion-queue events 11363 * allocated from this pool back into the pool before invoking this routine 11364 * to destroy the pool. 11365 **/ 11366 static void 11367 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 11368 { 11369 struct lpfc_cq_event *cq_event, *next_cq_event; 11370 11371 list_for_each_entry_safe(cq_event, next_cq_event, 11372 &phba->sli4_hba.sp_cqe_event_pool, list) { 11373 list_del(&cq_event->list); 11374 kfree(cq_event); 11375 } 11376 } 11377 11378 /** 11379 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11380 * @phba: pointer to lpfc hba data structure. 11381 * 11382 * This routine is the lock free version of the API invoked to allocate a 11383 * completion-queue event from the free pool. 11384 * 11385 * Return: Pointer to the newly allocated completion-queue event if successful 11386 * NULL otherwise. 11387 **/ 11388 struct lpfc_cq_event * 11389 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11390 { 11391 struct lpfc_cq_event *cq_event = NULL; 11392 11393 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 11394 struct lpfc_cq_event, list); 11395 return cq_event; 11396 } 11397 11398 /** 11399 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11400 * @phba: pointer to lpfc hba data structure. 11401 * 11402 * This routine is the lock version of the API invoked to allocate a 11403 * completion-queue event from the free pool. 
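* It simply takes phba->hbalock with interrupts saved and calls the lock-free
* __lpfc_sli4_cq_event_alloc() helper above.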
11404 * 11405 * Return: Pointer to the newly allocated completion-queue event if successful 11406 * NULL otherwise. 11407 **/ 11408 struct lpfc_cq_event * 11409 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11410 { 11411 struct lpfc_cq_event *cq_event; 11412 unsigned long iflags; 11413 11414 spin_lock_irqsave(&phba->hbalock, iflags); 11415 cq_event = __lpfc_sli4_cq_event_alloc(phba); 11416 spin_unlock_irqrestore(&phba->hbalock, iflags); 11417 return cq_event; 11418 } 11419 11420 /** 11421 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 11422 * @phba: pointer to lpfc hba data structure. 11423 * @cq_event: pointer to the completion queue event to be freed. 11424 * 11425 * This routine is the lock free version of the API invoked to release a 11426 * completion-queue event back into the free pool. 11427 **/ 11428 void 11429 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 11430 struct lpfc_cq_event *cq_event) 11431 { 11432 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 11433 } 11434 11435 /** 11436 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 11437 * @phba: pointer to lpfc hba data structure. 11438 * @cq_event: pointer to the completion queue event to be freed. 11439 * 11440 * This routine is the lock version of the API invoked to release a 11441 * completion-queue event back into the free pool. 11442 **/ 11443 void 11444 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 11445 struct lpfc_cq_event *cq_event) 11446 { 11447 unsigned long iflags; 11448 spin_lock_irqsave(&phba->hbalock, iflags); 11449 __lpfc_sli4_cq_event_release(phba, cq_event); 11450 spin_unlock_irqrestore(&phba->hbalock, iflags); 11451 } 11452 11453 /** 11454 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 11455 * @phba: pointer to lpfc hba data structure. 11456 * 11457 * This routine frees all the pending completion-queue events back 11458 * into the free pool for device reset. 11459 **/ 11460 static void 11461 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 11462 { 11463 LIST_HEAD(cq_event_list); 11464 struct lpfc_cq_event *cq_event; 11465 unsigned long iflags; 11466 11467 /* Retrieve all the pending WCQEs from pending WCQE lists */ 11468 11469 /* Pending ELS XRI abort events */ 11470 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 11471 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 11472 &cq_event_list); 11473 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 11474 11475 /* Pending async events */ 11476 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 11477 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 11478 &cq_event_list); 11479 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 11480 11481 while (!list_empty(&cq_event_list)) { 11482 list_remove_head(&cq_event_list, cq_event, 11483 struct lpfc_cq_event, list); 11484 lpfc_sli4_cq_event_release(phba, cq_event); 11485 } 11486 } 11487 11488 /** 11489 * lpfc_pci_function_reset - Reset pci function. 11490 * @phba: pointer to lpfc hba data structure. 11491 * 11492 * This routine is invoked to request a PCI function reset. It will destroy 11493 * all resources assigned to the PCI function which originates this request. 11494 * 11495 * Return codes 11496 * 0 - successful 11497 * -ENOMEM - No available memory 11498 * -EIO - The mailbox failed to complete successfully.
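* For if_type 0 the reset is requested through the SLI_FUNCTION_RESET
* mailbox command; for if_type 2 and 6 the routine polls the SLIPORT status
* register for RDY (1500 passes of 20ms, roughly 30 seconds), then writes
* INIT_PORT to the control register and polls again before declaring the
* port reset complete or failed.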
11499 **/ 11500 int 11501 lpfc_pci_function_reset(struct lpfc_hba *phba) 11502 { 11503 LPFC_MBOXQ_t *mboxq; 11504 uint32_t rc = 0, if_type; 11505 uint32_t shdr_status, shdr_add_status; 11506 uint32_t rdy_chk; 11507 uint32_t port_reset = 0; 11508 union lpfc_sli4_cfg_shdr *shdr; 11509 struct lpfc_register reg_data; 11510 uint16_t devid; 11511 11512 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11513 switch (if_type) { 11514 case LPFC_SLI_INTF_IF_TYPE_0: 11515 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 11516 GFP_KERNEL); 11517 if (!mboxq) { 11518 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11519 "0494 Unable to allocate memory for " 11520 "issuing SLI_FUNCTION_RESET mailbox " 11521 "command\n"); 11522 return -ENOMEM; 11523 } 11524 11525 /* Setup PCI function reset mailbox-ioctl command */ 11526 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11527 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 11528 LPFC_SLI4_MBX_EMBED); 11529 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11530 shdr = (union lpfc_sli4_cfg_shdr *) 11531 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 11532 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11533 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 11534 &shdr->response); 11535 mempool_free(mboxq, phba->mbox_mem_pool); 11536 if (shdr_status || shdr_add_status || rc) { 11537 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11538 "0495 SLI_FUNCTION_RESET mailbox " 11539 "failed with status x%x add_status x%x," 11540 " mbx status x%x\n", 11541 shdr_status, shdr_add_status, rc); 11542 rc = -ENXIO; 11543 } 11544 break; 11545 case LPFC_SLI_INTF_IF_TYPE_2: 11546 case LPFC_SLI_INTF_IF_TYPE_6: 11547 wait: 11548 /* 11549 * Poll the Port Status Register and wait for RDY for 11550 * up to 30 seconds. If the port doesn't respond, treat 11551 * it as an error. 11552 */ 11553 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 11554 if (lpfc_readl(phba->sli4_hba.u.if_type2. 11555 STATUSregaddr, ®_data.word0)) { 11556 rc = -ENODEV; 11557 goto out; 11558 } 11559 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 11560 break; 11561 msleep(20); 11562 } 11563 11564 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 11565 phba->work_status[0] = readl( 11566 phba->sli4_hba.u.if_type2.ERR1regaddr); 11567 phba->work_status[1] = readl( 11568 phba->sli4_hba.u.if_type2.ERR2regaddr); 11569 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11570 "2890 Port not ready, port status reg " 11571 "0x%x error 1=0x%x, error 2=0x%x\n", 11572 reg_data.word0, 11573 phba->work_status[0], 11574 phba->work_status[1]); 11575 rc = -ENODEV; 11576 goto out; 11577 } 11578 11579 if (bf_get(lpfc_sliport_status_pldv, ®_data)) 11580 lpfc_pldv_detect = true; 11581 11582 if (!port_reset) { 11583 /* 11584 * Reset the port now 11585 */ 11586 reg_data.word0 = 0; 11587 bf_set(lpfc_sliport_ctrl_end, ®_data, 11588 LPFC_SLIPORT_LITTLE_ENDIAN); 11589 bf_set(lpfc_sliport_ctrl_ip, ®_data, 11590 LPFC_SLIPORT_INIT_PORT); 11591 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 11592 CTRLregaddr); 11593 /* flush */ 11594 pci_read_config_word(phba->pcidev, 11595 PCI_DEVICE_ID, &devid); 11596 11597 port_reset = 1; 11598 msleep(20); 11599 goto wait; 11600 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 11601 rc = -ENODEV; 11602 goto out; 11603 } 11604 break; 11605 11606 case LPFC_SLI_INTF_IF_TYPE_1: 11607 default: 11608 break; 11609 } 11610 11611 out: 11612 /* Catch the not-ready port failure after a port reset. 
*/ 11613 if (rc) { 11614 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11615 "3317 HBA not functional: IP Reset Failed " 11616 "try: echo fw_reset > board_mode\n"); 11617 rc = -ENODEV; 11618 } 11619 11620 return rc; 11621 } 11622 11623 /** 11624 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 11625 * @phba: pointer to lpfc hba data structure. 11626 * 11627 * This routine is invoked to set up the PCI device memory space for device 11628 * with SLI-4 interface spec. 11629 * 11630 * Return codes 11631 * 0 - successful 11632 * other values - error 11633 **/ 11634 static int 11635 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 11636 { 11637 struct pci_dev *pdev = phba->pcidev; 11638 unsigned long bar0map_len, bar1map_len, bar2map_len; 11639 int error; 11640 uint32_t if_type; 11641 11642 if (!pdev) 11643 return -ENODEV; 11644 11645 /* Set the device DMA mask size */ 11646 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11647 if (error) 11648 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11649 if (error) 11650 return error; 11651 11652 /* 11653 * The BARs and register set definitions and offset locations are 11654 * dependent on the if_type. 11655 */ 11656 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 11657 &phba->sli4_hba.sli_intf.word0)) { 11658 return -ENODEV; 11659 } 11660 11661 /* There is no SLI3 failback for SLI4 devices. */ 11662 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 11663 LPFC_SLI_INTF_VALID) { 11664 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11665 "2894 SLI_INTF reg contents invalid " 11666 "sli_intf reg 0x%x\n", 11667 phba->sli4_hba.sli_intf.word0); 11668 return -ENODEV; 11669 } 11670 11671 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11672 /* 11673 * Get the bus address of SLI4 device Bar regions and the 11674 * number of bytes required by each mapping. The mapping of the 11675 * particular PCI BARs regions is dependent on the type of 11676 * SLI4 device. 
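* Roughly: the PCI_64BIT_BAR0 region always maps the SLI configuration
* registers; for if_type 0, PCI_64BIT_BAR2 maps the control registers and
* PCI_64BIT_BAR4 the doorbells; for if_type 6, PCI_64BIT_BAR2 maps the
* doorbells and PCI_64BIT_BAR4 the optional DPP region.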
11677 */ 11678 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 11679 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 11680 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 11681 11682 /* 11683 * Map SLI4 PCI Config Space Register base to a kernel virtual 11684 * addr 11685 */ 11686 phba->sli4_hba.conf_regs_memmap_p = 11687 ioremap(phba->pci_bar0_map, bar0map_len); 11688 if (!phba->sli4_hba.conf_regs_memmap_p) { 11689 dev_printk(KERN_ERR, &pdev->dev, 11690 "ioremap failed for SLI4 PCI config " 11691 "registers.\n"); 11692 return -ENODEV; 11693 } 11694 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 11695 /* Set up BAR0 PCI config space register memory map */ 11696 lpfc_sli4_bar0_register_memmap(phba, if_type); 11697 } else { 11698 phba->pci_bar0_map = pci_resource_start(pdev, 1); 11699 bar0map_len = pci_resource_len(pdev, 1); 11700 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 11701 dev_printk(KERN_ERR, &pdev->dev, 11702 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 11703 return -ENODEV; 11704 } 11705 phba->sli4_hba.conf_regs_memmap_p = 11706 ioremap(phba->pci_bar0_map, bar0map_len); 11707 if (!phba->sli4_hba.conf_regs_memmap_p) { 11708 dev_printk(KERN_ERR, &pdev->dev, 11709 "ioremap failed for SLI4 PCI config " 11710 "registers.\n"); 11711 return -ENODEV; 11712 } 11713 lpfc_sli4_bar0_register_memmap(phba, if_type); 11714 } 11715 11716 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11717 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 11718 /* 11719 * Map SLI4 if type 0 HBA Control Register base to a 11720 * kernel virtual address and setup the registers. 11721 */ 11722 phba->pci_bar1_map = pci_resource_start(pdev, 11723 PCI_64BIT_BAR2); 11724 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11725 phba->sli4_hba.ctrl_regs_memmap_p = 11726 ioremap(phba->pci_bar1_map, 11727 bar1map_len); 11728 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 11729 dev_err(&pdev->dev, 11730 "ioremap failed for SLI4 HBA " 11731 "control registers.\n"); 11732 error = -ENOMEM; 11733 goto out_iounmap_conf; 11734 } 11735 phba->pci_bar2_memmap_p = 11736 phba->sli4_hba.ctrl_regs_memmap_p; 11737 lpfc_sli4_bar1_register_memmap(phba, if_type); 11738 } else { 11739 error = -ENOMEM; 11740 goto out_iounmap_conf; 11741 } 11742 } 11743 11744 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 11745 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 11746 /* 11747 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 11748 * virtual address and setup the registers. 11749 */ 11750 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 11751 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11752 phba->sli4_hba.drbl_regs_memmap_p = 11753 ioremap(phba->pci_bar1_map, bar1map_len); 11754 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11755 dev_err(&pdev->dev, 11756 "ioremap failed for SLI4 HBA doorbell registers.\n"); 11757 error = -ENOMEM; 11758 goto out_iounmap_conf; 11759 } 11760 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 11761 lpfc_sli4_bar1_register_memmap(phba, if_type); 11762 } 11763 11764 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11765 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11766 /* 11767 * Map SLI4 if type 0 HBA Doorbell Register base to 11768 * a kernel virtual address and setup the registers. 
11769 */ 11770 phba->pci_bar2_map = pci_resource_start(pdev, 11771 PCI_64BIT_BAR4); 11772 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11773 phba->sli4_hba.drbl_regs_memmap_p = 11774 ioremap(phba->pci_bar2_map, 11775 bar2map_len); 11776 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11777 dev_err(&pdev->dev, 11778 "ioremap failed for SLI4 HBA" 11779 " doorbell registers.\n"); 11780 error = -ENOMEM; 11781 goto out_iounmap_ctrl; 11782 } 11783 phba->pci_bar4_memmap_p = 11784 phba->sli4_hba.drbl_regs_memmap_p; 11785 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 11786 if (error) 11787 goto out_iounmap_all; 11788 } else { 11789 error = -ENOMEM; 11790 goto out_iounmap_all; 11791 } 11792 } 11793 11794 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 11795 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11796 /* 11797 * Map SLI4 if type 6 HBA DPP Register base to a kernel 11798 * virtual address and setup the registers. 11799 */ 11800 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 11801 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11802 phba->sli4_hba.dpp_regs_memmap_p = 11803 ioremap(phba->pci_bar2_map, bar2map_len); 11804 if (!phba->sli4_hba.dpp_regs_memmap_p) { 11805 dev_err(&pdev->dev, 11806 "ioremap failed for SLI4 HBA dpp registers.\n"); 11807 error = -ENOMEM; 11808 goto out_iounmap_ctrl; 11809 } 11810 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 11811 } 11812 11813 /* Set up the EQ/CQ register handeling functions now */ 11814 switch (if_type) { 11815 case LPFC_SLI_INTF_IF_TYPE_0: 11816 case LPFC_SLI_INTF_IF_TYPE_2: 11817 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 11818 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 11819 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 11820 break; 11821 case LPFC_SLI_INTF_IF_TYPE_6: 11822 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 11823 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 11824 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 11825 break; 11826 default: 11827 break; 11828 } 11829 11830 return 0; 11831 11832 out_iounmap_all: 11833 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11834 out_iounmap_ctrl: 11835 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 11836 out_iounmap_conf: 11837 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11838 11839 return error; 11840 } 11841 11842 /** 11843 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 11844 * @phba: pointer to lpfc hba data structure. 11845 * 11846 * This routine is invoked to unset the PCI device memory space for device 11847 * with SLI-4 interface spec. 
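* The mappings released here mirror those set up in lpfc_sli4_pci_mem_setup()
* for the detected if_type: the config registers for every type, plus the
* control and doorbell registers for if_type 0, and the doorbell plus the
* optional DPP region for if_type 6.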
11848 **/ 11849 static void 11850 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 11851 { 11852 uint32_t if_type; 11853 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11854 11855 switch (if_type) { 11856 case LPFC_SLI_INTF_IF_TYPE_0: 11857 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11858 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 11859 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11860 break; 11861 case LPFC_SLI_INTF_IF_TYPE_2: 11862 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11863 break; 11864 case LPFC_SLI_INTF_IF_TYPE_6: 11865 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11866 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11867 if (phba->sli4_hba.dpp_regs_memmap_p) 11868 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 11869 break; 11870 case LPFC_SLI_INTF_IF_TYPE_1: 11871 default: 11872 dev_printk(KERN_ERR, &phba->pcidev->dev, 11873 "FATAL - unsupported SLI4 interface type - %d\n", 11874 if_type); 11875 break; 11876 } 11877 } 11878 11879 /** 11880 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 11881 * @phba: pointer to lpfc hba data structure. 11882 * 11883 * This routine is invoked to enable the MSI-X interrupt vectors to device 11884 * with SLI-3 interface specs. 11885 * 11886 * Return codes 11887 * 0 - successful 11888 * other values - error 11889 **/ 11890 static int 11891 lpfc_sli_enable_msix(struct lpfc_hba *phba) 11892 { 11893 int rc; 11894 LPFC_MBOXQ_t *pmb; 11895 11896 /* Set up MSI-X multi-message vectors */ 11897 rc = pci_alloc_irq_vectors(phba->pcidev, 11898 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 11899 if (rc < 0) { 11900 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11901 "0420 PCI enable MSI-X failed (%d)\n", rc); 11902 goto vec_fail_out; 11903 } 11904 11905 /* 11906 * Assign MSI-X vectors to interrupt handlers 11907 */ 11908 11909 /* vector-0 is associated to slow-path handler */ 11910 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 11911 &lpfc_sli_sp_intr_handler, 0, 11912 LPFC_SP_DRIVER_HANDLER_NAME, phba); 11913 if (rc) { 11914 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11915 "0421 MSI-X slow-path request_irq failed " 11916 "(%d)\n", rc); 11917 goto msi_fail_out; 11918 } 11919 11920 /* vector-1 is associated to fast-path handler */ 11921 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 11922 &lpfc_sli_fp_intr_handler, 0, 11923 LPFC_FP_DRIVER_HANDLER_NAME, phba); 11924 11925 if (rc) { 11926 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11927 "0429 MSI-X fast-path request_irq failed " 11928 "(%d)\n", rc); 11929 goto irq_fail_out; 11930 } 11931 11932 /* 11933 * Configure HBA MSI-X attention conditions to messages 11934 */ 11935 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11936 11937 if (!pmb) { 11938 rc = -ENOMEM; 11939 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11940 "0474 Unable to allocate memory for issuing " 11941 "MBOX_CONFIG_MSI command\n"); 11942 goto mem_fail_out; 11943 } 11944 rc = lpfc_config_msi(phba, pmb); 11945 if (rc) 11946 goto mbx_fail_out; 11947 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 11948 if (rc != MBX_SUCCESS) { 11949 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 11950 "0351 Config MSI mailbox command failed, " 11951 "mbxCmd x%x, mbxStatus x%x\n", 11952 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 11953 goto mbx_fail_out; 11954 } 11955 11956 /* Free memory allocated for mailbox command */ 11957 mempool_free(pmb, phba->mbox_mem_pool); 11958 return rc; 11959 11960 mbx_fail_out: 11961 /* Free memory allocated for mailbox command */ 11962 mempool_free(pmb, 
phba->mbox_mem_pool); 11963 11964 mem_fail_out: 11965 /* free the irq already requested */ 11966 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 11967 11968 irq_fail_out: 11969 /* free the irq already requested */ 11970 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 11971 11972 msi_fail_out: 11973 /* Unconfigure MSI-X capability structure */ 11974 pci_free_irq_vectors(phba->pcidev); 11975 11976 vec_fail_out: 11977 return rc; 11978 } 11979 11980 /** 11981 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 11982 * @phba: pointer to lpfc hba data structure. 11983 * 11984 * This routine is invoked to enable the MSI interrupt mode to device with 11985 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 11986 * enable the MSI vector. The device driver is responsible for calling 11987 * request_irq() to register the MSI vector with an interrupt handler, which 11988 * is done in this function. 11989 * 11990 * Return codes 11991 * 0 - successful 11992 * other values - error 11993 */ 11994 static int 11995 lpfc_sli_enable_msi(struct lpfc_hba *phba) 11996 { 11997 int rc; 11998 11999 rc = pci_enable_msi(phba->pcidev); 12000 if (!rc) 12001 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12002 "0462 PCI enable MSI mode success.\n"); 12003 else { 12004 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12005 "0471 PCI enable MSI mode failed (%d)\n", rc); 12006 return rc; 12007 } 12008 12009 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12010 0, LPFC_DRIVER_NAME, phba); 12011 if (rc) { 12012 pci_disable_msi(phba->pcidev); 12013 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12014 "0478 MSI request_irq failed (%d)\n", rc); 12015 } 12016 return rc; 12017 } 12018 12019 /** 12020 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 12021 * @phba: pointer to lpfc hba data structure. 12022 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 12023 * 12024 * This routine is invoked to enable device interrupt and associate driver's 12025 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 12026 * spec. Depending on the interrupt mode configured for the driver, the driver 12027 * will try to fallback from the configured interrupt mode to an interrupt 12028 * mode which is supported by the platform, kernel, and device in the order 12029 * of: 12030 * MSI-X -> MSI -> IRQ.
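* The value returned is the interrupt mode actually established (2 = MSI-X,
* 1 = MSI, 0 = INTx), or LPFC_INTR_ERROR when the preliminary port
* configuration or every interrupt mode fails.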
12031 * 12032 * Return codes 12033 * 0 - successful 12034 * other values - error 12035 **/ 12036 static uint32_t 12037 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 12038 { 12039 uint32_t intr_mode = LPFC_INTR_ERROR; 12040 int retval; 12041 12042 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 12043 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 12044 if (retval) 12045 return intr_mode; 12046 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; 12047 12048 if (cfg_mode == 2) { 12049 /* Now, try to enable MSI-X interrupt mode */ 12050 retval = lpfc_sli_enable_msix(phba); 12051 if (!retval) { 12052 /* Indicate initialization to MSI-X mode */ 12053 phba->intr_type = MSIX; 12054 intr_mode = 2; 12055 } 12056 } 12057 12058 /* Fallback to MSI if MSI-X initialization failed */ 12059 if (cfg_mode >= 1 && phba->intr_type == NONE) { 12060 retval = lpfc_sli_enable_msi(phba); 12061 if (!retval) { 12062 /* Indicate initialization to MSI mode */ 12063 phba->intr_type = MSI; 12064 intr_mode = 1; 12065 } 12066 } 12067 12068 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 12069 if (phba->intr_type == NONE) { 12070 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12071 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 12072 if (!retval) { 12073 /* Indicate initialization to INTx mode */ 12074 phba->intr_type = INTx; 12075 intr_mode = 0; 12076 } 12077 } 12078 return intr_mode; 12079 } 12080 12081 /** 12082 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 12083 * @phba: pointer to lpfc hba data structure. 12084 * 12085 * This routine is invoked to disable device interrupt and disassociate the 12086 * driver's interrupt handler(s) from interrupt vector(s) to device with 12087 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 12088 * release the interrupt vector(s) for the message signaled interrupt. 12089 **/ 12090 static void 12091 lpfc_sli_disable_intr(struct lpfc_hba *phba) 12092 { 12093 int nr_irqs, i; 12094 12095 if (phba->intr_type == MSIX) 12096 nr_irqs = LPFC_MSIX_VECTORS; 12097 else 12098 nr_irqs = 1; 12099 12100 for (i = 0; i < nr_irqs; i++) 12101 free_irq(pci_irq_vector(phba->pcidev, i), phba); 12102 pci_free_irq_vectors(phba->pcidev); 12103 12104 /* Reset interrupt management states */ 12105 phba->intr_type = NONE; 12106 phba->sli.slistat.sli_intr = 0; 12107 } 12108 12109 /** 12110 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 12111 * @phba: pointer to lpfc hba data structure. 12112 * @id: EQ vector index or Hardware Queue index 12113 * @match: LPFC_FIND_BY_EQ = match by EQ 12114 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 12115 * Return the CPU that matches the selection criteria 12116 */ 12117 static uint16_t 12118 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 12119 { 12120 struct lpfc_vector_map_info *cpup; 12121 int cpu; 12122 12123 /* Loop through all CPUs */ 12124 for_each_present_cpu(cpu) { 12125 cpup = &phba->sli4_hba.cpu_map[cpu]; 12126 12127 /* If we are matching by EQ, there may be multiple CPUs using 12128 * using the same vector, so select the one with 12129 * LPFC_CPU_FIRST_IRQ set. 
12130 */ 12131 if ((match == LPFC_FIND_BY_EQ) && 12132 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 12133 (cpup->eq == id)) 12134 return cpu; 12135 12136 /* If matching by HDWQ, select the first CPU that matches */ 12137 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 12138 return cpu; 12139 } 12140 return 0; 12141 } 12142 12143 #ifdef CONFIG_X86 12144 /** 12145 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 12146 * @phba: pointer to lpfc hba data structure. 12147 * @cpu: CPU map index 12148 * @phys_id: CPU package physical id 12149 * @core_id: CPU core id 12150 */ 12151 static int 12152 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 12153 uint16_t phys_id, uint16_t core_id) 12154 { 12155 struct lpfc_vector_map_info *cpup; 12156 int idx; 12157 12158 for_each_present_cpu(idx) { 12159 cpup = &phba->sli4_hba.cpu_map[idx]; 12160 /* Does the cpup match the one we are looking for */ 12161 if ((cpup->phys_id == phys_id) && 12162 (cpup->core_id == core_id) && 12163 (cpu != idx)) 12164 return 1; 12165 } 12166 return 0; 12167 } 12168 #endif 12169 12170 /* 12171 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 12172 * @phba: pointer to lpfc hba data structure. 12173 * @eqidx: index for eq and irq vector 12174 * @flag: flags to set for vector_map structure 12175 * @cpu: cpu used to index vector_map structure 12176 * 12177 * The routine assigns eq info into vector_map structure 12178 */ 12179 static inline void 12180 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 12181 unsigned int cpu) 12182 { 12183 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 12184 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 12185 12186 cpup->eq = eqidx; 12187 cpup->flag |= flag; 12188 12189 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12190 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 12191 cpu, eqhdl->irq, cpup->eq, cpup->flag); 12192 } 12193 12194 /** 12195 * lpfc_cpu_map_array_init - Initialize cpu_map structure 12196 * @phba: pointer to lpfc hba data structure. 12197 * 12198 * The routine initializes the cpu_map array structure 12199 */ 12200 static void 12201 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 12202 { 12203 struct lpfc_vector_map_info *cpup; 12204 struct lpfc_eq_intr_info *eqi; 12205 int cpu; 12206 12207 for_each_possible_cpu(cpu) { 12208 cpup = &phba->sli4_hba.cpu_map[cpu]; 12209 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 12210 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 12211 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 12212 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 12213 cpup->flag = 0; 12214 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 12215 INIT_LIST_HEAD(&eqi->list); 12216 eqi->icnt = 0; 12217 } 12218 } 12219 12220 /** 12221 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 12222 * @phba: pointer to lpfc hba data structure. 12223 * 12224 * The routine initializes the hba_eq_hdl array structure 12225 */ 12226 static void 12227 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 12228 { 12229 struct lpfc_hba_eq_hdl *eqhdl; 12230 int i; 12231 12232 for (i = 0; i < phba->cfg_irq_chann; i++) { 12233 eqhdl = lpfc_get_eq_hdl(i); 12234 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; 12235 eqhdl->phba = phba; 12236 } 12237 } 12238 12239 /** 12240 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 12241 * @phba: pointer to lpfc hba data structure. 12242 * @vectors: number of msix vectors allocated. 12243 * 12244 * The routine will figure out the CPU affinity assignment for every 12245 * MSI-X vector allocated for the HBA. 
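* In outline: the routine records the phys_id/core_id of each present CPU,
* back-fills any CPU whose eq is still unassigned from a peer CPU on the
* same phys_id (or, failing that, on any phys_id), and then assigns hdwq
* indices, 1:1 for the LPFC_CPU_FIRST_IRQ CPUs and round-robin for the rest.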
12246 * In addition, the CPU to IO channel mapping will be calculated 12247 * and the phba->sli4_hba.cpu_map array will reflect this. 12248 */ 12249 static void 12250 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 12251 { 12252 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; 12253 int max_phys_id, min_phys_id; 12254 int max_core_id, min_core_id; 12255 struct lpfc_vector_map_info *cpup; 12256 struct lpfc_vector_map_info *new_cpup; 12257 #ifdef CONFIG_X86 12258 struct cpuinfo_x86 *cpuinfo; 12259 #endif 12260 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12261 struct lpfc_hdwq_stat *c_stat; 12262 #endif 12263 12264 max_phys_id = 0; 12265 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 12266 max_core_id = 0; 12267 min_core_id = LPFC_VECTOR_MAP_EMPTY; 12268 12269 /* Update CPU map with physical id and core id of each CPU */ 12270 for_each_present_cpu(cpu) { 12271 cpup = &phba->sli4_hba.cpu_map[cpu]; 12272 #ifdef CONFIG_X86 12273 cpuinfo = &cpu_data(cpu); 12274 cpup->phys_id = cpuinfo->phys_proc_id; 12275 cpup->core_id = cpuinfo->cpu_core_id; 12276 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 12277 cpup->flag |= LPFC_CPU_MAP_HYPER; 12278 #else 12279 /* No distinction between CPUs for other platforms */ 12280 cpup->phys_id = 0; 12281 cpup->core_id = cpu; 12282 #endif 12283 12284 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12285 "3328 CPU %d physid %d coreid %d flag x%x\n", 12286 cpu, cpup->phys_id, cpup->core_id, cpup->flag); 12287 12288 if (cpup->phys_id > max_phys_id) 12289 max_phys_id = cpup->phys_id; 12290 if (cpup->phys_id < min_phys_id) 12291 min_phys_id = cpup->phys_id; 12292 12293 if (cpup->core_id > max_core_id) 12294 max_core_id = cpup->core_id; 12295 if (cpup->core_id < min_core_id) 12296 min_core_id = cpup->core_id; 12297 } 12298 12299 /* After looking at each irq vector assigned to this pcidev, it's 12300 * possible to see that not ALL CPUs have been accounted for. 12301 * Next we will set any unassigned (unaffinitized) cpu map 12302 * entries to an IRQ on the same phys_id. 12303 */ 12304 first_cpu = cpumask_first(cpu_present_mask); 12305 start_cpu = first_cpu; 12306 12307 for_each_present_cpu(cpu) { 12308 cpup = &phba->sli4_hba.cpu_map[cpu]; 12309 12310 /* Is this CPU entry unassigned */ 12311 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 12312 /* Mark CPU as IRQ not assigned by the kernel */ 12313 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 12314 12315 /* If so, find a new_cpup that's on the SAME 12316 * phys_id as cpup. start_cpu will start where we 12317 * left off so all unassigned entries don't get assigned 12318 * the IRQ of the first entry. 12319 */ 12320 new_cpu = start_cpu; 12321 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12322 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12323 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 12324 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && 12325 (new_cpup->phys_id == cpup->phys_id)) 12326 goto found_same; 12327 new_cpu = cpumask_next( 12328 new_cpu, cpu_present_mask); 12329 if (new_cpu == nr_cpumask_bits) 12330 new_cpu = first_cpu; 12331 } 12332 /* At this point, we leave the CPU as unassigned */ 12333 continue; 12334 found_same: 12335 /* We found a matching phys_id, so copy the IRQ info */ 12336 cpup->eq = new_cpup->eq; 12337 12338 /* Bump start_cpu to the next slot to minimize the 12339 * chance of having multiple unassigned CPU entries 12340 * selecting the same IRQ.
12341 */ 12342 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12343 if (start_cpu == nr_cpumask_bits) 12344 start_cpu = first_cpu; 12345 12346 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12347 "3337 Set Affinity: CPU %d " 12348 "eq %d from peer cpu %d same " 12349 "phys_id (%d)\n", 12350 cpu, cpup->eq, new_cpu, 12351 cpup->phys_id); 12352 } 12353 } 12354 12355 /* Set any unassigned cpu map entries to an IRQ on any phys_id */ 12356 start_cpu = first_cpu; 12357 12358 for_each_present_cpu(cpu) { 12359 cpup = &phba->sli4_hba.cpu_map[cpu]; 12360 12361 /* Is this entry unassigned */ 12362 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 12363 /* Mark it as IRQ not assigned by the kernel */ 12364 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 12365 12366 /* If so, find a new_cpup that's on ANY phys_id 12367 * as the cpup. start_cpu will start where we 12368 * left off so all unassigned entries don't get 12369 * assigned the IRQ of the first entry. 12370 */ 12371 new_cpu = start_cpu; 12372 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12373 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12374 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 12375 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) 12376 goto found_any; 12377 new_cpu = cpumask_next( 12378 new_cpu, cpu_present_mask); 12379 if (new_cpu == nr_cpumask_bits) 12380 new_cpu = first_cpu; 12381 } 12382 /* We should never leave an entry unassigned */ 12383 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12384 "3339 Set Affinity: CPU %d " 12385 "eq %d UNASSIGNED\n", 12386 cpu, cpup->eq); 12387 continue; 12388 found_any: 12389 /* We found an available entry, copy the IRQ info */ 12390 cpup->eq = new_cpup->eq; 12391 12392 /* Bump start_cpu to the next slot to minimize the 12393 * chance of having multiple unassigned CPU entries 12394 * selecting the same IRQ. 12395 */ 12396 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12397 if (start_cpu == nr_cpumask_bits) 12398 start_cpu = first_cpu; 12399 12400 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12401 "3338 Set Affinity: CPU %d " 12402 "eq %d from peer cpu %d (%d/%d)\n", 12403 cpu, cpup->eq, new_cpu, 12404 new_cpup->phys_id, new_cpup->core_id); 12405 } 12406 } 12407 12408 /* Assign hdwq indices that are unique across all cpus in the map 12409 * that are also FIRST_CPUs. 12410 */ 12411 idx = 0; 12412 for_each_present_cpu(cpu) { 12413 cpup = &phba->sli4_hba.cpu_map[cpu]; 12414 12415 /* Only FIRST IRQs get a hdwq index assignment. */ 12416 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12417 continue; 12418 12419 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ 12420 cpup->hdwq = idx; 12421 idx++; 12422 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12423 "3333 Set Affinity: CPU %d (phys %d core %d): " 12424 "hdwq %d eq %d flg x%x\n", 12425 cpu, cpup->phys_id, cpup->core_id, 12426 cpup->hdwq, cpup->eq, cpup->flag); 12427 } 12428 /* Associate a hdwq with each cpu_map entry 12429 * This will be 1 to 1 - hdwq to cpu, unless there are fewer 12430 * hardware queues than CPUs. For that case we will just round-robin 12431 * the available hardware queues as they get assigned to CPUs. 12432 * The next_idx is the idx from the FIRST_CPU loop above to account 12433 * for irq_chann < hdwq. The idx is used for round-robin assignments 12434 * and needs to start at 0. 12435 */ 12436 next_idx = idx; 12437 start_cpu = 0; 12438 idx = 0; 12439 for_each_present_cpu(cpu) { 12440 cpup = &phba->sli4_hba.cpu_map[cpu]; 12441 12442 /* FIRST cpus are already mapped.
*/ 12443 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 12444 continue; 12445 12446 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 12447 * of the unassigned cpus to the next idx so that all 12448 * hdw queues are fully utilized. 12449 */ 12450 if (next_idx < phba->cfg_hdw_queue) { 12451 cpup->hdwq = next_idx; 12452 next_idx++; 12453 continue; 12454 } 12455 12456 /* Not a First CPU and all hdw_queues are used. Reuse a 12457 * Hardware Queue for another CPU, so be smart about it 12458 * and pick one that has its IRQ/EQ mapped to the same phys_id 12459 * (CPU package) and core_id. 12460 */ 12461 new_cpu = start_cpu; 12462 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12463 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12464 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12465 new_cpup->phys_id == cpup->phys_id && 12466 new_cpup->core_id == cpup->core_id) { 12467 goto found_hdwq; 12468 } 12469 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12470 if (new_cpu == nr_cpumask_bits) 12471 new_cpu = first_cpu; 12472 } 12473 12474 /* If we can't match both phys_id and core_id, 12475 * settle for just a phys_id match. 12476 */ 12477 new_cpu = start_cpu; 12478 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12479 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12480 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12481 new_cpup->phys_id == cpup->phys_id) 12482 goto found_hdwq; 12483 12484 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12485 if (new_cpu == nr_cpumask_bits) 12486 new_cpu = first_cpu; 12487 } 12488 12489 /* Otherwise just round robin on cfg_hdw_queue */ 12490 cpup->hdwq = idx % phba->cfg_hdw_queue; 12491 idx++; 12492 goto logit; 12493 found_hdwq: 12494 /* We found an available entry, copy the IRQ info */ 12495 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12496 if (start_cpu == nr_cpumask_bits) 12497 start_cpu = first_cpu; 12498 cpup->hdwq = new_cpup->hdwq; 12499 logit: 12500 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12501 "3335 Set Affinity: CPU %d (phys %d core %d): " 12502 "hdwq %d eq %d flg x%x\n", 12503 cpu, cpup->phys_id, cpup->core_id, 12504 cpup->hdwq, cpup->eq, cpup->flag); 12505 } 12506 12507 /* 12508 * Initialize the cpu_map slots for not-present cpus in case 12509 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 12510 */ 12511 idx = 0; 12512 for_each_possible_cpu(cpu) { 12513 cpup = &phba->sli4_hba.cpu_map[cpu]; 12514 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12515 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 12516 c_stat->hdwq_no = cpup->hdwq; 12517 #endif 12518 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 12519 continue; 12520 12521 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 12522 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12523 c_stat->hdwq_no = cpup->hdwq; 12524 #endif 12525 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12526 "3340 Set Affinity: not present " 12527 "CPU %d hdwq %d\n", 12528 cpu, cpup->hdwq); 12529 } 12530 12531 /* The cpu_map array will be used later during initialization 12532 * when EQ / CQ / WQs are allocated and configured. 12533 */ 12534 return; 12535 } 12536 12537 /** 12538 * lpfc_cpuhp_get_eq 12539 * 12540 * @phba: pointer to lpfc hba data structure. 
12541 * @cpu: cpu going offline 12542 * @eqlist: eq list to append to 12543 */ 12544 static int 12545 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, 12546 struct list_head *eqlist) 12547 { 12548 const struct cpumask *maskp; 12549 struct lpfc_queue *eq; 12550 struct cpumask *tmp; 12551 u16 idx; 12552 12553 tmp = kzalloc(cpumask_size(), GFP_KERNEL); 12554 if (!tmp) 12555 return -ENOMEM; 12556 12557 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12558 maskp = pci_irq_get_affinity(phba->pcidev, idx); 12559 if (!maskp) 12560 continue; 12561 /* 12562 * if irq is not affinitized to the cpu going offline 12563 * then we don't need to poll the eq attached 12564 * to it. 12565 */ 12566 if (!cpumask_and(tmp, maskp, cpumask_of(cpu))) 12567 continue; 12568 /* get the cpus that are online and are affini- 12569 * tized to this irq vector. If the count is 12570 * more than 1 then cpuhp is not going to shut- 12571 * down this vector. Since this cpu has not 12572 * gone offline yet, we need >1. 12573 */ 12574 cpumask_and(tmp, maskp, cpu_online_mask); 12575 if (cpumask_weight(tmp) > 1) 12576 continue; 12577 12578 /* Now that we have an irq to shutdown, get the eq 12579 * mapped to this irq. Note: multiple hdwq's in 12580 * the software can share an eq, but eventually 12581 * only one eq will be mapped to this vector 12582 */ 12583 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 12584 list_add(&eq->_poll_list, eqlist); 12585 } 12586 kfree(tmp); 12587 return 0; 12588 } 12589 12590 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) 12591 { 12592 if (phba->sli_rev != LPFC_SLI_REV4) 12593 return; 12594 12595 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, 12596 &phba->cpuhp); 12597 /* 12598 * unregistering the instance doesn't stop the polling 12599 * timer. Wait for the poll timer to retire.
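 * The timer is (re)armed from within RCU read-side sections (see
 * lpfc_cpuhp_add() below), so synchronize_rcu() ensures any such
 * section has completed before del_timer_sync() runs.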
12600 */ 12601 synchronize_rcu(); 12602 del_timer_sync(&phba->cpuhp_poll_timer); 12603 } 12604 12605 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 12606 { 12607 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 12608 return; 12609 12610 __lpfc_cpuhp_remove(phba); 12611 } 12612 12613 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 12614 { 12615 if (phba->sli_rev != LPFC_SLI_REV4) 12616 return; 12617 12618 rcu_read_lock(); 12619 12620 if (!list_empty(&phba->poll_list)) 12621 mod_timer(&phba->cpuhp_poll_timer, 12622 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 12623 12624 rcu_read_unlock(); 12625 12626 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 12627 &phba->cpuhp); 12628 } 12629 12630 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 12631 { 12632 if (phba->pport->load_flag & FC_UNLOADING) { 12633 *retval = -EAGAIN; 12634 return true; 12635 } 12636 12637 if (phba->sli_rev != LPFC_SLI_REV4) { 12638 *retval = 0; 12639 return true; 12640 } 12641 12642 /* proceed with the hotplug */ 12643 return false; 12644 } 12645 12646 /** 12647 * lpfc_irq_set_aff - set IRQ affinity 12648 * @eqhdl: EQ handle 12649 * @cpu: cpu to set affinity 12650 * 12651 **/ 12652 static inline void 12653 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 12654 { 12655 cpumask_clear(&eqhdl->aff_mask); 12656 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 12657 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12658 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); 12659 } 12660 12661 /** 12662 * lpfc_irq_clear_aff - clear IRQ affinity 12663 * @eqhdl: EQ handle 12664 * 12665 **/ 12666 static inline void 12667 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 12668 { 12669 cpumask_clear(&eqhdl->aff_mask); 12670 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12671 } 12672 12673 /** 12674 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 12675 * @phba: pointer to HBA context object. 12676 * @cpu: cpu going offline/online 12677 * @offline: true, cpu is going offline. false, cpu is coming online. 12678 * 12679 * If cpu is going offline, we'll try our best effort to find the next 12680 * online cpu on the phba's original_mask and migrate all offlining IRQ 12681 * affinities. 12682 * 12683 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. 12684 * 12685 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on 12686 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
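 *
 * For example, when the CPU that owns a FIRST_IRQ vector goes offline,
 * that vector's affinity is migrated to the next online CPU in
 * irq_aff_mask (or handed back to irqbalance if none remain); when the
 * CPU comes back online, the affinity is restored to it.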
12687 * 12688 **/ 12689 static void 12690 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 12691 { 12692 struct lpfc_vector_map_info *cpup; 12693 struct cpumask *aff_mask; 12694 unsigned int cpu_select, cpu_next, idx; 12695 const struct cpumask *orig_mask; 12696 12697 if (phba->irq_chann_mode == NORMAL_MODE) 12698 return; 12699 12700 orig_mask = &phba->sli4_hba.irq_aff_mask; 12701 12702 if (!cpumask_test_cpu(cpu, orig_mask)) 12703 return; 12704 12705 cpup = &phba->sli4_hba.cpu_map[cpu]; 12706 12707 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12708 return; 12709 12710 if (offline) { 12711 /* Find next online CPU on original mask */ 12712 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); 12713 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); 12714 12715 /* Found a valid CPU */ 12716 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 12717 /* Go through each eqhdl and ensure offlining 12718 * cpu aff_mask is migrated 12719 */ 12720 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12721 aff_mask = lpfc_get_aff_mask(idx); 12722 12723 /* Migrate affinity */ 12724 if (cpumask_test_cpu(cpu, aff_mask)) 12725 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 12726 cpu_select); 12727 } 12728 } else { 12729 /* Rely on irqbalance if no online CPUs left on NUMA */ 12730 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 12731 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 12732 } 12733 } else { 12734 /* Migrate affinity back to this CPU */ 12735 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 12736 } 12737 } 12738 12739 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 12740 { 12741 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12742 struct lpfc_queue *eq, *next; 12743 LIST_HEAD(eqlist); 12744 int retval; 12745 12746 if (!phba) { 12747 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12748 return 0; 12749 } 12750 12751 if (__lpfc_cpuhp_checks(phba, &retval)) 12752 return retval; 12753 12754 lpfc_irq_rebalance(phba, cpu, true); 12755 12756 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 12757 if (retval) 12758 return retval; 12759 12760 /* start polling on these eq's */ 12761 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 12762 list_del_init(&eq->_poll_list); 12763 lpfc_sli4_start_polling(eq); 12764 } 12765 12766 return 0; 12767 } 12768 12769 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 12770 { 12771 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12772 struct lpfc_queue *eq, *next; 12773 unsigned int n; 12774 int retval; 12775 12776 if (!phba) { 12777 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12778 return 0; 12779 } 12780 12781 if (__lpfc_cpuhp_checks(phba, &retval)) 12782 return retval; 12783 12784 lpfc_irq_rebalance(phba, cpu, false); 12785 12786 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 12787 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 12788 if (n == cpu) 12789 lpfc_sli4_stop_polling(eq); 12790 } 12791 12792 return 0; 12793 } 12794 12795 /** 12796 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 12797 * @phba: pointer to lpfc hba data structure. 12798 * 12799 * This routine is invoked to enable the MSI-X interrupt vectors to device 12800 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 12801 * to cpus on the system. 
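 * If the PCI layer grants fewer vectors than requested, cfg_irq_chann is
 * trimmed to the number actually allocated (see message 3238 below).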
12802 * 12803 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 12804 * the number of cpus on the same numa node as this adapter. The vectors are 12805 * allocated without requesting OS affinity mapping. A vector will be 12806 * allocated and assigned to each online and offline cpu. If the cpu is 12807 * online, then affinity will be set to that cpu. If the cpu is offline, then 12808 * affinity will be set to the nearest peer cpu within the numa node that is 12809 * online. If there are no online cpus within the numa node, affinity is not 12810 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 12811 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 12812 * configured. 12813 * 12814 * If numa mode is not enabled and there is more than 1 vector allocated, then 12815 * the driver relies on the managed irq interface where the OS assigns vector to 12816 * cpu affinity. The driver will then use that affinity mapping to setup its 12817 * cpu mapping table. 12818 * 12819 * Return codes 12820 * 0 - successful 12821 * other values - error 12822 **/ 12823 static int 12824 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 12825 { 12826 int vectors, rc, index; 12827 char *name; 12828 const struct cpumask *aff_mask = NULL; 12829 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 12830 struct lpfc_vector_map_info *cpup; 12831 struct lpfc_hba_eq_hdl *eqhdl; 12832 const struct cpumask *maskp; 12833 unsigned int flags = PCI_IRQ_MSIX; 12834 12835 /* Set up MSI-X multi-message vectors */ 12836 vectors = phba->cfg_irq_chann; 12837 12838 if (phba->irq_chann_mode != NORMAL_MODE) 12839 aff_mask = &phba->sli4_hba.irq_aff_mask; 12840 12841 if (aff_mask) { 12842 cpu_cnt = cpumask_weight(aff_mask); 12843 vectors = min(phba->cfg_irq_chann, cpu_cnt); 12844 12845 /* cpu: iterates over aff_mask including offline or online 12846 * cpu_select: iterates over online aff_mask to set affinity 12847 */ 12848 cpu = cpumask_first(aff_mask); 12849 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 12850 } else { 12851 flags |= PCI_IRQ_AFFINITY; 12852 } 12853 12854 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 12855 if (rc < 0) { 12856 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12857 "0484 PCI enable MSI-X failed (%d)\n", rc); 12858 goto vec_fail_out; 12859 } 12860 vectors = rc; 12861 12862 /* Assign MSI-X vectors to interrupt handlers */ 12863 for (index = 0; index < vectors; index++) { 12864 eqhdl = lpfc_get_eq_hdl(index); 12865 name = eqhdl->handler_name; 12866 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 12867 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 12868 LPFC_DRIVER_HANDLER_NAME"%d", index); 12869 12870 eqhdl->idx = index; 12871 rc = request_irq(pci_irq_vector(phba->pcidev, index), 12872 &lpfc_sli4_hba_intr_handler, 0, 12873 name, eqhdl); 12874 if (rc) { 12875 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12876 "0486 MSI-X fast-path (%d) " 12877 "request_irq failed (%d)\n", index, rc); 12878 goto cfg_fail_out; 12879 } 12880 12881 eqhdl->irq = pci_irq_vector(phba->pcidev, index); 12882 12883 if (aff_mask) { 12884 /* If found a neighboring online cpu, set affinity */ 12885 if (cpu_select < nr_cpu_ids) 12886 lpfc_irq_set_aff(eqhdl, cpu_select); 12887 12888 /* Assign EQ to cpu_map */ 12889 lpfc_assign_eq_map_info(phba, index, 12890 LPFC_CPU_FIRST_IRQ, 12891 cpu); 12892 12893 /* Iterate to next offline or online cpu in aff_mask */ 12894 cpu = cpumask_next(cpu, aff_mask); 12895 12896 /* Find next online cpu in aff_mask to set 
affinity */ 12897 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 12898 } else if (vectors == 1) { 12899 cpu = cpumask_first(cpu_present_mask); 12900 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, 12901 cpu); 12902 } else { 12903 maskp = pci_irq_get_affinity(phba->pcidev, index); 12904 12905 /* Loop through all CPUs associated with vector index */ 12906 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 12907 cpup = &phba->sli4_hba.cpu_map[cpu]; 12908 12909 /* If this is the first CPU that's assigned to 12910 * this vector, set LPFC_CPU_FIRST_IRQ. 12911 * 12912 * With certain platforms it's possible that irq 12913 * vectors are affinitized to all the cpus. 12914 * This can result in each cpu_map.eq being set 12915 * to the last vector, overwriting all of the 12916 * previous cpu_map.eq entries. Ensure that 12917 * each vector receives a place in cpu_map. 12918 * A later call to lpfc_cpu_affinity_check will 12919 * ensure we are nicely balanced out. 12920 */ 12921 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) 12922 continue; 12923 lpfc_assign_eq_map_info(phba, index, 12924 LPFC_CPU_FIRST_IRQ, 12925 cpu); 12926 break; 12927 } 12928 } 12929 } 12930 12931 if (vectors != phba->cfg_irq_chann) { 12932 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12933 "3238 Reducing IO channels to match number of " 12934 "MSI-X vectors, requested %d got %d\n", 12935 phba->cfg_irq_chann, vectors); 12936 if (phba->cfg_irq_chann > vectors) 12937 phba->cfg_irq_chann = vectors; 12938 } 12939 12940 return rc; 12941 12942 cfg_fail_out: 12943 /* free the irq already requested */ 12944 for (--index; index >= 0; index--) { 12945 eqhdl = lpfc_get_eq_hdl(index); 12946 lpfc_irq_clear_aff(eqhdl); 12947 irq_set_affinity_hint(eqhdl->irq, NULL); 12948 free_irq(eqhdl->irq, eqhdl); 12949 } 12950 12951 /* Unconfigure MSI-X capability structure */ 12952 pci_free_irq_vectors(phba->pcidev); 12953 12954 vec_fail_out: 12955 return rc; 12956 } 12957 12958 /** 12959 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 12960 * @phba: pointer to lpfc hba data structure. 12961 * 12962 * This routine is invoked to enable the MSI interrupt mode to device with 12963 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 12964 * called to enable the MSI vector. The device driver is responsible for 12965 * calling request_irq() to register the MSI vector with an interrupt 12966 * handler, which is done in this function. 12967 * 12968 * Return codes 12969 * 0 - successful 12970 * other values - error 12971 **/ 12972 static int 12973 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 12974 { 12975 int rc, index; 12976 unsigned int cpu; 12977 struct lpfc_hba_eq_hdl *eqhdl; 12978 12979 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 12980 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 12981 if (rc > 0) 12982 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12983 "0487 PCI enable MSI mode success.\n"); 12984 else { 12985 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12986 "0488 PCI enable MSI mode failed (%d)\n", rc); 12987 return rc ?
rc : -1; 12988 } 12989 12990 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 12991 0, LPFC_DRIVER_NAME, phba); 12992 if (rc) { 12993 pci_free_irq_vectors(phba->pcidev); 12994 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12995 "0490 MSI request_irq failed (%d)\n", rc); 12996 return rc; 12997 } 12998 12999 eqhdl = lpfc_get_eq_hdl(0); 13000 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 13001 13002 cpu = cpumask_first(cpu_present_mask); 13003 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 13004 13005 for (index = 0; index < phba->cfg_irq_chann; index++) { 13006 eqhdl = lpfc_get_eq_hdl(index); 13007 eqhdl->idx = index; 13008 } 13009 13010 return 0; 13011 } 13012 13013 /** 13014 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 13015 * @phba: pointer to lpfc hba data structure. 13016 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 13017 * 13018 * This routine is invoked to enable device interrupt and associate driver's 13019 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 13020 * interface spec. Depends on the interrupt mode configured to the driver, 13021 * the driver will try to fallback from the configured interrupt mode to an 13022 * interrupt mode which is supported by the platform, kernel, and device in 13023 * the order of: 13024 * MSI-X -> MSI -> IRQ. 13025 * 13026 * Return codes 13027 * 0 - successful 13028 * other values - error 13029 **/ 13030 static uint32_t 13031 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 13032 { 13033 uint32_t intr_mode = LPFC_INTR_ERROR; 13034 int retval, idx; 13035 13036 if (cfg_mode == 2) { 13037 /* Preparation before conf_msi mbox cmd */ 13038 retval = 0; 13039 if (!retval) { 13040 /* Now, try to enable MSI-X interrupt mode */ 13041 retval = lpfc_sli4_enable_msix(phba); 13042 if (!retval) { 13043 /* Indicate initialization to MSI-X mode */ 13044 phba->intr_type = MSIX; 13045 intr_mode = 2; 13046 } 13047 } 13048 } 13049 13050 /* Fallback to MSI if MSI-X initialization failed */ 13051 if (cfg_mode >= 1 && phba->intr_type == NONE) { 13052 retval = lpfc_sli4_enable_msi(phba); 13053 if (!retval) { 13054 /* Indicate initialization to MSI mode */ 13055 phba->intr_type = MSI; 13056 intr_mode = 1; 13057 } 13058 } 13059 13060 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 13061 if (phba->intr_type == NONE) { 13062 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13063 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 13064 if (!retval) { 13065 struct lpfc_hba_eq_hdl *eqhdl; 13066 unsigned int cpu; 13067 13068 /* Indicate initialization to INTx mode */ 13069 phba->intr_type = INTx; 13070 intr_mode = 0; 13071 13072 eqhdl = lpfc_get_eq_hdl(0); 13073 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 13074 13075 cpu = cpumask_first(cpu_present_mask); 13076 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 13077 cpu); 13078 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 13079 eqhdl = lpfc_get_eq_hdl(idx); 13080 eqhdl->idx = idx; 13081 } 13082 } 13083 } 13084 return intr_mode; 13085 } 13086 13087 /** 13088 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 13089 * @phba: pointer to lpfc hba data structure. 13090 * 13091 * This routine is invoked to disable device interrupt and disassociate 13092 * the driver's interrupt handler(s) from interrupt vector(s) to device 13093 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 13094 * will release the interrupt vector(s) for the message signaled interrupt. 
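 * This is the inverse of lpfc_sli4_enable_intr(): every requested IRQ is
 * freed (clearing any MSI-X affinity hint first) and the PCI IRQ vectors
 * are then released.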
13095 **/ 13096 static void 13097 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 13098 { 13099 /* Disable the currently initialized interrupt mode */ 13100 if (phba->intr_type == MSIX) { 13101 int index; 13102 struct lpfc_hba_eq_hdl *eqhdl; 13103 13104 /* Free up MSI-X multi-message vectors */ 13105 for (index = 0; index < phba->cfg_irq_chann; index++) { 13106 eqhdl = lpfc_get_eq_hdl(index); 13107 lpfc_irq_clear_aff(eqhdl); 13108 irq_set_affinity_hint(eqhdl->irq, NULL); 13109 free_irq(eqhdl->irq, eqhdl); 13110 } 13111 } else { 13112 free_irq(phba->pcidev->irq, phba); 13113 } 13114 13115 pci_free_irq_vectors(phba->pcidev); 13116 13117 /* Reset interrupt management states */ 13118 phba->intr_type = NONE; 13119 phba->sli.slistat.sli_intr = 0; 13120 } 13121 13122 /** 13123 * lpfc_unset_hba - Unset SLI3 hba device initialization 13124 * @phba: pointer to lpfc hba data structure. 13125 * 13126 * This routine is invoked to unset the HBA device initialization steps to 13127 * a device with SLI-3 interface spec. 13128 **/ 13129 static void 13130 lpfc_unset_hba(struct lpfc_hba *phba) 13131 { 13132 struct lpfc_vport *vport = phba->pport; 13133 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 13134 13135 spin_lock_irq(shost->host_lock); 13136 vport->load_flag |= FC_UNLOADING; 13137 spin_unlock_irq(shost->host_lock); 13138 13139 kfree(phba->vpi_bmask); 13140 kfree(phba->vpi_ids); 13141 13142 lpfc_stop_hba_timers(phba); 13143 13144 phba->pport->work_port_events = 0; 13145 13146 lpfc_sli_hba_down(phba); 13147 13148 lpfc_sli_brdrestart(phba); 13149 13150 lpfc_sli_disable_intr(phba); 13151 13152 return; 13153 } 13154 13155 /** 13156 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 13157 * @phba: Pointer to HBA context object. 13158 * 13159 * This function is called in the SLI4 code path to wait for completion 13160 * of device's XRIs exchange busy. It will check the XRI exchange busy 13161 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 13162 * that, it will check the XRI exchange busy on outstanding FCP and ELS 13163 * I/Os every 30 seconds, log error message, and wait forever. Only when 13164 * all XRI exchange busy complete, the driver unload shall proceed with 13165 * invoking the function reset ioctl mailbox command to the CNA and the 13166 * the rest of the driver unload resource release. 13167 **/ 13168 static void 13169 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 13170 { 13171 struct lpfc_sli4_hdw_queue *qp; 13172 int idx, ccnt; 13173 int wait_time = 0; 13174 int io_xri_cmpl = 1; 13175 int nvmet_xri_cmpl = 1; 13176 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13177 13178 /* Driver just aborted IOs during the hba_unset process. Pause 13179 * here to give the HBA time to complete the IO and get entries 13180 * into the abts lists. 13181 */ 13182 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 13183 13184 /* Wait for NVME pending IO to flush back to transport. 
*/ 13185 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13186 lpfc_nvme_wait_for_io_drain(phba); 13187 13188 ccnt = 0; 13189 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13190 qp = &phba->sli4_hba.hdwq[idx]; 13191 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 13192 if (!io_xri_cmpl) /* if list is NOT empty */ 13193 ccnt++; 13194 } 13195 if (ccnt) 13196 io_xri_cmpl = 0; 13197 13198 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13199 nvmet_xri_cmpl = 13200 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13201 } 13202 13203 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 13204 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 13205 if (!nvmet_xri_cmpl) 13206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13207 "6424 NVMET XRI exchange busy " 13208 "wait time: %d seconds.\n", 13209 wait_time/1000); 13210 if (!io_xri_cmpl) 13211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13212 "6100 IO XRI exchange busy " 13213 "wait time: %d seconds.\n", 13214 wait_time/1000); 13215 if (!els_xri_cmpl) 13216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13217 "2878 ELS XRI exchange busy " 13218 "wait time: %d seconds.\n", 13219 wait_time/1000); 13220 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 13221 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 13222 } else { 13223 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 13224 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 13225 } 13226 13227 ccnt = 0; 13228 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13229 qp = &phba->sli4_hba.hdwq[idx]; 13230 io_xri_cmpl = list_empty( 13231 &qp->lpfc_abts_io_buf_list); 13232 if (!io_xri_cmpl) /* if list is NOT empty */ 13233 ccnt++; 13234 } 13235 if (ccnt) 13236 io_xri_cmpl = 0; 13237 13238 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13239 nvmet_xri_cmpl = list_empty( 13240 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13241 } 13242 els_xri_cmpl = 13243 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13244 13245 } 13246 } 13247 13248 /** 13249 * lpfc_sli4_hba_unset - Unset the fcoe hba 13250 * @phba: Pointer to HBA context object. 13251 * 13252 * This function is called in the SLI4 code path to reset the HBA's FCoE 13253 * function. The caller is not required to hold any lock. This routine 13254 * issues PCI function reset mailbox command to reset the FCoE function. 13255 * At the end of the function, it calls lpfc_hba_down_post function to 13256 * free any pending commands. 13257 **/ 13258 static void 13259 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 13260 { 13261 int wait_cnt = 0; 13262 LPFC_MBOXQ_t *mboxq; 13263 struct pci_dev *pdev = phba->pcidev; 13264 13265 lpfc_stop_hba_timers(phba); 13266 hrtimer_cancel(&phba->cmf_timer); 13267 13268 if (phba->pport) 13269 phba->sli4_hba.intr_enable = 0; 13270 13271 /* 13272 * Gracefully wait out the potential current outstanding asynchronous 13273 * mailbox command. 
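	 * New async mailbox posting is blocked first, the active mailbox is
	 * then polled for up to LPFC_ACTIVE_MBOX_WAIT_CNT iterations, and on
	 * timeout it is forcefully completed with MBX_NOT_FINISHED.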
13274 */ 13275 13276 /* First, block any pending async mailbox command from posted */ 13277 spin_lock_irq(&phba->hbalock); 13278 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 13279 spin_unlock_irq(&phba->hbalock); 13280 /* Now, trying to wait it out if we can */ 13281 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13282 msleep(10); 13283 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 13284 break; 13285 } 13286 /* Forcefully release the outstanding mailbox command if timed out */ 13287 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13288 spin_lock_irq(&phba->hbalock); 13289 mboxq = phba->sli.mbox_active; 13290 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 13291 __lpfc_mbox_cmpl_put(phba, mboxq); 13292 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13293 phba->sli.mbox_active = NULL; 13294 spin_unlock_irq(&phba->hbalock); 13295 } 13296 13297 /* Abort all iocbs associated with the hba */ 13298 lpfc_sli_hba_iocb_abort(phba); 13299 13300 /* Wait for completion of device XRI exchange busy */ 13301 lpfc_sli4_xri_exchange_busy_wait(phba); 13302 13303 /* per-phba callback de-registration for hotplug event */ 13304 if (phba->pport) 13305 lpfc_cpuhp_remove(phba); 13306 13307 /* Disable PCI subsystem interrupt */ 13308 lpfc_sli4_disable_intr(phba); 13309 13310 /* Disable SR-IOV if enabled */ 13311 if (phba->cfg_sriov_nr_virtfn) 13312 pci_disable_sriov(pdev); 13313 13314 /* Stop kthread signal shall trigger work_done one more time */ 13315 kthread_stop(phba->worker_thread); 13316 13317 /* Disable FW logging to host memory */ 13318 lpfc_ras_stop_fwlog(phba); 13319 13320 /* Unset the queues shared with the hardware then release all 13321 * allocated resources. 13322 */ 13323 lpfc_sli4_queue_unset(phba); 13324 lpfc_sli4_queue_destroy(phba); 13325 13326 /* Reset SLI4 HBA FCoE function */ 13327 lpfc_pci_function_reset(phba); 13328 13329 /* Free RAS DMA memory */ 13330 if (phba->ras_fwlog.ras_enabled) 13331 lpfc_sli4_ras_dma_free(phba); 13332 13333 /* Stop the SLI4 device port */ 13334 if (phba->pport) 13335 phba->pport->work_port_events = 0; 13336 } 13337 13338 static uint32_t 13339 lpfc_cgn_crc32(uint32_t crc, u8 byte) 13340 { 13341 uint32_t msb = 0; 13342 uint32_t bit; 13343 13344 for (bit = 0; bit < 8; bit++) { 13345 msb = (crc >> 31) & 1; 13346 crc <<= 1; 13347 13348 if (msb ^ (byte & 1)) { 13349 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; 13350 crc |= 1; 13351 } 13352 byte >>= 1; 13353 } 13354 return crc; 13355 } 13356 13357 static uint32_t 13358 lpfc_cgn_reverse_bits(uint32_t wd) 13359 { 13360 uint32_t result = 0; 13361 uint32_t i; 13362 13363 for (i = 0; i < 32; i++) { 13364 result <<= 1; 13365 result |= (1 & (wd >> i)); 13366 } 13367 return result; 13368 } 13369 13370 /* 13371 * The routine corresponds with the algorithm the HBA firmware 13372 * uses to validate the data integrity. 
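 *
 * For example, lpfc_cgn_reverse_bits(0x00000001) yields 0x80000000, and
 * the value returned here is the bit-reversed, complemented accumulator.
 * Typical use, as in lpfc_init_congestion_buf():
 *
 *   crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *   cp->cgn_info_crc = cpu_to_le32(crc);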
13373 */ 13374 uint32_t 13375 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) 13376 { 13377 uint32_t i; 13378 uint32_t result; 13379 uint8_t *data = (uint8_t *)ptr; 13380 13381 for (i = 0; i < byteLen; ++i) 13382 crc = lpfc_cgn_crc32(crc, data[i]); 13383 13384 result = ~lpfc_cgn_reverse_bits(crc); 13385 return result; 13386 } 13387 13388 void 13389 lpfc_init_congestion_buf(struct lpfc_hba *phba) 13390 { 13391 struct lpfc_cgn_info *cp; 13392 struct timespec64 cmpl_time; 13393 struct tm broken; 13394 uint16_t size; 13395 uint32_t crc; 13396 13397 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13398 "6235 INIT Congestion Buffer %p\n", phba->cgn_i); 13399 13400 if (!phba->cgn_i) 13401 return; 13402 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13403 13404 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 13405 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 13406 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 13407 atomic_set(&phba->cgn_sync_warn_cnt, 0); 13408 13409 atomic_set(&phba->cgn_driver_evt_cnt, 0); 13410 atomic_set(&phba->cgn_latency_evt_cnt, 0); 13411 atomic64_set(&phba->cgn_latency_evt, 0); 13412 phba->cgn_evt_minute = 0; 13413 phba->hba_flag &= ~HBA_CGN_DAY_WRAP; 13414 13415 memset(cp, 0xff, LPFC_CGN_DATA_SIZE); 13416 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); 13417 cp->cgn_info_version = LPFC_CGN_INFO_V3; 13418 13419 /* cgn parameters */ 13420 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 13421 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 13422 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 13423 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 13424 13425 ktime_get_real_ts64(&cmpl_time); 13426 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13427 13428 cp->cgn_info_month = broken.tm_mon + 1; 13429 cp->cgn_info_day = broken.tm_mday; 13430 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ 13431 cp->cgn_info_hour = broken.tm_hour; 13432 cp->cgn_info_minute = broken.tm_min; 13433 cp->cgn_info_second = broken.tm_sec; 13434 13435 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13436 "2643 CGNInfo Init: Start Time " 13437 "%d/%d/%d %d:%d:%d\n", 13438 cp->cgn_info_day, cp->cgn_info_month, 13439 cp->cgn_info_year, cp->cgn_info_hour, 13440 cp->cgn_info_minute, cp->cgn_info_second); 13441 13442 /* Fill in default LUN qdepth */ 13443 if (phba->pport) { 13444 size = (uint16_t)(phba->pport->cfg_lun_queue_depth); 13445 cp->cgn_lunq = cpu_to_le16(size); 13446 } 13447 13448 /* last used Index initialized to 0xff already */ 13449 13450 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13451 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13452 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13453 cp->cgn_info_crc = cpu_to_le32(crc); 13454 13455 phba->cgn_evt_timestamp = jiffies + 13456 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 13457 } 13458 13459 void 13460 lpfc_init_congestion_stat(struct lpfc_hba *phba) 13461 { 13462 struct lpfc_cgn_info *cp; 13463 struct timespec64 cmpl_time; 13464 struct tm broken; 13465 uint32_t crc; 13466 13467 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13468 "6236 INIT Congestion Stat %p\n", phba->cgn_i); 13469 13470 if (!phba->cgn_i) 13471 return; 13472 13473 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13474 memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE); 13475 13476 ktime_get_real_ts64(&cmpl_time); 13477 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13478 13479 cp->cgn_stat_month = broken.tm_mon + 1; 13480 cp->cgn_stat_day = broken.tm_mday; 13481 cp->cgn_stat_year = broken.tm_year - 
100; /* relative to 2000 */ 13482 cp->cgn_stat_hour = broken.tm_hour; 13483 cp->cgn_stat_minute = broken.tm_min; 13484 13485 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13486 "2647 CGNstat Init: Start Time " 13487 "%d/%d/%d %d:%d\n", 13488 cp->cgn_stat_day, cp->cgn_stat_month, 13489 cp->cgn_stat_year, cp->cgn_stat_hour, 13490 cp->cgn_stat_minute); 13491 13492 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13493 cp->cgn_info_crc = cpu_to_le32(crc); 13494 } 13495 13496 /** 13497 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA 13498 * @phba: Pointer to hba context object. 13499 * @reg: flag to determine register or unregister. 13500 */ 13501 static int 13502 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) 13503 { 13504 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; 13505 union lpfc_sli4_cfg_shdr *shdr; 13506 uint32_t shdr_status, shdr_add_status; 13507 LPFC_MBOXQ_t *mboxq; 13508 int length, rc; 13509 13510 if (!phba->cgn_i) 13511 return -ENXIO; 13512 13513 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13514 if (!mboxq) { 13515 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 13516 "2641 REG_CONGESTION_BUF mbox allocation fail: " 13517 "HBA state x%x reg %d\n", 13518 phba->pport->port_state, reg); 13519 return -ENOMEM; 13520 } 13521 13522 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - 13523 sizeof(struct lpfc_sli4_cfg_mhdr)); 13524 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13525 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, 13526 LPFC_SLI4_MBX_EMBED); 13527 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; 13528 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); 13529 if (reg > 0) 13530 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); 13531 else 13532 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); 13533 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); 13534 reg_congestion_buf->addr_lo = 13535 putPaddrLow(phba->cgn_i->phys); 13536 reg_congestion_buf->addr_hi = 13537 putPaddrHigh(phba->cgn_i->phys); 13538 13539 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13540 shdr = (union lpfc_sli4_cfg_shdr *) 13541 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 13542 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13543 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13544 &shdr->response); 13545 mempool_free(mboxq, phba->mbox_mem_pool); 13546 if (shdr_status || shdr_add_status || rc) { 13547 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13548 "2642 REG_CONGESTION_BUF mailbox " 13549 "failed with status x%x add_status x%x," 13550 " mbx status x%x reg %d\n", 13551 shdr_status, shdr_add_status, rc, reg); 13552 return -ENXIO; 13553 } 13554 return 0; 13555 } 13556 13557 int 13558 lpfc_unreg_congestion_buf(struct lpfc_hba *phba) 13559 { 13560 lpfc_cmf_stop(phba); 13561 return __lpfc_reg_congestion_buf(phba, 0); 13562 } 13563 13564 int 13565 lpfc_reg_congestion_buf(struct lpfc_hba *phba) 13566 { 13567 return __lpfc_reg_congestion_buf(phba, 1); 13568 } 13569 13570 /** 13571 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 13572 * @phba: Pointer to HBA context object. 13573 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 13574 * 13575 * This function is called in the SLI4 code path to read the port's 13576 * sli4 capabilities. 13577 * 13578 * This function may be be called from any context that can block-wait 13579 * for the completion. 
The expectation is that this routine is called 13580 * typically from probe_one or from the online routine. 13581 **/ 13582 int 13583 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 13584 { 13585 int rc; 13586 struct lpfc_mqe *mqe = &mboxq->u.mqe; 13587 struct lpfc_pc_sli4_params *sli4_params; 13588 uint32_t mbox_tmo; 13589 int length; 13590 bool exp_wqcq_pages = true; 13591 struct lpfc_sli4_parameters *mbx_sli4_parameters; 13592 13593 /* 13594 * By default, the driver assumes the SLI4 port requires RPI 13595 * header postings. The SLI4_PARAM response will correct this 13596 * assumption. 13597 */ 13598 phba->sli4_hba.rpi_hdrs_in_use = 1; 13599 13600 /* Read the port's SLI4 Config Parameters */ 13601 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 13602 sizeof(struct lpfc_sli4_cfg_mhdr)); 13603 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13604 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 13605 length, LPFC_SLI4_MBX_EMBED); 13606 if (!phba->sli4_hba.intr_enable) 13607 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13608 else { 13609 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 13610 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 13611 } 13612 if (unlikely(rc)) 13613 return rc; 13614 sli4_params = &phba->sli4_hba.pc_sli4_params; 13615 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 13616 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 13617 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 13618 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 13619 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 13620 mbx_sli4_parameters); 13621 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 13622 mbx_sli4_parameters); 13623 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 13624 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 13625 else 13626 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 13627 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 13628 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, 13629 mbx_sli4_parameters); 13630 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 13631 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 13632 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 13633 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 13634 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 13635 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 13636 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 13637 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 13638 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 13639 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 13640 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 13641 mbx_sli4_parameters); 13642 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 13643 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 13644 mbx_sli4_parameters); 13645 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 13646 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 13647 13648 /* Check for Extended Pre-Registered SGL support */ 13649 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 13650 13651 /* Check for firmware nvme support */ 13652 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 13653 bf_get(cfg_xib, mbx_sli4_parameters)); 13654 13655 if (rc) { 13656 /* Save this to indicate the Firmware supports NVME */ 13657 sli4_params->nvme = 1; 13658 13659 /* Firmware NVME support, 
check driver FC4 NVME support */ 13660 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 13661 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13662 "6133 Disabling NVME support: " 13663 "FC4 type not supported: x%x\n", 13664 phba->cfg_enable_fc4_type); 13665 goto fcponly; 13666 } 13667 } else { 13668 /* No firmware NVME support, check driver FC4 NVME support */ 13669 sli4_params->nvme = 0; 13670 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13671 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 13672 "6101 Disabling NVME support: Not " 13673 "supported by firmware (%d %d) x%x\n", 13674 bf_get(cfg_nvme, mbx_sli4_parameters), 13675 bf_get(cfg_xib, mbx_sli4_parameters), 13676 phba->cfg_enable_fc4_type); 13677 fcponly: 13678 phba->nvmet_support = 0; 13679 phba->cfg_nvmet_mrq = 0; 13680 phba->cfg_nvme_seg_cnt = 0; 13681 13682 /* If no FC4 type support, move to just SCSI support */ 13683 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 13684 return -ENODEV; 13685 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 13686 } 13687 } 13688 13689 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 13690 * accommodate 512K and 1M IOs in a single nvme buf. 13691 */ 13692 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13693 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 13694 13695 /* Enable embedded Payload BDE if support is indicated */ 13696 if (bf_get(cfg_pbde, mbx_sli4_parameters)) 13697 phba->cfg_enable_pbde = 1; 13698 else 13699 phba->cfg_enable_pbde = 0; 13700 13701 /* 13702 * To support Suppress Response feature we must satisfy 3 conditions. 13703 * lpfc_suppress_rsp module parameter must be set (default). 13704 * In SLI4-Parameters Descriptor: 13705 * Extended Inline Buffers (XIB) must be supported. 13706 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 13707 * (double negative). 13708 */ 13709 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 13710 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 13711 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 13712 else 13713 phba->cfg_suppress_rsp = 0; 13714 13715 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 13716 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 13717 13718 /* Make sure that sge_supp_len can be handled by the driver */ 13719 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 13720 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 13721 13722 /* 13723 * Check whether the adapter supports an embedded copy of the 13724 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 13725 * to use this option, 128-byte WQEs must be used. 
13726 */ 13727 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 13728 phba->fcp_embed_io = 1; 13729 else 13730 phba->fcp_embed_io = 0; 13731 13732 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13733 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 13734 bf_get(cfg_xib, mbx_sli4_parameters), 13735 phba->cfg_enable_pbde, 13736 phba->fcp_embed_io, sli4_params->nvme, 13737 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 13738 13739 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 13740 LPFC_SLI_INTF_IF_TYPE_2) && 13741 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 13742 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 13743 exp_wqcq_pages = false; 13744 13745 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 13746 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 13747 exp_wqcq_pages && 13748 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 13749 phba->enab_exp_wqcq_pages = 1; 13750 else 13751 phba->enab_exp_wqcq_pages = 0; 13752 /* 13753 * Check if the SLI port supports MDS Diagnostics 13754 */ 13755 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 13756 phba->mds_diags_support = 1; 13757 else 13758 phba->mds_diags_support = 0; 13759 13760 /* 13761 * Check if the SLI port supports NSLER 13762 */ 13763 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 13764 phba->nsler = 1; 13765 else 13766 phba->nsler = 0; 13767 13768 return 0; 13769 } 13770 13771 /** 13772 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 13773 * @pdev: pointer to PCI device 13774 * @pid: pointer to PCI device identifier 13775 * 13776 * This routine is to be called to attach a device with SLI-3 interface spec 13777 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 13778 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 13779 * information of the device and driver to see if the driver state that it can 13780 * support this kind of device. If the match is successful, the driver core 13781 * invokes this routine. If this routine determines it can claim the HBA, it 13782 * does all the initialization that it needs to do to handle the HBA properly. 
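 * Setup proceeds in order: PCI enable, SLI API jump table, SLI-3 PCI
 * memory map, driver resources, iocb list, SCSI host plus sysfs
 * attributes, and finally interrupt bring-up with fallback across the
 * MSI-X/MSI/INTx modes.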
13783 * 13784 * Return code 13785 * 0 - driver can claim the device 13786 * negative value - driver can not claim the device 13787 **/ 13788 static int 13789 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 13790 { 13791 struct lpfc_hba *phba; 13792 struct lpfc_vport *vport = NULL; 13793 struct Scsi_Host *shost = NULL; 13794 int error; 13795 uint32_t cfg_mode, intr_mode; 13796 13797 /* Allocate memory for HBA structure */ 13798 phba = lpfc_hba_alloc(pdev); 13799 if (!phba) 13800 return -ENOMEM; 13801 13802 /* Perform generic PCI device enabling operation */ 13803 error = lpfc_enable_pci_dev(phba); 13804 if (error) 13805 goto out_free_phba; 13806 13807 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 13808 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 13809 if (error) 13810 goto out_disable_pci_dev; 13811 13812 /* Set up SLI-3 specific device PCI memory space */ 13813 error = lpfc_sli_pci_mem_setup(phba); 13814 if (error) { 13815 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13816 "1402 Failed to set up pci memory space.\n"); 13817 goto out_disable_pci_dev; 13818 } 13819 13820 /* Set up SLI-3 specific device driver resources */ 13821 error = lpfc_sli_driver_resource_setup(phba); 13822 if (error) { 13823 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13824 "1404 Failed to set up driver resource.\n"); 13825 goto out_unset_pci_mem_s3; 13826 } 13827 13828 /* Initialize and populate the iocb list per host */ 13829 13830 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 13831 if (error) { 13832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13833 "1405 Failed to initialize iocb list.\n"); 13834 goto out_unset_driver_resource_s3; 13835 } 13836 13837 /* Set up common device driver resources */ 13838 error = lpfc_setup_driver_resource_phase2(phba); 13839 if (error) { 13840 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13841 "1406 Failed to set up driver resource.\n"); 13842 goto out_free_iocb_list; 13843 } 13844 13845 /* Get the default values for Model Name and Description */ 13846 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 13847 13848 /* Create SCSI host to the physical port */ 13849 error = lpfc_create_shost(phba); 13850 if (error) { 13851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13852 "1407 Failed to create scsi host.\n"); 13853 goto out_unset_driver_resource; 13854 } 13855 13856 /* Configure sysfs attributes */ 13857 vport = phba->pport; 13858 error = lpfc_alloc_sysfs_attr(vport); 13859 if (error) { 13860 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13861 "1476 Failed to allocate sysfs attr\n"); 13862 goto out_destroy_shost; 13863 } 13864 13865 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 13866 /* Now, trying to enable interrupt and bring up the device */ 13867 cfg_mode = phba->cfg_use_msi; 13868 while (true) { 13869 /* Put device to a known state before enabling interrupt */ 13870 lpfc_stop_port(phba); 13871 /* Configure and enable interrupt */ 13872 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 13873 if (intr_mode == LPFC_INTR_ERROR) { 13874 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13875 "0431 Failed to enable interrupt.\n"); 13876 error = -ENODEV; 13877 goto out_free_sysfs_attr; 13878 } 13879 /* SLI-3 HBA setup */ 13880 if (lpfc_sli_hba_setup(phba)) { 13881 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13882 "1477 Failed to set up hba\n"); 13883 error = -ENODEV; 13884 goto out_remove_device; 13885 } 13886 13887 /* Wait 50ms for the interrupts of previous mailbox commands */ 13888 
msleep(50); 13889 /* Check active interrupts on message signaled interrupts */ 13890 if (intr_mode == 0 || 13891 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 13892 /* Log the current active interrupt mode */ 13893 phba->intr_mode = intr_mode; 13894 lpfc_log_intr_mode(phba, intr_mode); 13895 break; 13896 } else { 13897 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13898 "0447 Configure interrupt mode (%d) " 13899 "failed active interrupt test.\n", 13900 intr_mode); 13901 /* Disable the current interrupt mode */ 13902 lpfc_sli_disable_intr(phba); 13903 /* Try next level of interrupt mode */ 13904 cfg_mode = --intr_mode; 13905 } 13906 } 13907 13908 /* Perform post initialization setup */ 13909 lpfc_post_init_setup(phba); 13910 13911 /* Check if there are static vports to be created. */ 13912 lpfc_create_static_vport(phba); 13913 13914 return 0; 13915 13916 out_remove_device: 13917 lpfc_unset_hba(phba); 13918 out_free_sysfs_attr: 13919 lpfc_free_sysfs_attr(vport); 13920 out_destroy_shost: 13921 lpfc_destroy_shost(phba); 13922 out_unset_driver_resource: 13923 lpfc_unset_driver_resource_phase2(phba); 13924 out_free_iocb_list: 13925 lpfc_free_iocb_list(phba); 13926 out_unset_driver_resource_s3: 13927 lpfc_sli_driver_resource_unset(phba); 13928 out_unset_pci_mem_s3: 13929 lpfc_sli_pci_mem_unset(phba); 13930 out_disable_pci_dev: 13931 lpfc_disable_pci_dev(phba); 13932 if (shost) 13933 scsi_host_put(shost); 13934 out_free_phba: 13935 lpfc_hba_free(phba); 13936 return error; 13937 } 13938 13939 /** 13940 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 13941 * @pdev: pointer to PCI device 13942 * 13943 * This routine is to be called to detach a device with SLI-3 interface 13944 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 13945 * removed from PCI bus, it performs all the necessary cleanup for the HBA 13946 * device to be removed from the PCI subsystem properly. 13947 **/ 13948 static void 13949 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 13950 { 13951 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13952 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 13953 struct lpfc_vport **vports; 13954 struct lpfc_hba *phba = vport->phba; 13955 int i; 13956 13957 spin_lock_irq(&phba->hbalock); 13958 vport->load_flag |= FC_UNLOADING; 13959 spin_unlock_irq(&phba->hbalock); 13960 13961 lpfc_free_sysfs_attr(vport); 13962 13963 /* Release all the vports against this physical port */ 13964 vports = lpfc_create_vport_work_array(phba); 13965 if (vports != NULL) 13966 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 13967 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 13968 continue; 13969 fc_vport_terminate(vports[i]->fc_vport); 13970 } 13971 lpfc_destroy_vport_work_array(phba, vports); 13972 13973 /* Remove FC host with the physical port */ 13974 fc_remove_host(shost); 13975 scsi_remove_host(shost); 13976 13977 /* Clean up all nodes, mailboxes and IOs. */ 13978 lpfc_cleanup(vport); 13979 13980 /* 13981 * Bring down the SLI Layer. This step disables all interrupts, 13982 * clears the rings, discards all mailbox commands, and resets 13983 * the HBA.
13984 */ 13985 13986 /* HBA interrupt will be disabled after this call */ 13987 lpfc_sli_hba_down(phba); 13988 /* Stop kthread signal shall trigger work_done one more time */ 13989 kthread_stop(phba->worker_thread); 13990 /* Final cleanup of txcmplq and reset the HBA */ 13991 lpfc_sli_brdrestart(phba); 13992 13993 kfree(phba->vpi_bmask); 13994 kfree(phba->vpi_ids); 13995 13996 lpfc_stop_hba_timers(phba); 13997 spin_lock_irq(&phba->port_list_lock); 13998 list_del_init(&vport->listentry); 13999 spin_unlock_irq(&phba->port_list_lock); 14000 14001 lpfc_debugfs_terminate(vport); 14002 14003 /* Disable SR-IOV if enabled */ 14004 if (phba->cfg_sriov_nr_virtfn) 14005 pci_disable_sriov(pdev); 14006 14007 /* Disable interrupt */ 14008 lpfc_sli_disable_intr(phba); 14009 14010 scsi_host_put(shost); 14011 14012 /* 14013 * Call scsi_free before mem_free since scsi bufs are released to their 14014 * corresponding pools here. 14015 */ 14016 lpfc_scsi_free(phba); 14017 lpfc_free_iocb_list(phba); 14018 14019 lpfc_mem_free_all(phba); 14020 14021 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 14022 phba->hbqslimp.virt, phba->hbqslimp.phys); 14023 14024 /* Free resources associated with SLI2 interface */ 14025 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 14026 phba->slim2p.virt, phba->slim2p.phys); 14027 14028 /* unmap adapter SLIM and Control Registers */ 14029 iounmap(phba->ctrl_regs_memmap_p); 14030 iounmap(phba->slim_memmap_p); 14031 14032 lpfc_hba_free(phba); 14033 14034 pci_release_mem_regions(pdev); 14035 pci_disable_device(pdev); 14036 } 14037 14038 /** 14039 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 14040 * @dev_d: pointer to device 14041 * 14042 * This routine is to be called from the kernel's PCI subsystem to support 14043 * system Power Management (PM) to device with SLI-3 interface spec. When 14044 * PM invokes this method, it quiesces the device by stopping the driver's 14045 * worker thread for the device, turning off device's interrupt and DMA, 14046 * and bring the device offline. Note that as the driver implements the 14047 * minimum PM requirements to a power-aware driver's PM support for the 14048 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 14049 * to the suspend() method call will be treated as SUSPEND and the driver will 14050 * fully reinitialize its device during resume() method call, the driver will 14051 * set device to PCI_D3hot state in PCI config space instead of setting it 14052 * according to the @msg provided by the PM. 14053 * 14054 * Return code 14055 * 0 - driver suspended the device 14056 * Error otherwise 14057 **/ 14058 static int __maybe_unused 14059 lpfc_pci_suspend_one_s3(struct device *dev_d) 14060 { 14061 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14062 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14063 14064 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14065 "0473 PCI device Power Management suspend.\n"); 14066 14067 /* Bring down the device */ 14068 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14069 lpfc_offline(phba); 14070 kthread_stop(phba->worker_thread); 14071 14072 /* Disable interrupt from device */ 14073 lpfc_sli_disable_intr(phba); 14074 14075 return 0; 14076 } 14077 14078 /** 14079 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 14080 * @dev_d: pointer to device 14081 * 14082 * This routine is to be called from the kernel's PCI subsystem to support 14083 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 14084 * invokes this method, it restores the device's PCI config space state and 14085 * fully reinitializes the device and brings it online. Note that as the 14086 * driver implements the minimum PM requirements to a power-aware driver's 14087 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 14088 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 14089 * driver will fully reinitialize its device during resume() method call, 14090 * the device will be set to PCI_D0 directly in PCI config space before 14091 * restoring the state. 14092 * 14093 * Return code 14094 * 0 - driver suspended the device 14095 * Error otherwise 14096 **/ 14097 static int __maybe_unused 14098 lpfc_pci_resume_one_s3(struct device *dev_d) 14099 { 14100 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14101 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14102 uint32_t intr_mode; 14103 int error; 14104 14105 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14106 "0452 PCI device Power Management resume.\n"); 14107 14108 /* Startup the kernel thread for this host adapter. */ 14109 phba->worker_thread = kthread_run(lpfc_do_work, phba, 14110 "lpfc_worker_%d", phba->brd_no); 14111 if (IS_ERR(phba->worker_thread)) { 14112 error = PTR_ERR(phba->worker_thread); 14113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14114 "0434 PM resume failed to start worker " 14115 "thread: error=x%x.\n", error); 14116 return error; 14117 } 14118 14119 /* Init cpu_map array */ 14120 lpfc_cpu_map_array_init(phba); 14121 /* Init hba_eq_hdl array */ 14122 lpfc_hba_eq_hdl_array_init(phba); 14123 /* Configure and enable interrupt */ 14124 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14125 if (intr_mode == LPFC_INTR_ERROR) { 14126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14127 "0430 PM resume Failed to enable interrupt\n"); 14128 return -EIO; 14129 } else 14130 phba->intr_mode = intr_mode; 14131 14132 /* Restart HBA and bring it online */ 14133 lpfc_sli_brdrestart(phba); 14134 lpfc_online(phba); 14135 14136 /* Log the current active interrupt mode */ 14137 lpfc_log_intr_mode(phba, phba->intr_mode); 14138 14139 return 0; 14140 } 14141 14142 /** 14143 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 14144 * @phba: pointer to lpfc hba data structure. 14145 * 14146 * This routine is called to prepare the SLI3 device for PCI slot recover. It 14147 * aborts all the outstanding SCSI I/Os to the pci device. 14148 **/ 14149 static void 14150 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 14151 { 14152 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14153 "2723 PCI channel I/O abort preparing for recovery\n"); 14154 14155 /* 14156 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 14157 * and let the SCSI mid-layer to retry them to recover. 14158 */ 14159 lpfc_sli_abort_fcp_rings(phba); 14160 } 14161 14162 /** 14163 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 14164 * @phba: pointer to lpfc hba data structure. 14165 * 14166 * This routine is called to prepare the SLI3 device for PCI slot reset. It 14167 * disables the device interrupt and pci device, and aborts the internal FCP 14168 * pending I/Os. 
14169 **/ 14170 static void 14171 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 14172 { 14173 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14174 "2710 PCI channel disable preparing for reset\n"); 14175 14176 /* Block any management I/Os to the device */ 14177 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 14178 14179 /* Block all SCSI devices' I/Os on the host */ 14180 lpfc_scsi_dev_block(phba); 14181 14182 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 14183 lpfc_sli_flush_io_rings(phba); 14184 14185 /* stop all timers */ 14186 lpfc_stop_hba_timers(phba); 14187 14188 /* Disable interrupt and pci device */ 14189 lpfc_sli_disable_intr(phba); 14190 pci_disable_device(phba->pcidev); 14191 } 14192 14193 /** 14194 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 14195 * @phba: pointer to lpfc hba data structure. 14196 * 14197 * This routine is called to prepare the SLI3 device for PCI slot permanently 14198 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 14199 * pending I/Os. 14200 **/ 14201 static void 14202 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 14203 { 14204 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14205 "2711 PCI channel permanent disable for failure\n"); 14206 /* Block all SCSI devices' I/Os on the host */ 14207 lpfc_scsi_dev_block(phba); 14208 14209 /* stop all timers */ 14210 lpfc_stop_hba_timers(phba); 14211 14212 /* Clean up all driver's outstanding SCSI I/Os */ 14213 lpfc_sli_flush_io_rings(phba); 14214 } 14215 14216 /** 14217 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 14218 * @pdev: pointer to PCI device. 14219 * @state: the current PCI connection state. 14220 * 14221 * This routine is called from the PCI subsystem for I/O error handling to 14222 * device with SLI-3 interface spec. This function is called by the PCI 14223 * subsystem after a PCI bus error affecting this device has been detected. 14224 * When this function is invoked, it will need to stop all the I/Os and 14225 * interrupt(s) to the device. Once that is done, it will return 14226 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 14227 * as desired. 
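 * In the PCI error recovery sequence the PCI core calls .error_detected()
 * first, then .slot_reset() after the bus/slot has been reset (when
 * PCI_ERS_RESULT_NEED_RESET is returned here), and finally .resume(); for
 * SLI-3 parts those are lpfc_io_slot_reset_s3() and lpfc_io_resume_s3()
 * below.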
14228 * 14229 * Return codes 14230 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 14231 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 14232 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14233 **/ 14234 static pci_ers_result_t 14235 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 14236 { 14237 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14238 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14239 14240 switch (state) { 14241 case pci_channel_io_normal: 14242 /* Non-fatal error, prepare for recovery */ 14243 lpfc_sli_prep_dev_for_recover(phba); 14244 return PCI_ERS_RESULT_CAN_RECOVER; 14245 case pci_channel_io_frozen: 14246 /* Fatal error, prepare for slot reset */ 14247 lpfc_sli_prep_dev_for_reset(phba); 14248 return PCI_ERS_RESULT_NEED_RESET; 14249 case pci_channel_io_perm_failure: 14250 /* Permanent failure, prepare for device down */ 14251 lpfc_sli_prep_dev_for_perm_failure(phba); 14252 return PCI_ERS_RESULT_DISCONNECT; 14253 default: 14254 /* Unknown state, prepare and request slot reset */ 14255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14256 "0472 Unknown PCI error state: x%x\n", state); 14257 lpfc_sli_prep_dev_for_reset(phba); 14258 return PCI_ERS_RESULT_NEED_RESET; 14259 } 14260 } 14261 14262 /** 14263 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 14264 * @pdev: pointer to PCI device. 14265 * 14266 * This routine is called from the PCI subsystem for error handling to 14267 * device with SLI-3 interface spec. This is called after PCI bus has been 14268 * reset to restart the PCI card from scratch, as if from a cold-boot. 14269 * During the PCI subsystem error recovery, after driver returns 14270 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 14271 * recovery and then call this routine before calling the .resume method 14272 * to recover the device. This function will initialize the HBA device, 14273 * enable the interrupt, but it will just put the HBA to offline state 14274 * without passing any I/O traffic. 14275 * 14276 * Return codes 14277 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 14278 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14279 */ 14280 static pci_ers_result_t 14281 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 14282 { 14283 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14284 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14285 struct lpfc_sli *psli = &phba->sli; 14286 uint32_t intr_mode; 14287 14288 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 14289 if (pci_enable_device_mem(pdev)) { 14290 printk(KERN_ERR "lpfc: Cannot re-enable " 14291 "PCI device after reset.\n"); 14292 return PCI_ERS_RESULT_DISCONNECT; 14293 } 14294 14295 pci_restore_state(pdev); 14296 14297 /* 14298 * As the new kernel behavior of pci_restore_state() API call clears 14299 * device saved_state flag, need to save the restored state again. 
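 * pci_restore_state() consumes the state captured by pci_save_state() and
 * clears the device's saved_state flag, so the pci_save_state() call below
 * re-arms the saved state for any later reset of this function.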
14300 */ 14301 pci_save_state(pdev); 14302 14303 if (pdev->is_busmaster) 14304 pci_set_master(pdev); 14305 14306 spin_lock_irq(&phba->hbalock); 14307 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 14308 spin_unlock_irq(&phba->hbalock); 14309 14310 /* Configure and enable interrupt */ 14311 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14312 if (intr_mode == LPFC_INTR_ERROR) { 14313 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14314 "0427 Cannot re-enable interrupt after " 14315 "slot reset.\n"); 14316 return PCI_ERS_RESULT_DISCONNECT; 14317 } else 14318 phba->intr_mode = intr_mode; 14319 14320 /* Take device offline, it will perform cleanup */ 14321 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14322 lpfc_offline(phba); 14323 lpfc_sli_brdrestart(phba); 14324 14325 /* Log the current active interrupt mode */ 14326 lpfc_log_intr_mode(phba, phba->intr_mode); 14327 14328 return PCI_ERS_RESULT_RECOVERED; 14329 } 14330 14331 /** 14332 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 14333 * @pdev: pointer to PCI device 14334 * 14335 * This routine is called from the PCI subsystem for error handling to device 14336 * with SLI-3 interface spec. It is called when kernel error recovery tells 14337 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 14338 * error recovery. After this call, traffic can start to flow from this device 14339 * again. 14340 */ 14341 static void 14342 lpfc_io_resume_s3(struct pci_dev *pdev) 14343 { 14344 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14345 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14346 14347 /* Bring device online, it will be no-op for non-fatal error resume */ 14348 lpfc_online(phba); 14349 } 14350 14351 /** 14352 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 14353 * @phba: pointer to lpfc hba data structure. 14354 * 14355 * returns the number of ELS/CT IOCBs to reserve 14356 **/ 14357 int 14358 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 14359 { 14360 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 14361 14362 if (phba->sli_rev == LPFC_SLI_REV4) { 14363 if (max_xri <= 100) 14364 return 10; 14365 else if (max_xri <= 256) 14366 return 25; 14367 else if (max_xri <= 512) 14368 return 50; 14369 else if (max_xri <= 1024) 14370 return 100; 14371 else if (max_xri <= 1536) 14372 return 150; 14373 else if (max_xri <= 2048) 14374 return 200; 14375 else 14376 return 250; 14377 } else 14378 return 0; 14379 } 14380 14381 /** 14382 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 14383 * @phba: pointer to lpfc hba data structure. 14384 * 14385 * returns the number of ELS/CT + NVMET IOCBs to reserve 14386 **/ 14387 int 14388 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 14389 { 14390 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 14391 14392 if (phba->nvmet_support) 14393 max_xri += LPFC_NVMET_BUF_POST; 14394 return max_xri; 14395 } 14396 14397 14398 static int 14399 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 14400 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 14401 const struct firmware *fw) 14402 { 14403 int rc; 14404 u8 sli_family; 14405 14406 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 14407 /* Three cases: (1) FW was not supported on the detected adapter. 14408 * (2) FW update has been locked out administratively. 14409 * (3) Some other error during FW update. 
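 * These cases map to the -EINVAL, -EACCES and -EIO return values,
 * respectively.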
14410 * In each case, an unmaskable message is written to the console 14411 * for admin diagnosis. 14412 */ 14413 if (offset == ADD_STATUS_FW_NOT_SUPPORTED || 14414 (sli_family == LPFC_SLI_INTF_FAMILY_G6 && 14415 magic_number != MAGIC_NUMBER_G6) || 14416 (sli_family == LPFC_SLI_INTF_FAMILY_G7 && 14417 magic_number != MAGIC_NUMBER_G7) || 14418 (sli_family == LPFC_SLI_INTF_FAMILY_G7P && 14419 magic_number != MAGIC_NUMBER_G7P)) { 14420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14421 "3030 This firmware version is not supported on" 14422 " this HBA model. Device:%x Magic:%x Type:%x " 14423 "ID:%x Size %d %zd\n", 14424 phba->pcidev->device, magic_number, ftype, fid, 14425 fsize, fw->size); 14426 rc = -EINVAL; 14427 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { 14428 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14429 "3021 Firmware downloads have been prohibited " 14430 "by a system configuration setting on " 14431 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14432 "%zd\n", 14433 phba->pcidev->device, magic_number, ftype, fid, 14434 fsize, fw->size); 14435 rc = -EACCES; 14436 } else { 14437 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14438 "3022 FW Download failed. Add Status x%x " 14439 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14440 "%zd\n", 14441 offset, phba->pcidev->device, magic_number, 14442 ftype, fid, fsize, fw->size); 14443 rc = -EIO; 14444 } 14445 return rc; 14446 } 14447 14448 /** 14449 * lpfc_write_firmware - attempt to write a firmware image to the port 14450 * @fw: pointer to firmware image returned from request_firmware. 14451 * @context: pointer to firmware image returned from request_firmware. 14452 * 14453 **/ 14454 static void 14455 lpfc_write_firmware(const struct firmware *fw, void *context) 14456 { 14457 struct lpfc_hba *phba = (struct lpfc_hba *)context; 14458 char fwrev[FW_REV_STR_SIZE]; 14459 struct lpfc_grp_hdr *image; 14460 struct list_head dma_buffer_list; 14461 int i, rc = 0; 14462 struct lpfc_dmabuf *dmabuf, *next; 14463 uint32_t offset = 0, temp_offset = 0; 14464 uint32_t magic_number, ftype, fid, fsize; 14465 14466 /* It can be null in no-wait mode, sanity check */ 14467 if (!fw) { 14468 rc = -ENXIO; 14469 goto out; 14470 } 14471 image = (struct lpfc_grp_hdr *)fw->data; 14472 14473 magic_number = be32_to_cpu(image->magic_number); 14474 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 14475 fid = bf_get_be32(lpfc_grp_hdr_id, image); 14476 fsize = be32_to_cpu(image->size); 14477 14478 INIT_LIST_HEAD(&dma_buffer_list); 14479 lpfc_decode_firmware_rev(phba, fwrev, 1); 14480 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 14481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14482 "3023 Updating Firmware, Current Version:%s " 14483 "New Version:%s\n", 14484 fwrev, image->revision); 14485 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 14486 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 14487 GFP_KERNEL); 14488 if (!dmabuf) { 14489 rc = -ENOMEM; 14490 goto release_out; 14491 } 14492 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 14493 SLI4_PAGE_SIZE, 14494 &dmabuf->phys, 14495 GFP_KERNEL); 14496 if (!dmabuf->virt) { 14497 kfree(dmabuf); 14498 rc = -ENOMEM; 14499 goto release_out; 14500 } 14501 list_add_tail(&dmabuf->list, &dma_buffer_list); 14502 } 14503 while (offset < fw->size) { 14504 temp_offset = offset; 14505 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 14506 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 14507 memcpy(dmabuf->virt, 14508 fw->data + temp_offset, 14509 fw->size - temp_offset); 
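/*
 * Final partial page of the image copied; mark the image consumed
 * and stop filling buffers so lpfc_wr_object() writes what has been
 * staged so far.
 */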
14510 temp_offset = fw->size; 14511 break; 14512 } 14513 memcpy(dmabuf->virt, fw->data + temp_offset, 14514 SLI4_PAGE_SIZE); 14515 temp_offset += SLI4_PAGE_SIZE; 14516 } 14517 rc = lpfc_wr_object(phba, &dma_buffer_list, 14518 (fw->size - offset), &offset); 14519 if (rc) { 14520 rc = lpfc_log_write_firmware_error(phba, offset, 14521 magic_number, 14522 ftype, 14523 fid, 14524 fsize, 14525 fw); 14526 goto release_out; 14527 } 14528 } 14529 rc = offset; 14530 } else 14531 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14532 "3029 Skipped Firmware update, Current " 14533 "Version:%s New Version:%s\n", 14534 fwrev, image->revision); 14535 14536 release_out: 14537 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 14538 list_del(&dmabuf->list); 14539 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 14540 dmabuf->virt, dmabuf->phys); 14541 kfree(dmabuf); 14542 } 14543 release_firmware(fw); 14544 out: 14545 if (rc < 0) 14546 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14547 "3062 Firmware update error, status %d.\n", rc); 14548 else 14549 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14550 "3024 Firmware update success: size %d.\n", rc); 14551 } 14552 14553 /** 14554 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 14555 * @phba: pointer to lpfc hba data structure. 14556 * @fw_upgrade: which firmware to update. 14557 * 14558 * This routine is called to perform Linux generic firmware upgrade on device 14559 * that supports such feature. 14560 **/ 14561 int 14562 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 14563 { 14564 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 14565 int ret; 14566 const struct firmware *fw; 14567 14568 /* Only supported on SLI4 interface type 2 for now */ 14569 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 14570 LPFC_SLI_INTF_IF_TYPE_2) 14571 return -EPERM; 14572 14573 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 14574 14575 if (fw_upgrade == INT_FW_UPGRADE) { 14576 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, 14577 file_name, &phba->pcidev->dev, 14578 GFP_KERNEL, (void *)phba, 14579 lpfc_write_firmware); 14580 } else if (fw_upgrade == RUN_FW_UPGRADE) { 14581 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 14582 if (!ret) 14583 lpfc_write_firmware(fw, (void *)phba); 14584 } else { 14585 ret = -EINVAL; 14586 } 14587 14588 return ret; 14589 } 14590 14591 /** 14592 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 14593 * @pdev: pointer to PCI device 14594 * @pid: pointer to PCI device identifier 14595 * 14596 * This routine is called from the kernel's PCI subsystem to device with 14597 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14598 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 14599 * information of the device and driver to see if the driver state that it 14600 * can support this kind of device. If the match is successful, the driver 14601 * core invokes this routine. If this routine determines it can claim the HBA, 14602 * it does all the initialization that it needs to do to handle the HBA 14603 * properly. 
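 * In outline, the body below enables the PCI function, maps the SLI-4
 * register BARs, sets up the SLI-4 and common driver resources, brings up
 * MSI-X/MSI/INTx interrupts, creates the SCSI host, runs
 * lpfc_sli4_hba_setup(), and finishes with NVMe localport registration,
 * an optional firmware update, static vport creation and CPU hotplug
 * registration.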
14604 * 14605 * Return code 14606 * 0 - driver can claim the device 14607 * negative value - driver can not claim the device 14608 **/ 14609 static int 14610 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 14611 { 14612 struct lpfc_hba *phba; 14613 struct lpfc_vport *vport = NULL; 14614 struct Scsi_Host *shost = NULL; 14615 int error; 14616 uint32_t cfg_mode, intr_mode; 14617 14618 /* Allocate memory for HBA structure */ 14619 phba = lpfc_hba_alloc(pdev); 14620 if (!phba) 14621 return -ENOMEM; 14622 14623 INIT_LIST_HEAD(&phba->poll_list); 14624 14625 /* Perform generic PCI device enabling operation */ 14626 error = lpfc_enable_pci_dev(phba); 14627 if (error) 14628 goto out_free_phba; 14629 14630 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 14631 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 14632 if (error) 14633 goto out_disable_pci_dev; 14634 14635 /* Set up SLI-4 specific device PCI memory space */ 14636 error = lpfc_sli4_pci_mem_setup(phba); 14637 if (error) { 14638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14639 "1410 Failed to set up pci memory space.\n"); 14640 goto out_disable_pci_dev; 14641 } 14642 14643 /* Set up SLI-4 Specific device driver resources */ 14644 error = lpfc_sli4_driver_resource_setup(phba); 14645 if (error) { 14646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14647 "1412 Failed to set up driver resource.\n"); 14648 goto out_unset_pci_mem_s4; 14649 } 14650 14651 INIT_LIST_HEAD(&phba->active_rrq_list); 14652 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 14653 14654 /* Set up common device driver resources */ 14655 error = lpfc_setup_driver_resource_phase2(phba); 14656 if (error) { 14657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14658 "1414 Failed to set up driver resource.\n"); 14659 goto out_unset_driver_resource_s4; 14660 } 14661 14662 /* Get the default values for Model Name and Description */ 14663 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 14664 14665 /* Now, trying to enable interrupt and bring up the device */ 14666 cfg_mode = phba->cfg_use_msi; 14667 14668 /* Put device to a known state before enabling interrupt */ 14669 phba->pport = NULL; 14670 lpfc_stop_port(phba); 14671 14672 /* Init cpu_map array */ 14673 lpfc_cpu_map_array_init(phba); 14674 14675 /* Init hba_eq_hdl array */ 14676 lpfc_hba_eq_hdl_array_init(phba); 14677 14678 /* Configure and enable interrupt */ 14679 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 14680 if (intr_mode == LPFC_INTR_ERROR) { 14681 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14682 "0426 Failed to enable interrupt.\n"); 14683 error = -ENODEV; 14684 goto out_unset_driver_resource; 14685 } 14686 /* Default to single EQ for non-MSI-X */ 14687 if (phba->intr_type != MSIX) { 14688 phba->cfg_irq_chann = 1; 14689 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14690 if (phba->nvmet_support) 14691 phba->cfg_nvmet_mrq = 1; 14692 } 14693 } 14694 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 14695 14696 /* Create SCSI host to the physical port */ 14697 error = lpfc_create_shost(phba); 14698 if (error) { 14699 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14700 "1415 Failed to create scsi host.\n"); 14701 goto out_disable_intr; 14702 } 14703 vport = phba->pport; 14704 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 14705 14706 /* Configure sysfs attributes */ 14707 error = lpfc_alloc_sysfs_attr(vport); 14708 if (error) { 14709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14710 "1416 Failed to allocate sysfs attr\n"); 14711 goto 
out_destroy_shost; 14712 } 14713 14714 /* Set up SLI-4 HBA */ 14715 if (lpfc_sli4_hba_setup(phba)) { 14716 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14717 "1421 Failed to set up hba\n"); 14718 error = -ENODEV; 14719 goto out_free_sysfs_attr; 14720 } 14721 14722 /* Log the current active interrupt mode */ 14723 phba->intr_mode = intr_mode; 14724 lpfc_log_intr_mode(phba, intr_mode); 14725 14726 /* Perform post initialization setup */ 14727 lpfc_post_init_setup(phba); 14728 14729 /* NVME support in FW earlier in the driver load corrects the 14730 * FC4 type making a check for nvme_support unnecessary. 14731 */ 14732 if (phba->nvmet_support == 0) { 14733 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14734 /* Create NVME binding with nvme_fc_transport. This 14735 * ensures the vport is initialized. If the localport 14736 * create fails, it should not unload the driver to 14737 * support field issues. 14738 */ 14739 error = lpfc_nvme_create_localport(vport); 14740 if (error) { 14741 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14742 "6004 NVME registration " 14743 "failed, error x%x\n", 14744 error); 14745 } 14746 } 14747 } 14748 14749 /* check for firmware upgrade or downgrade */ 14750 if (phba->cfg_request_firmware_upgrade) 14751 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 14752 14753 /* Check if there are static vports to be created. */ 14754 lpfc_create_static_vport(phba); 14755 14756 /* Enable RAS FW log support */ 14757 lpfc_sli4_ras_setup(phba); 14758 14759 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 14760 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 14761 14762 return 0; 14763 14764 out_free_sysfs_attr: 14765 lpfc_free_sysfs_attr(vport); 14766 out_destroy_shost: 14767 lpfc_destroy_shost(phba); 14768 out_disable_intr: 14769 lpfc_sli4_disable_intr(phba); 14770 out_unset_driver_resource: 14771 lpfc_unset_driver_resource_phase2(phba); 14772 out_unset_driver_resource_s4: 14773 lpfc_sli4_driver_resource_unset(phba); 14774 out_unset_pci_mem_s4: 14775 lpfc_sli4_pci_mem_unset(phba); 14776 out_disable_pci_dev: 14777 lpfc_disable_pci_dev(phba); 14778 if (shost) 14779 scsi_host_put(shost); 14780 out_free_phba: 14781 lpfc_hba_free(phba); 14782 return error; 14783 } 14784 14785 /** 14786 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 14787 * @pdev: pointer to PCI device 14788 * 14789 * This routine is called from the kernel's PCI subsystem to device with 14790 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14791 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14792 * device to be removed from the PCI subsystem properly. 
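 * Teardown runs roughly in reverse of probe: vports are terminated and the
 * SCSI host removed first, then node and transport cleanup (including the
 * NVMe localport and NVMET targetport), lpfc_sli4_hba_unset(), and finally
 * the driver resources, register mappings and PCI function are released.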
14793 **/ 14794 static void 14795 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 14796 { 14797 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14798 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14799 struct lpfc_vport **vports; 14800 struct lpfc_hba *phba = vport->phba; 14801 int i; 14802 14803 /* Mark the device unloading flag */ 14804 spin_lock_irq(&phba->hbalock); 14805 vport->load_flag |= FC_UNLOADING; 14806 spin_unlock_irq(&phba->hbalock); 14807 if (phba->cgn_i) 14808 lpfc_unreg_congestion_buf(phba); 14809 14810 lpfc_free_sysfs_attr(vport); 14811 14812 /* Release all the vports against this physical port */ 14813 vports = lpfc_create_vport_work_array(phba); 14814 if (vports != NULL) 14815 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14816 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14817 continue; 14818 fc_vport_terminate(vports[i]->fc_vport); 14819 } 14820 lpfc_destroy_vport_work_array(phba, vports); 14821 14822 /* Remove FC host with the physical port */ 14823 fc_remove_host(shost); 14824 scsi_remove_host(shost); 14825 14826 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 14827 * localports are destroyed after to cleanup all transport memory. 14828 */ 14829 lpfc_cleanup(vport); 14830 lpfc_nvmet_destroy_targetport(phba); 14831 lpfc_nvme_destroy_localport(vport); 14832 14833 /* De-allocate multi-XRI pools */ 14834 if (phba->cfg_xri_rebalancing) 14835 lpfc_destroy_multixri_pools(phba); 14836 14837 /* 14838 * Bring down the SLI Layer. This step disables all interrupts, 14839 * clears the rings, discards all mailbox commands, and resets 14840 * the HBA FCoE function. 14841 */ 14842 lpfc_debugfs_terminate(vport); 14843 14844 lpfc_stop_hba_timers(phba); 14845 spin_lock_irq(&phba->port_list_lock); 14846 list_del_init(&vport->listentry); 14847 spin_unlock_irq(&phba->port_list_lock); 14848 14849 /* Perform scsi free before driver resource_unset since scsi 14850 * buffers are released to their corresponding pools here. 14851 */ 14852 lpfc_io_free(phba); 14853 lpfc_free_iocb_list(phba); 14854 lpfc_sli4_hba_unset(phba); 14855 14856 lpfc_unset_driver_resource_phase2(phba); 14857 lpfc_sli4_driver_resource_unset(phba); 14858 14859 /* Unmap adapter Control and Doorbell registers */ 14860 lpfc_sli4_pci_mem_unset(phba); 14861 14862 /* Release PCI resources and disable device's PCI function */ 14863 scsi_host_put(shost); 14864 lpfc_disable_pci_dev(phba); 14865 14866 /* Finally, free the driver's device data structure */ 14867 lpfc_hba_free(phba); 14868 14869 return; 14870 } 14871 14872 /** 14873 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 14874 * @dev_d: pointer to device 14875 * 14876 * This routine is called from the kernel's PCI subsystem to support system 14877 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 14878 * this method, it quiesces the device by stopping the driver's worker 14879 * thread for the device, turning off device's interrupt and DMA, and bring 14880 * the device offline. Note that as the driver implements the minimum PM 14881 * requirements to a power-aware driver's PM support for suspend/resume -- all 14882 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 14883 * method call will be treated as SUSPEND and the driver will fully 14884 * reinitialize its device during resume() method call, the driver will set 14885 * device to PCI_D3hot state in PCI config space instead of setting it 14886 * according to the @msg provided by the PM. 
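 * Besides stopping the worker thread and disabling interrupts, the SLI-4
 * suspend path also tears down the queue set with lpfc_sli4_queue_destroy();
 * the queues are rebuilt when resume brings the port back online.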
14887 *
14888 * Return code
14889 * 0 - driver suspended the device
14890 * Error otherwise
14891 **/
14892 static int __maybe_unused
14893 lpfc_pci_suspend_one_s4(struct device *dev_d)
14894 {
14895 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14896 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14897
14898 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14899 "2843 PCI device Power Management suspend.\n");
14900
14901 /* Bring down the device */
14902 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14903 lpfc_offline(phba);
14904 kthread_stop(phba->worker_thread);
14905
14906 /* Disable interrupt from device */
14907 lpfc_sli4_disable_intr(phba);
14908 lpfc_sli4_queue_destroy(phba);
14909
14910 return 0;
14911 }
14912
14913 /**
14914 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
14915 * @dev_d: pointer to device
14916 *
14917 * This routine is called from the kernel's PCI subsystem to support system
14918 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
14919 * this method, it restores the device's PCI config space state and fully
14920 * reinitializes the device and brings it online. Note that as the driver
14921 * implements the minimum PM requirements to a power-aware driver's PM for
14922 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
14923 * to the suspend() method call will be treated as SUSPEND and the driver
14924 * will fully reinitialize its device during resume() method call, the device
14925 * will be set to PCI_D0 directly in PCI config space before restoring the
14926 * state.
14927 *
14928 * Return code
14929 * 0 - driver resumed the device
14930 * Error otherwise
14931 **/
14932 static int __maybe_unused
14933 lpfc_pci_resume_one_s4(struct device *dev_d)
14934 {
14935 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14936 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14937 uint32_t intr_mode;
14938 int error;
14939
14940 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14941 "0292 PCI device Power Management resume.\n");
14942
14943 /* Startup the kernel thread for this host adapter. */
14944 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14945 "lpfc_worker_%d", phba->brd_no);
14946 if (IS_ERR(phba->worker_thread)) {
14947 error = PTR_ERR(phba->worker_thread);
14948 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14949 "0293 PM resume failed to start worker "
14950 "thread: error=x%x.\n", error);
14951 return error;
14952 }
14953
14954 /* Configure and enable interrupt */
14955 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
14956 if (intr_mode == LPFC_INTR_ERROR) {
14957 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14958 "0294 PM resume Failed to enable interrupt\n");
14959 return -EIO;
14960 } else
14961 phba->intr_mode = intr_mode;
14962
14963 /* Restart HBA and bring it online */
14964 lpfc_sli_brdrestart(phba);
14965 lpfc_online(phba);
14966
14967 /* Log the current active interrupt mode */
14968 lpfc_log_intr_mode(phba, phba->intr_mode);
14969
14970 return 0;
14971 }
14972
14973 /**
14974 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
14975 * @phba: pointer to lpfc hba data structure.
14976 *
14977 * This routine is called to prepare the SLI4 device for PCI slot recover. It
14978 * aborts all the outstanding SCSI I/Os to the pci device.
14979 **/ 14980 static void 14981 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 14982 { 14983 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14984 "2828 PCI channel I/O abort preparing for recovery\n"); 14985 /* 14986 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 14987 * and let the SCSI mid-layer to retry them to recover. 14988 */ 14989 lpfc_sli_abort_fcp_rings(phba); 14990 } 14991 14992 /** 14993 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 14994 * @phba: pointer to lpfc hba data structure. 14995 * 14996 * This routine is called to prepare the SLI4 device for PCI slot reset. It 14997 * disables the device interrupt and pci device, and aborts the internal FCP 14998 * pending I/Os. 14999 **/ 15000 static void 15001 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 15002 { 15003 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15004 "2826 PCI channel disable preparing for reset\n"); 15005 15006 /* Block any management I/Os to the device */ 15007 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 15008 15009 /* Block all SCSI devices' I/Os on the host */ 15010 lpfc_scsi_dev_block(phba); 15011 15012 /* Flush all driver's outstanding I/Os as we are to reset */ 15013 lpfc_sli_flush_io_rings(phba); 15014 15015 /* stop all timers */ 15016 lpfc_stop_hba_timers(phba); 15017 15018 /* Disable interrupt and pci device */ 15019 lpfc_sli4_disable_intr(phba); 15020 lpfc_sli4_queue_destroy(phba); 15021 pci_disable_device(phba->pcidev); 15022 } 15023 15024 /** 15025 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 15026 * @phba: pointer to lpfc hba data structure. 15027 * 15028 * This routine is called to prepare the SLI4 device for PCI slot permanently 15029 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 15030 * pending I/Os. 15031 **/ 15032 static void 15033 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 15034 { 15035 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15036 "2827 PCI channel permanent disable for failure\n"); 15037 15038 /* Block all SCSI devices' I/Os on the host */ 15039 lpfc_scsi_dev_block(phba); 15040 15041 /* stop all timers */ 15042 lpfc_stop_hba_timers(phba); 15043 15044 /* Clean up all driver's outstanding I/Os */ 15045 lpfc_sli_flush_io_rings(phba); 15046 } 15047 15048 /** 15049 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 15050 * @pdev: pointer to PCI device. 15051 * @state: the current PCI connection state. 15052 * 15053 * This routine is called from the PCI subsystem for error handling to device 15054 * with SLI-4 interface spec. This function is called by the PCI subsystem 15055 * after a PCI bus error affecting this device has been detected. When this 15056 * function is invoked, it will need to stop all the I/Os and interrupt(s) 15057 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 15058 * for the PCI subsystem to perform proper recovery as desired. 
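 * Unlike the SLI-3 handler, this path also sets HBA_PCI_ERR in
 * phba->hba_flag for the frozen, permanent-failure and unknown states so
 * other code can tell that a PCI channel error is being recovered.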
15059 * 15060 * Return codes 15061 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15062 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15063 **/ 15064 static pci_ers_result_t 15065 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 15066 { 15067 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15068 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15069 15070 switch (state) { 15071 case pci_channel_io_normal: 15072 /* Non-fatal error, prepare for recovery */ 15073 lpfc_sli4_prep_dev_for_recover(phba); 15074 return PCI_ERS_RESULT_CAN_RECOVER; 15075 case pci_channel_io_frozen: 15076 phba->hba_flag |= HBA_PCI_ERR; 15077 /* Fatal error, prepare for slot reset */ 15078 lpfc_sli4_prep_dev_for_reset(phba); 15079 return PCI_ERS_RESULT_NEED_RESET; 15080 case pci_channel_io_perm_failure: 15081 phba->hba_flag |= HBA_PCI_ERR; 15082 /* Permanent failure, prepare for device down */ 15083 lpfc_sli4_prep_dev_for_perm_failure(phba); 15084 return PCI_ERS_RESULT_DISCONNECT; 15085 default: 15086 phba->hba_flag |= HBA_PCI_ERR; 15087 /* Unknown state, prepare and request slot reset */ 15088 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15089 "2825 Unknown PCI error state: x%x\n", state); 15090 lpfc_sli4_prep_dev_for_reset(phba); 15091 return PCI_ERS_RESULT_NEED_RESET; 15092 } 15093 } 15094 15095 /** 15096 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 15097 * @pdev: pointer to PCI device. 15098 * 15099 * This routine is called from the PCI subsystem for error handling to device 15100 * with SLI-4 interface spec. It is called after PCI bus has been reset to 15101 * restart the PCI card from scratch, as if from a cold-boot. During the 15102 * PCI subsystem error recovery, after the driver returns 15103 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 15104 * recovery and then call this routine before calling the .resume method to 15105 * recover the device. This function will initialize the HBA device, enable 15106 * the interrupt, but it will just put the HBA to offline state without 15107 * passing any I/O traffic. 15108 * 15109 * Return codes 15110 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15111 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15112 */ 15113 static pci_ers_result_t 15114 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 15115 { 15116 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15117 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15118 struct lpfc_sli *psli = &phba->sli; 15119 uint32_t intr_mode; 15120 15121 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 15122 if (pci_enable_device_mem(pdev)) { 15123 printk(KERN_ERR "lpfc: Cannot re-enable " 15124 "PCI device after reset.\n"); 15125 return PCI_ERS_RESULT_DISCONNECT; 15126 } 15127 15128 pci_restore_state(pdev); 15129 15130 phba->hba_flag &= ~HBA_PCI_ERR; 15131 /* 15132 * As the new kernel behavior of pci_restore_state() API call clears 15133 * device saved_state flag, need to save the restored state again. 
15134 */ 15135 pci_save_state(pdev); 15136 15137 if (pdev->is_busmaster) 15138 pci_set_master(pdev); 15139 15140 spin_lock_irq(&phba->hbalock); 15141 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 15142 spin_unlock_irq(&phba->hbalock); 15143 15144 /* Configure and enable interrupt */ 15145 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 15146 if (intr_mode == LPFC_INTR_ERROR) { 15147 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15148 "2824 Cannot re-enable interrupt after " 15149 "slot reset.\n"); 15150 return PCI_ERS_RESULT_DISCONNECT; 15151 } else 15152 phba->intr_mode = intr_mode; 15153 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 15154 15155 /* Log the current active interrupt mode */ 15156 lpfc_log_intr_mode(phba, phba->intr_mode); 15157 15158 return PCI_ERS_RESULT_RECOVERED; 15159 } 15160 15161 /** 15162 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 15163 * @pdev: pointer to PCI device 15164 * 15165 * This routine is called from the PCI subsystem for error handling to device 15166 * with SLI-4 interface spec. It is called when kernel error recovery tells 15167 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 15168 * error recovery. After this call, traffic can start to flow from this device 15169 * again. 15170 **/ 15171 static void 15172 lpfc_io_resume_s4(struct pci_dev *pdev) 15173 { 15174 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15175 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15176 15177 /* 15178 * In case of slot reset, as function reset is performed through 15179 * mailbox command which needs DMA to be enabled, this operation 15180 * has to be moved to the io resume phase. Taking device offline 15181 * will perform the necessary cleanup. 15182 */ 15183 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 15184 /* Perform device reset */ 15185 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 15186 lpfc_offline(phba); 15187 lpfc_sli_brdrestart(phba); 15188 /* Bring the device back online */ 15189 lpfc_online(phba); 15190 } 15191 } 15192 15193 /** 15194 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 15195 * @pdev: pointer to PCI device 15196 * @pid: pointer to PCI device identifier 15197 * 15198 * This routine is to be registered to the kernel's PCI subsystem. When an 15199 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 15200 * at PCI device-specific information of the device and driver to see if the 15201 * driver state that it can support this kind of device. If the match is 15202 * successful, the driver core invokes this routine. This routine dispatches 15203 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 15204 * do all the initialization that it needs to do to handle the HBA device 15205 * properly. 
15206 * 15207 * Return code 15208 * 0 - driver can claim the device 15209 * negative value - driver can not claim the device 15210 **/ 15211 static int 15212 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 15213 { 15214 int rc; 15215 struct lpfc_sli_intf intf; 15216 15217 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 15218 return -ENODEV; 15219 15220 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 15221 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 15222 rc = lpfc_pci_probe_one_s4(pdev, pid); 15223 else 15224 rc = lpfc_pci_probe_one_s3(pdev, pid); 15225 15226 return rc; 15227 } 15228 15229 /** 15230 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 15231 * @pdev: pointer to PCI device 15232 * 15233 * This routine is to be registered to the kernel's PCI subsystem. When an 15234 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 15235 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 15236 * remove routine, which will perform all the necessary cleanup for the 15237 * device to be removed from the PCI subsystem properly. 15238 **/ 15239 static void 15240 lpfc_pci_remove_one(struct pci_dev *pdev) 15241 { 15242 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15243 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15244 15245 switch (phba->pci_dev_grp) { 15246 case LPFC_PCI_DEV_LP: 15247 lpfc_pci_remove_one_s3(pdev); 15248 break; 15249 case LPFC_PCI_DEV_OC: 15250 lpfc_pci_remove_one_s4(pdev); 15251 break; 15252 default: 15253 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15254 "1424 Invalid PCI device group: 0x%x\n", 15255 phba->pci_dev_grp); 15256 break; 15257 } 15258 return; 15259 } 15260 15261 /** 15262 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 15263 * @dev: pointer to device 15264 * 15265 * This routine is to be registered to the kernel's PCI subsystem to support 15266 * system Power Management (PM). When PM invokes this method, it dispatches 15267 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 15268 * suspend the device. 15269 * 15270 * Return code 15271 * 0 - driver suspended the device 15272 * Error otherwise 15273 **/ 15274 static int __maybe_unused 15275 lpfc_pci_suspend_one(struct device *dev) 15276 { 15277 struct Scsi_Host *shost = dev_get_drvdata(dev); 15278 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15279 int rc = -ENODEV; 15280 15281 switch (phba->pci_dev_grp) { 15282 case LPFC_PCI_DEV_LP: 15283 rc = lpfc_pci_suspend_one_s3(dev); 15284 break; 15285 case LPFC_PCI_DEV_OC: 15286 rc = lpfc_pci_suspend_one_s4(dev); 15287 break; 15288 default: 15289 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15290 "1425 Invalid PCI device group: 0x%x\n", 15291 phba->pci_dev_grp); 15292 break; 15293 } 15294 return rc; 15295 } 15296 15297 /** 15298 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 15299 * @dev: pointer to device 15300 * 15301 * This routine is to be registered to the kernel's PCI subsystem to support 15302 * system Power Management (PM). When PM invokes this method, it dispatches 15303 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 15304 * resume the device. 
15305 * 15306 * Return code 15307 * 0 - driver suspended the device 15308 * Error otherwise 15309 **/ 15310 static int __maybe_unused 15311 lpfc_pci_resume_one(struct device *dev) 15312 { 15313 struct Scsi_Host *shost = dev_get_drvdata(dev); 15314 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15315 int rc = -ENODEV; 15316 15317 switch (phba->pci_dev_grp) { 15318 case LPFC_PCI_DEV_LP: 15319 rc = lpfc_pci_resume_one_s3(dev); 15320 break; 15321 case LPFC_PCI_DEV_OC: 15322 rc = lpfc_pci_resume_one_s4(dev); 15323 break; 15324 default: 15325 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15326 "1426 Invalid PCI device group: 0x%x\n", 15327 phba->pci_dev_grp); 15328 break; 15329 } 15330 return rc; 15331 } 15332 15333 /** 15334 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 15335 * @pdev: pointer to PCI device. 15336 * @state: the current PCI connection state. 15337 * 15338 * This routine is registered to the PCI subsystem for error handling. This 15339 * function is called by the PCI subsystem after a PCI bus error affecting 15340 * this device has been detected. When this routine is invoked, it dispatches 15341 * the action to the proper SLI-3 or SLI-4 device error detected handling 15342 * routine, which will perform the proper error detected operation. 15343 * 15344 * Return codes 15345 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15346 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15347 **/ 15348 static pci_ers_result_t 15349 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 15350 { 15351 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15352 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15353 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15354 15355 if (phba->link_state == LPFC_HBA_ERROR && 15356 phba->hba_flag & HBA_IOQ_FLUSH) 15357 return PCI_ERS_RESULT_NEED_RESET; 15358 15359 switch (phba->pci_dev_grp) { 15360 case LPFC_PCI_DEV_LP: 15361 rc = lpfc_io_error_detected_s3(pdev, state); 15362 break; 15363 case LPFC_PCI_DEV_OC: 15364 rc = lpfc_io_error_detected_s4(pdev, state); 15365 break; 15366 default: 15367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15368 "1427 Invalid PCI device group: 0x%x\n", 15369 phba->pci_dev_grp); 15370 break; 15371 } 15372 return rc; 15373 } 15374 15375 /** 15376 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 15377 * @pdev: pointer to PCI device. 15378 * 15379 * This routine is registered to the PCI subsystem for error handling. This 15380 * function is called after PCI bus has been reset to restart the PCI card 15381 * from scratch, as if from a cold-boot. When this routine is invoked, it 15382 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 15383 * routine, which will perform the proper device reset. 
15384 * 15385 * Return codes 15386 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15387 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15388 **/ 15389 static pci_ers_result_t 15390 lpfc_io_slot_reset(struct pci_dev *pdev) 15391 { 15392 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15393 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15394 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15395 15396 switch (phba->pci_dev_grp) { 15397 case LPFC_PCI_DEV_LP: 15398 rc = lpfc_io_slot_reset_s3(pdev); 15399 break; 15400 case LPFC_PCI_DEV_OC: 15401 rc = lpfc_io_slot_reset_s4(pdev); 15402 break; 15403 default: 15404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15405 "1428 Invalid PCI device group: 0x%x\n", 15406 phba->pci_dev_grp); 15407 break; 15408 } 15409 return rc; 15410 } 15411 15412 /** 15413 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 15414 * @pdev: pointer to PCI device 15415 * 15416 * This routine is registered to the PCI subsystem for error handling. It 15417 * is called when kernel error recovery tells the lpfc driver that it is 15418 * OK to resume normal PCI operation after PCI bus error recovery. When 15419 * this routine is invoked, it dispatches the action to the proper SLI-3 15420 * or SLI-4 device io_resume routine, which will resume the device operation. 15421 **/ 15422 static void 15423 lpfc_io_resume(struct pci_dev *pdev) 15424 { 15425 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15426 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15427 15428 switch (phba->pci_dev_grp) { 15429 case LPFC_PCI_DEV_LP: 15430 lpfc_io_resume_s3(pdev); 15431 break; 15432 case LPFC_PCI_DEV_OC: 15433 lpfc_io_resume_s4(pdev); 15434 break; 15435 default: 15436 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15437 "1429 Invalid PCI device group: 0x%x\n", 15438 phba->pci_dev_grp); 15439 break; 15440 } 15441 return; 15442 } 15443 15444 /** 15445 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 15446 * @phba: pointer to lpfc hba data structure. 15447 * 15448 * This routine checks to see if OAS is supported for this adapter. If 15449 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 15450 * the enable oas flag is cleared and the pool created for OAS device data 15451 * is destroyed. 15452 * 15453 **/ 15454 static void 15455 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 15456 { 15457 15458 if (!phba->cfg_EnableXLane) 15459 return; 15460 15461 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 15462 phba->cfg_fof = 1; 15463 } else { 15464 phba->cfg_fof = 0; 15465 mempool_destroy(phba->device_data_mem_pool); 15466 phba->device_data_mem_pool = NULL; 15467 } 15468 15469 return; 15470 } 15471 15472 /** 15473 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 15474 * @phba: pointer to lpfc hba data structure. 15475 * 15476 * This routine checks to see if RAS is supported by the adapter. Check the 15477 * function through which RAS support enablement is to be done. 
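 * RAS firmware logging is only reported as hardware-supported on interface
 * type 6 or SLI family G6 parts (ASIC generation >= 0xC), and is enabled
 * only when cfg_ras_fwlog_func matches this PCI function and a non-zero
 * cfg_ras_fwlog_buffsize has been configured.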
15478 **/ 15479 void 15480 lpfc_sli4_ras_init(struct lpfc_hba *phba) 15481 { 15482 /* if ASIC_GEN_NUM >= 0xC) */ 15483 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 15484 LPFC_SLI_INTF_IF_TYPE_6) || 15485 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 15486 LPFC_SLI_INTF_FAMILY_G6)) { 15487 phba->ras_fwlog.ras_hwsupport = true; 15488 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 15489 phba->cfg_ras_fwlog_buffsize) 15490 phba->ras_fwlog.ras_enabled = true; 15491 else 15492 phba->ras_fwlog.ras_enabled = false; 15493 } else { 15494 phba->ras_fwlog.ras_hwsupport = false; 15495 } 15496 } 15497 15498 15499 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 15500 15501 static const struct pci_error_handlers lpfc_err_handler = { 15502 .error_detected = lpfc_io_error_detected, 15503 .slot_reset = lpfc_io_slot_reset, 15504 .resume = lpfc_io_resume, 15505 }; 15506 15507 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one, 15508 lpfc_pci_suspend_one, 15509 lpfc_pci_resume_one); 15510 15511 static struct pci_driver lpfc_driver = { 15512 .name = LPFC_DRIVER_NAME, 15513 .id_table = lpfc_id_table, 15514 .probe = lpfc_pci_probe_one, 15515 .remove = lpfc_pci_remove_one, 15516 .shutdown = lpfc_pci_remove_one, 15517 .driver.pm = &lpfc_pci_pm_ops_one, 15518 .err_handler = &lpfc_err_handler, 15519 }; 15520 15521 static const struct file_operations lpfc_mgmt_fop = { 15522 .owner = THIS_MODULE, 15523 }; 15524 15525 static struct miscdevice lpfc_mgmt_dev = { 15526 .minor = MISC_DYNAMIC_MINOR, 15527 .name = "lpfcmgmt", 15528 .fops = &lpfc_mgmt_fop, 15529 }; 15530 15531 /** 15532 * lpfc_init - lpfc module initialization routine 15533 * 15534 * This routine is to be invoked when the lpfc module is loaded into the 15535 * kernel. The special kernel macro module_init() is used to indicate the 15536 * role of this routine to the kernel as lpfc module entry point. 
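 * Initialization order: register the lpfcmgmt misc device, attach the FC
 * transport templates for the physical port and for vports, initialize the
 * WQE and NVMET command templates, record the number of present CPUs for
 * vector mapping, register the CPU hotplug callbacks, and finally register
 * the PCI driver; failures unwind in the reverse order.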
15537 * 15538 * Return codes 15539 * 0 - successful 15540 * -ENOMEM - FC attach transport failed 15541 * all others - failed 15542 */ 15543 static int __init 15544 lpfc_init(void) 15545 { 15546 int error = 0; 15547 15548 pr_info(LPFC_MODULE_DESC "\n"); 15549 pr_info(LPFC_COPYRIGHT "\n"); 15550 15551 error = misc_register(&lpfc_mgmt_dev); 15552 if (error) 15553 printk(KERN_ERR "Could not register lpfcmgmt device, " 15554 "misc_register returned with status %d", error); 15555 15556 error = -ENOMEM; 15557 lpfc_transport_functions.vport_create = lpfc_vport_create; 15558 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 15559 lpfc_transport_template = 15560 fc_attach_transport(&lpfc_transport_functions); 15561 if (lpfc_transport_template == NULL) 15562 goto unregister; 15563 lpfc_vport_transport_template = 15564 fc_attach_transport(&lpfc_vport_transport_functions); 15565 if (lpfc_vport_transport_template == NULL) { 15566 fc_release_transport(lpfc_transport_template); 15567 goto unregister; 15568 } 15569 lpfc_wqe_cmd_template(); 15570 lpfc_nvmet_cmd_template(); 15571 15572 /* Initialize in case vector mapping is needed */ 15573 lpfc_present_cpu = num_present_cpus(); 15574 15575 lpfc_pldv_detect = false; 15576 15577 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 15578 "lpfc/sli4:online", 15579 lpfc_cpu_online, lpfc_cpu_offline); 15580 if (error < 0) 15581 goto cpuhp_failure; 15582 lpfc_cpuhp_state = error; 15583 15584 error = pci_register_driver(&lpfc_driver); 15585 if (error) 15586 goto unwind; 15587 15588 return error; 15589 15590 unwind: 15591 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15592 cpuhp_failure: 15593 fc_release_transport(lpfc_transport_template); 15594 fc_release_transport(lpfc_vport_transport_template); 15595 unregister: 15596 misc_deregister(&lpfc_mgmt_dev); 15597 15598 return error; 15599 } 15600 15601 void lpfc_dmp_dbg(struct lpfc_hba *phba) 15602 { 15603 unsigned int start_idx; 15604 unsigned int dbg_cnt; 15605 unsigned int temp_idx; 15606 int i; 15607 int j = 0; 15608 unsigned long rem_nsec, iflags; 15609 bool log_verbose = false; 15610 struct lpfc_vport *port_iterator; 15611 15612 /* Don't dump messages if we explicitly set log_verbose for the 15613 * physical port or any vport. 
15614 */ 15615 if (phba->cfg_log_verbose) 15616 return; 15617 15618 spin_lock_irqsave(&phba->port_list_lock, iflags); 15619 list_for_each_entry(port_iterator, &phba->port_list, listentry) { 15620 if (port_iterator->load_flag & FC_UNLOADING) 15621 continue; 15622 if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) { 15623 if (port_iterator->cfg_log_verbose) 15624 log_verbose = true; 15625 15626 scsi_host_put(lpfc_shost_from_vport(port_iterator)); 15627 15628 if (log_verbose) { 15629 spin_unlock_irqrestore(&phba->port_list_lock, 15630 iflags); 15631 return; 15632 } 15633 } 15634 } 15635 spin_unlock_irqrestore(&phba->port_list_lock, iflags); 15636 15637 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) 15638 return; 15639 15640 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; 15641 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); 15642 if (!dbg_cnt) 15643 goto out; 15644 temp_idx = start_idx; 15645 if (dbg_cnt >= DBG_LOG_SZ) { 15646 dbg_cnt = DBG_LOG_SZ; 15647 temp_idx -= 1; 15648 } else { 15649 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { 15650 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; 15651 } else { 15652 if (start_idx < dbg_cnt) 15653 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); 15654 else 15655 start_idx -= dbg_cnt; 15656 } 15657 } 15658 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", 15659 start_idx, temp_idx, dbg_cnt); 15660 15661 for (i = 0; i < dbg_cnt; i++) { 15662 if ((start_idx + i) < DBG_LOG_SZ) 15663 temp_idx = (start_idx + i) % DBG_LOG_SZ; 15664 else 15665 temp_idx = j++; 15666 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); 15667 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", 15668 temp_idx, 15669 (unsigned long)phba->dbg_log[temp_idx].t_ns, 15670 rem_nsec / 1000, 15671 phba->dbg_log[temp_idx].log); 15672 } 15673 out: 15674 atomic_set(&phba->dbg_log_cnt, 0); 15675 atomic_set(&phba->dbg_log_dmping, 0); 15676 } 15677 15678 __printf(2, 3) 15679 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...) 15680 { 15681 unsigned int idx; 15682 va_list args; 15683 int dbg_dmping = atomic_read(&phba->dbg_log_dmping); 15684 struct va_format vaf; 15685 15686 15687 va_start(args, fmt); 15688 if (unlikely(dbg_dmping)) { 15689 vaf.fmt = fmt; 15690 vaf.va = &args; 15691 dev_info(&phba->pcidev->dev, "%pV", &vaf); 15692 va_end(args); 15693 return; 15694 } 15695 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % 15696 DBG_LOG_SZ; 15697 15698 atomic_inc(&phba->dbg_log_cnt); 15699 15700 vscnprintf(phba->dbg_log[idx].log, 15701 sizeof(phba->dbg_log[idx].log), fmt, args); 15702 va_end(args); 15703 15704 phba->dbg_log[idx].t_ns = local_clock(); 15705 } 15706 15707 /** 15708 * lpfc_exit - lpfc module removal routine 15709 * 15710 * This routine is invoked when the lpfc module is removed from the kernel. 15711 * The special kernel macro module_exit() is used to indicate the role of 15712 * this routine to the kernel as lpfc module exit point. 
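 * Teardown mirrors lpfc_init(): the lpfcmgmt misc device is deregistered,
 * the PCI driver is unregistered (detaching any remaining HBAs), the CPU
 * hotplug state is removed, both FC transport templates are released and
 * the HBA index IDR is destroyed.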
15713 */ 15714 static void __exit 15715 lpfc_exit(void) 15716 { 15717 misc_deregister(&lpfc_mgmt_dev); 15718 pci_unregister_driver(&lpfc_driver); 15719 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15720 fc_release_transport(lpfc_transport_template); 15721 fc_release_transport(lpfc_vport_transport_template); 15722 idr_destroy(&lpfc_hba_index); 15723 } 15724 15725 module_init(lpfc_init); 15726 module_exit(lpfc_exit); 15727 MODULE_LICENSE("GPL"); 15728 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 15729 MODULE_AUTHOR("Broadcom"); 15730 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 15731