1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 #include <linux/crash_dump.h> 43 #include <linux/cpu.h> 44 #include <linux/cpuhotplug.h> 45 46 #include <scsi/scsi.h> 47 #include <scsi/scsi_device.h> 48 #include <scsi/scsi_host.h> 49 #include <scsi/scsi_transport_fc.h> 50 #include <scsi/scsi_tcq.h> 51 #include <scsi/fc/fc_fs.h> 52 53 #include "lpfc_hw4.h" 54 #include "lpfc_hw.h" 55 #include "lpfc_sli.h" 56 #include "lpfc_sli4.h" 57 #include "lpfc_nl.h" 58 #include "lpfc_disc.h" 59 #include "lpfc.h" 60 #include "lpfc_scsi.h" 61 #include "lpfc_nvme.h" 62 #include "lpfc_logmsg.h" 63 #include "lpfc_crtn.h" 64 #include "lpfc_vport.h" 65 #include "lpfc_version.h" 66 #include "lpfc_ids.h" 67 68 static enum cpuhp_state lpfc_cpuhp_state; 69 /* Used when mapping IRQ vectors in a driver centric manner */ 70 static uint32_t lpfc_present_cpu; 71 static bool lpfc_pldv_detect; 72 73 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 74 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); 75 static void lpfc_cpuhp_add(struct lpfc_hba *phba); 76 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 77 static int lpfc_post_rcv_buf(struct lpfc_hba *); 78 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 79 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 80 static int lpfc_setup_endian_order(struct lpfc_hba *); 81 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 82 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 83 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 84 static void lpfc_init_sgl_list(struct lpfc_hba *); 85 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 86 static void lpfc_free_active_sgl(struct lpfc_hba *); 87 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 88 static int 
lpfc_hba_down_post_s4(struct lpfc_hba *phba); 89 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 90 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 91 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 92 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 93 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 94 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 95 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 96 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 97 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); 98 99 static struct scsi_transport_template *lpfc_transport_template = NULL; 100 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 101 static DEFINE_IDR(lpfc_hba_index); 102 #define LPFC_NVMET_BUF_POST 254 103 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); 104 105 /** 106 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 107 * @phba: pointer to lpfc hba data structure. 108 * 109 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 110 * mailbox command. It retrieves the revision information from the HBA and 111 * collects the Vital Product Data (VPD) about the HBA for preparing the 112 * configuration of the HBA. 113 * 114 * Return codes: 115 * 0 - success. 116 * -ERESTART - requests the SLI layer to reset the HBA and try again. 117 * Any other value - indicates an error. 118 **/ 119 int 120 lpfc_config_port_prep(struct lpfc_hba *phba) 121 { 122 lpfc_vpd_t *vp = &phba->vpd; 123 int i = 0, rc; 124 LPFC_MBOXQ_t *pmb; 125 MAILBOX_t *mb; 126 char *lpfc_vpd_data = NULL; 127 uint16_t offset = 0; 128 static char licensed[56] = 129 "key unlock for use with gnu public licensed code only\0"; 130 static int init_key = 1; 131 132 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 133 if (!pmb) { 134 phba->link_state = LPFC_HBA_ERROR; 135 return -ENOMEM; 136 } 137 138 mb = &pmb->u.mb; 139 phba->link_state = LPFC_INIT_MBX_CMDS; 140 141 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 142 if (init_key) { 143 uint32_t *ptext = (uint32_t *) licensed; 144 145 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 146 *ptext = cpu_to_be32(*ptext); 147 init_key = 0; 148 } 149 150 lpfc_read_nv(phba, pmb); 151 memset((char*)mb->un.varRDnvp.rsvd3, 0, 152 sizeof (mb->un.varRDnvp.rsvd3)); 153 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 154 sizeof (licensed)); 155 156 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 157 158 if (rc != MBX_SUCCESS) { 159 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 160 "0324 Config Port initialization " 161 "error, mbxCmd x%x READ_NVPARM, " 162 "mbxStatus x%x\n", 163 mb->mbxCommand, mb->mbxStatus); 164 mempool_free(pmb, phba->mbox_mem_pool); 165 return -ERESTART; 166 } 167 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 168 sizeof(phba->wwnn)); 169 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 170 sizeof(phba->wwpn)); 171 } 172 173 /* 174 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 175 * which was already set in lpfc_get_cfgparam() 176 */ 177 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 178 179 /* Setup and issue mailbox READ REV command */ 180 lpfc_read_rev(phba, pmb); 181 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 182 if (rc != MBX_SUCCESS) { 183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 184 "0439 Adapter failed to init, mbxCmd x%x " 185 "READ_REV, mbxStatus x%x\n", 186 mb->mbxCommand, mb->mbxStatus); 187 
mempool_free( pmb, phba->mbox_mem_pool); 188 return -ERESTART; 189 } 190 191 192 /* 193 * The value of rr must be 1 since the driver set the cv field to 1. 194 * This setting requires the FW to set all revision fields. 195 */ 196 if (mb->un.varRdRev.rr == 0) { 197 vp->rev.rBit = 0; 198 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 199 "0440 Adapter failed to init, READ_REV has " 200 "missing revision information.\n"); 201 mempool_free(pmb, phba->mbox_mem_pool); 202 return -ERESTART; 203 } 204 205 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 206 mempool_free(pmb, phba->mbox_mem_pool); 207 return -EINVAL; 208 } 209 210 /* Save information as VPD data */ 211 vp->rev.rBit = 1; 212 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 213 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 214 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 215 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 216 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 217 vp->rev.biuRev = mb->un.varRdRev.biuRev; 218 vp->rev.smRev = mb->un.varRdRev.smRev; 219 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 220 vp->rev.endecRev = mb->un.varRdRev.endecRev; 221 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 222 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 223 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 224 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 225 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 226 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 227 228 /* If the sli feature level is less then 9, we must 229 * tear down all RPIs and VPIs on link down if NPIV 230 * is enabled. 231 */ 232 if (vp->rev.feaLevelHigh < 9) 233 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 234 235 if (lpfc_is_LC_HBA(phba->pcidev->device)) 236 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 237 sizeof (phba->RandomData)); 238 239 /* Get adapter VPD information */ 240 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 241 if (!lpfc_vpd_data) 242 goto out_free_mbox; 243 do { 244 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 245 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 246 247 if (rc != MBX_SUCCESS) { 248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 249 "0441 VPD not present on adapter, " 250 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 251 mb->mbxCommand, mb->mbxStatus); 252 mb->un.varDmp.word_cnt = 0; 253 } 254 /* dump mem may return a zero when finished or we got a 255 * mailbox error, either way we are done. 256 */ 257 if (mb->un.varDmp.word_cnt == 0) 258 break; 259 260 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 261 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 262 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 263 lpfc_vpd_data + offset, 264 mb->un.varDmp.word_cnt); 265 offset += mb->un.varDmp.word_cnt; 266 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 267 268 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 269 270 kfree(lpfc_vpd_data); 271 out_free_mbox: 272 mempool_free(pmb, phba->mbox_mem_pool); 273 return 0; 274 } 275 276 /** 277 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 278 * @phba: pointer to lpfc hba data structure. 279 * @pmboxq: pointer to the driver internal queue element for mailbox command. 280 * 281 * This is the completion handler for driver's configuring asynchronous event 282 * mailbox command to the device. If the mailbox command returns successfully, 283 * it will set internal async event support flag to 1; otherwise, it will 284 * set internal async event support flag to 0. 
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the Option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option ROM version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
349 **/ 350 void 351 lpfc_update_vport_wwn(struct lpfc_vport *vport) 352 { 353 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 354 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 355 356 /* If the soft name exists then update it using the service params */ 357 if (vport->phba->cfg_soft_wwnn) 358 u64_to_wwn(vport->phba->cfg_soft_wwnn, 359 vport->fc_sparam.nodeName.u.wwn); 360 if (vport->phba->cfg_soft_wwpn) 361 u64_to_wwn(vport->phba->cfg_soft_wwpn, 362 vport->fc_sparam.portName.u.wwn); 363 364 /* 365 * If the name is empty or there exists a soft name 366 * then copy the service params name, otherwise use the fc name 367 */ 368 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 369 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 370 sizeof(struct lpfc_name)); 371 else 372 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 373 sizeof(struct lpfc_name)); 374 375 /* 376 * If the port name has changed, then set the Param changes flag 377 * to unreg the login 378 */ 379 if (vport->fc_portname.u.wwn[0] != 0 && 380 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 381 sizeof(struct lpfc_name))) 382 vport->vport_flag |= FAWWPN_PARAM_CHG; 383 384 if (vport->fc_portname.u.wwn[0] == 0 || 385 vport->phba->cfg_soft_wwpn || 386 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 387 vport->vport_flag & FAWWPN_SET) { 388 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 389 sizeof(struct lpfc_name)); 390 vport->vport_flag &= ~FAWWPN_SET; 391 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 392 vport->vport_flag |= FAWWPN_SET; 393 } 394 else 395 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 396 sizeof(struct lpfc_name)); 397 } 398 399 /** 400 * lpfc_config_port_post - Perform lpfc initialization after config port 401 * @phba: pointer to lpfc hba data structure. 402 * 403 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 404 * command call. It performs all internal resource and state setups on the 405 * port: post IOCB buffers, enable appropriate host interrupt attentions, 406 * ELS ring timers, etc. 407 * 408 * Return codes 409 * 0 - success. 410 * Any other value - error. 411 **/ 412 int 413 lpfc_config_port_post(struct lpfc_hba *phba) 414 { 415 struct lpfc_vport *vport = phba->pport; 416 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 417 LPFC_MBOXQ_t *pmb; 418 MAILBOX_t *mb; 419 struct lpfc_dmabuf *mp; 420 struct lpfc_sli *psli = &phba->sli; 421 uint32_t status, timeout; 422 int i, j; 423 int rc; 424 425 spin_lock_irq(&phba->hbalock); 426 /* 427 * If the Config port completed correctly the HBA is not 428 * over heated any more. 429 */ 430 if (phba->over_temp_state == HBA_OVER_TEMP) 431 phba->over_temp_state = HBA_NORMAL_TEMP; 432 spin_unlock_irq(&phba->hbalock); 433 434 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 435 if (!pmb) { 436 phba->link_state = LPFC_HBA_ERROR; 437 return -ENOMEM; 438 } 439 mb = &pmb->u.mb; 440 441 /* Get login parameters for NID. 
*/ 442 rc = lpfc_read_sparam(phba, pmb, 0); 443 if (rc) { 444 mempool_free(pmb, phba->mbox_mem_pool); 445 return -ENOMEM; 446 } 447 448 pmb->vport = vport; 449 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 450 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 451 "0448 Adapter failed init, mbxCmd x%x " 452 "READ_SPARM mbxStatus x%x\n", 453 mb->mbxCommand, mb->mbxStatus); 454 phba->link_state = LPFC_HBA_ERROR; 455 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 456 mempool_free(pmb, phba->mbox_mem_pool); 457 lpfc_mbuf_free(phba, mp->virt, mp->phys); 458 kfree(mp); 459 return -EIO; 460 } 461 462 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 463 464 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 465 lpfc_mbuf_free(phba, mp->virt, mp->phys); 466 kfree(mp); 467 pmb->ctx_buf = NULL; 468 lpfc_update_vport_wwn(vport); 469 470 /* Update the fc_host data structures with new wwn. */ 471 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 472 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 473 fc_host_max_npiv_vports(shost) = phba->max_vpi; 474 475 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 476 /* This should be consolidated into parse_vpd ? - mr */ 477 if (phba->SerialNumber[0] == 0) { 478 uint8_t *outptr; 479 480 outptr = &vport->fc_nodename.u.s.IEEE[0]; 481 for (i = 0; i < 12; i++) { 482 status = *outptr++; 483 j = ((status & 0xf0) >> 4); 484 if (j <= 9) 485 phba->SerialNumber[i] = 486 (char)((uint8_t) 0x30 + (uint8_t) j); 487 else 488 phba->SerialNumber[i] = 489 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 490 i++; 491 j = (status & 0xf); 492 if (j <= 9) 493 phba->SerialNumber[i] = 494 (char)((uint8_t) 0x30 + (uint8_t) j); 495 else 496 phba->SerialNumber[i] = 497 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 498 } 499 } 500 501 lpfc_read_config(phba, pmb); 502 pmb->vport = vport; 503 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 504 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 505 "0453 Adapter failed to init, mbxCmd x%x " 506 "READ_CONFIG, mbxStatus x%x\n", 507 mb->mbxCommand, mb->mbxStatus); 508 phba->link_state = LPFC_HBA_ERROR; 509 mempool_free( pmb, phba->mbox_mem_pool); 510 return -EIO; 511 } 512 513 /* Check if the port is disabled */ 514 lpfc_sli_read_link_ste(phba); 515 516 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 517 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { 518 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 519 "3359 HBA queue depth changed from %d to %d\n", 520 phba->cfg_hba_queue_depth, 521 mb->un.varRdConfig.max_xri); 522 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; 523 } 524 525 phba->lmt = mb->un.varRdConfig.lmt; 526 527 /* Get the default values for Model Name and Description */ 528 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 529 530 phba->link_state = LPFC_LINK_DOWN; 531 532 /* Only process IOCBs on ELS ring till hba_state is READY */ 533 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 534 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 535 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 536 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 537 538 /* Post receive buffers for desired rings */ 539 if (phba->sli_rev != 3) 540 lpfc_post_rcv_buf(phba); 541 542 /* 543 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 544 */ 545 if (phba->intr_type == MSIX) { 546 rc = lpfc_config_msi(phba, pmb); 547 if (rc) { 548 mempool_free(pmb, phba->mbox_mem_pool); 549 return -EIO; 550 } 
551 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 552 if (rc != MBX_SUCCESS) { 553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 554 "0352 Config MSI mailbox command " 555 "failed, mbxCmd x%x, mbxStatus x%x\n", 556 pmb->u.mb.mbxCommand, 557 pmb->u.mb.mbxStatus); 558 mempool_free(pmb, phba->mbox_mem_pool); 559 return -EIO; 560 } 561 } 562 563 spin_lock_irq(&phba->hbalock); 564 /* Initialize ERATT handling flag */ 565 phba->hba_flag &= ~HBA_ERATT_HANDLED; 566 567 /* Enable appropriate host interrupts */ 568 if (lpfc_readl(phba->HCregaddr, &status)) { 569 spin_unlock_irq(&phba->hbalock); 570 return -EIO; 571 } 572 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 573 if (psli->num_rings > 0) 574 status |= HC_R0INT_ENA; 575 if (psli->num_rings > 1) 576 status |= HC_R1INT_ENA; 577 if (psli->num_rings > 2) 578 status |= HC_R2INT_ENA; 579 if (psli->num_rings > 3) 580 status |= HC_R3INT_ENA; 581 582 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 583 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 584 status &= ~(HC_R0INT_ENA); 585 586 writel(status, phba->HCregaddr); 587 readl(phba->HCregaddr); /* flush */ 588 spin_unlock_irq(&phba->hbalock); 589 590 /* Set up ring-0 (ELS) timer */ 591 timeout = phba->fc_ratov * 2; 592 mod_timer(&vport->els_tmofunc, 593 jiffies + msecs_to_jiffies(1000 * timeout)); 594 /* Set up heart beat (HB) timer */ 595 mod_timer(&phba->hb_tmofunc, 596 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 597 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 598 phba->last_completion_time = jiffies; 599 /* Set up error attention (ERATT) polling timer */ 600 mod_timer(&phba->eratt_poll, 601 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 602 603 if (phba->hba_flag & LINK_DISABLED) { 604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 605 "2598 Adapter Link is disabled.\n"); 606 lpfc_down_link(phba, pmb); 607 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 608 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 609 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 610 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 611 "2599 Adapter failed to issue DOWN_LINK" 612 " mbox command rc 0x%x\n", rc); 613 614 mempool_free(pmb, phba->mbox_mem_pool); 615 return -EIO; 616 } 617 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 618 mempool_free(pmb, phba->mbox_mem_pool); 619 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 620 if (rc) 621 return rc; 622 } 623 /* MBOX buffer will be freed in mbox compl */ 624 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 625 if (!pmb) { 626 phba->link_state = LPFC_HBA_ERROR; 627 return -ENOMEM; 628 } 629 630 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 631 pmb->mbox_cmpl = lpfc_config_async_cmpl; 632 pmb->vport = phba->pport; 633 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 634 635 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 636 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 637 "0456 Adapter failed to issue " 638 "ASYNCEVT_ENABLE mbox status x%x\n", 639 rc); 640 mempool_free(pmb, phba->mbox_mem_pool); 641 } 642 643 /* Get Option rom version */ 644 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 645 if (!pmb) { 646 phba->link_state = LPFC_HBA_ERROR; 647 return -ENOMEM; 648 } 649 650 lpfc_dump_wakeup_param(phba, pmb); 651 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 652 pmb->vport = phba->pport; 653 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 654 655 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 656 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 657 "0435 Adapter failed " 658 "to get Option ROM 
version status x%x\n", rc); 659 mempool_free(pmb, phba->mbox_mem_pool); 660 } 661 662 return 0; 663 } 664 665 /** 666 * lpfc_sli4_refresh_params - update driver copy of params. 667 * @phba: Pointer to HBA context object. 668 * 669 * This is called to refresh driver copy of dynamic fields from the 670 * common_get_sli4_parameters descriptor. 671 **/ 672 int 673 lpfc_sli4_refresh_params(struct lpfc_hba *phba) 674 { 675 LPFC_MBOXQ_t *mboxq; 676 struct lpfc_mqe *mqe; 677 struct lpfc_sli4_parameters *mbx_sli4_parameters; 678 int length, rc; 679 680 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 681 if (!mboxq) 682 return -ENOMEM; 683 684 mqe = &mboxq->u.mqe; 685 /* Read the port's SLI4 Config Parameters */ 686 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 687 sizeof(struct lpfc_sli4_cfg_mhdr)); 688 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 689 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 690 length, LPFC_SLI4_MBX_EMBED); 691 692 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 693 if (unlikely(rc)) { 694 mempool_free(mboxq, phba->mbox_mem_pool); 695 return rc; 696 } 697 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 698 phba->sli4_hba.pc_sli4_params.mi_ver = 699 bf_get(cfg_mi_ver, mbx_sli4_parameters); 700 phba->sli4_hba.pc_sli4_params.cmf = 701 bf_get(cfg_cmf, mbx_sli4_parameters); 702 phba->sli4_hba.pc_sli4_params.pls = 703 bf_get(cfg_pvl, mbx_sli4_parameters); 704 705 mempool_free(mboxq, phba->mbox_mem_pool); 706 return rc; 707 } 708 709 /** 710 * lpfc_hba_init_link - Initialize the FC link 711 * @phba: pointer to lpfc hba data structure. 712 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 713 * 714 * This routine will issue the INIT_LINK mailbox command call. 715 * It is available to other drivers through the lpfc_hba data 716 * structure for use as a delayed link up mechanism with the 717 * module parameter lpfc_suppress_link_up. 718 * 719 * Return code 720 * 0 - success 721 * Any other value - error 722 **/ 723 static int 724 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 725 { 726 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 727 } 728 729 /** 730 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 731 * @phba: pointer to lpfc hba data structure. 732 * @fc_topology: desired fc topology. 733 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 734 * 735 * This routine will issue the INIT_LINK mailbox command call. 736 * It is available to other drivers through the lpfc_hba data 737 * structure for use as a delayed link up mechanism with the 738 * module parameter lpfc_suppress_link_up. 
739 * 740 * Return code 741 * 0 - success 742 * Any other value - error 743 **/ 744 int 745 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 746 uint32_t flag) 747 { 748 struct lpfc_vport *vport = phba->pport; 749 LPFC_MBOXQ_t *pmb; 750 MAILBOX_t *mb; 751 int rc; 752 753 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 754 if (!pmb) { 755 phba->link_state = LPFC_HBA_ERROR; 756 return -ENOMEM; 757 } 758 mb = &pmb->u.mb; 759 pmb->vport = vport; 760 761 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 762 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 763 !(phba->lmt & LMT_1Gb)) || 764 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 765 !(phba->lmt & LMT_2Gb)) || 766 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 767 !(phba->lmt & LMT_4Gb)) || 768 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 769 !(phba->lmt & LMT_8Gb)) || 770 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 771 !(phba->lmt & LMT_10Gb)) || 772 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 773 !(phba->lmt & LMT_16Gb)) || 774 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 775 !(phba->lmt & LMT_32Gb)) || 776 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 777 !(phba->lmt & LMT_64Gb))) { 778 /* Reset link speed to auto */ 779 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 780 "1302 Invalid speed for this board:%d " 781 "Reset link speed to auto.\n", 782 phba->cfg_link_speed); 783 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 784 } 785 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 786 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 787 if (phba->sli_rev < LPFC_SLI_REV4) 788 lpfc_set_loopback_flag(phba); 789 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 790 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 791 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 792 "0498 Adapter failed to init, mbxCmd x%x " 793 "INIT_LINK, mbxStatus x%x\n", 794 mb->mbxCommand, mb->mbxStatus); 795 if (phba->sli_rev <= LPFC_SLI_REV3) { 796 /* Clear all interrupt enable conditions */ 797 writel(0, phba->HCregaddr); 798 readl(phba->HCregaddr); /* flush */ 799 /* Clear all pending interrupts */ 800 writel(0xffffffff, phba->HAregaddr); 801 readl(phba->HAregaddr); /* flush */ 802 } 803 phba->link_state = LPFC_HBA_ERROR; 804 if (rc != MBX_BUSY || flag == MBX_POLL) 805 mempool_free(pmb, phba->mbox_mem_pool); 806 return -EIO; 807 } 808 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 809 if (flag == MBX_POLL) 810 mempool_free(pmb, phba->mbox_mem_pool); 811 812 return 0; 813 } 814 815 /** 816 * lpfc_hba_down_link - this routine downs the FC link 817 * @phba: pointer to lpfc hba data structure. 818 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 819 * 820 * This routine will issue the DOWN_LINK mailbox command call. 821 * It is available to other drivers through the lpfc_hba data 822 * structure for use to stop the link. 
823 * 824 * Return code 825 * 0 - success 826 * Any other value - error 827 **/ 828 static int 829 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 830 { 831 LPFC_MBOXQ_t *pmb; 832 int rc; 833 834 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 835 if (!pmb) { 836 phba->link_state = LPFC_HBA_ERROR; 837 return -ENOMEM; 838 } 839 840 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 841 "0491 Adapter Link is disabled.\n"); 842 lpfc_down_link(phba, pmb); 843 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 844 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 845 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 846 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 847 "2522 Adapter failed to issue DOWN_LINK" 848 " mbox command rc 0x%x\n", rc); 849 850 mempool_free(pmb, phba->mbox_mem_pool); 851 return -EIO; 852 } 853 if (flag == MBX_POLL) 854 mempool_free(pmb, phba->mbox_mem_pool); 855 856 return 0; 857 } 858 859 /** 860 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 861 * @phba: pointer to lpfc HBA data structure. 862 * 863 * This routine will do LPFC uninitialization before the HBA is reset when 864 * bringing down the SLI Layer. 865 * 866 * Return codes 867 * 0 - success. 868 * Any other value - error. 869 **/ 870 int 871 lpfc_hba_down_prep(struct lpfc_hba *phba) 872 { 873 struct lpfc_vport **vports; 874 int i; 875 876 if (phba->sli_rev <= LPFC_SLI_REV3) { 877 /* Disable interrupts */ 878 writel(0, phba->HCregaddr); 879 readl(phba->HCregaddr); /* flush */ 880 } 881 882 if (phba->pport->load_flag & FC_UNLOADING) 883 lpfc_cleanup_discovery_resources(phba->pport); 884 else { 885 vports = lpfc_create_vport_work_array(phba); 886 if (vports != NULL) 887 for (i = 0; i <= phba->max_vports && 888 vports[i] != NULL; i++) 889 lpfc_cleanup_discovery_resources(vports[i]); 890 lpfc_destroy_vport_work_array(phba, vports); 891 } 892 return 0; 893 } 894 895 /** 896 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 897 * rspiocb which got deferred 898 * 899 * @phba: pointer to lpfc HBA data structure. 900 * 901 * This routine will cleanup completed slow path events after HBA is reset 902 * when bringing down the SLI Layer. 903 * 904 * 905 * Return codes 906 * void. 907 **/ 908 static void 909 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 910 { 911 struct lpfc_iocbq *rspiocbq; 912 struct hbq_dmabuf *dmabuf; 913 struct lpfc_cq_event *cq_event; 914 915 spin_lock_irq(&phba->hbalock); 916 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 917 spin_unlock_irq(&phba->hbalock); 918 919 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 920 /* Get the response iocb from the head of work queue */ 921 spin_lock_irq(&phba->hbalock); 922 list_remove_head(&phba->sli4_hba.sp_queue_event, 923 cq_event, struct lpfc_cq_event, list); 924 spin_unlock_irq(&phba->hbalock); 925 926 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 927 case CQE_CODE_COMPL_WQE: 928 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 929 cq_event); 930 lpfc_sli_release_iocbq(phba, rspiocbq); 931 break; 932 case CQE_CODE_RECEIVE: 933 case CQE_CODE_RECEIVE_V1: 934 dmabuf = container_of(cq_event, struct hbq_dmabuf, 935 cq_event); 936 lpfc_in_buf_free(phba, &dmabuf->dbuf); 937 } 938 } 939 } 940 941 /** 942 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 943 * @phba: pointer to lpfc HBA data structure. 944 * 945 * This routine will cleanup posted ELS buffers after the HBA is reset 946 * when bringing down the SLI Layer. 947 * 948 * 949 * Return codes 950 * void. 
951 **/ 952 static void 953 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 954 { 955 struct lpfc_sli *psli = &phba->sli; 956 struct lpfc_sli_ring *pring; 957 struct lpfc_dmabuf *mp, *next_mp; 958 LIST_HEAD(buflist); 959 int count; 960 961 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 962 lpfc_sli_hbqbuf_free_all(phba); 963 else { 964 /* Cleanup preposted buffers on the ELS ring */ 965 pring = &psli->sli3_ring[LPFC_ELS_RING]; 966 spin_lock_irq(&phba->hbalock); 967 list_splice_init(&pring->postbufq, &buflist); 968 spin_unlock_irq(&phba->hbalock); 969 970 count = 0; 971 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 972 list_del(&mp->list); 973 count++; 974 lpfc_mbuf_free(phba, mp->virt, mp->phys); 975 kfree(mp); 976 } 977 978 spin_lock_irq(&phba->hbalock); 979 pring->postbufq_cnt -= count; 980 spin_unlock_irq(&phba->hbalock); 981 } 982 } 983 984 /** 985 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 986 * @phba: pointer to lpfc HBA data structure. 987 * 988 * This routine will cleanup the txcmplq after the HBA is reset when bringing 989 * down the SLI Layer. 990 * 991 * Return codes 992 * void 993 **/ 994 static void 995 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 996 { 997 struct lpfc_sli *psli = &phba->sli; 998 struct lpfc_queue *qp = NULL; 999 struct lpfc_sli_ring *pring; 1000 LIST_HEAD(completions); 1001 int i; 1002 struct lpfc_iocbq *piocb, *next_iocb; 1003 1004 if (phba->sli_rev != LPFC_SLI_REV4) { 1005 for (i = 0; i < psli->num_rings; i++) { 1006 pring = &psli->sli3_ring[i]; 1007 spin_lock_irq(&phba->hbalock); 1008 /* At this point in time the HBA is either reset or DOA 1009 * Nothing should be on txcmplq as it will 1010 * NEVER complete. 1011 */ 1012 list_splice_init(&pring->txcmplq, &completions); 1013 pring->txcmplq_cnt = 0; 1014 spin_unlock_irq(&phba->hbalock); 1015 1016 lpfc_sli_abort_iocb_ring(phba, pring); 1017 } 1018 /* Cancel all the IOCBs from the completions list */ 1019 lpfc_sli_cancel_iocbs(phba, &completions, 1020 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1021 return; 1022 } 1023 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 1024 pring = qp->pring; 1025 if (!pring) 1026 continue; 1027 spin_lock_irq(&pring->ring_lock); 1028 list_for_each_entry_safe(piocb, next_iocb, 1029 &pring->txcmplq, list) 1030 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 1031 list_splice_init(&pring->txcmplq, &completions); 1032 pring->txcmplq_cnt = 0; 1033 spin_unlock_irq(&pring->ring_lock); 1034 lpfc_sli_abort_iocb_ring(phba, pring); 1035 } 1036 /* Cancel all the IOCBs from the completions list */ 1037 lpfc_sli_cancel_iocbs(phba, &completions, 1038 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1039 } 1040 1041 /** 1042 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 1043 * @phba: pointer to lpfc HBA data structure. 1044 * 1045 * This routine will do uninitialization after the HBA is reset when bring 1046 * down the SLI Layer. 1047 * 1048 * Return codes 1049 * 0 - success. 1050 * Any other value - error. 1051 **/ 1052 static int 1053 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 1054 { 1055 lpfc_hba_free_post_buf(phba); 1056 lpfc_hba_clean_txcmplq(phba); 1057 return 0; 1058 } 1059 1060 /** 1061 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 1062 * @phba: pointer to lpfc HBA data structure. 1063 * 1064 * This routine will do uninitialization after the HBA is reset when bring 1065 * down the SLI Layer. 1066 * 1067 * Return codes 1068 * 0 - success. 1069 * Any other value - error. 
1070 **/ 1071 static int 1072 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1073 { 1074 struct lpfc_io_buf *psb, *psb_next; 1075 struct lpfc_async_xchg_ctx *ctxp, *ctxp_next; 1076 struct lpfc_sli4_hdw_queue *qp; 1077 LIST_HEAD(aborts); 1078 LIST_HEAD(nvme_aborts); 1079 LIST_HEAD(nvmet_aborts); 1080 struct lpfc_sglq *sglq_entry = NULL; 1081 int cnt, idx; 1082 1083 1084 lpfc_sli_hbqbuf_free_all(phba); 1085 lpfc_hba_clean_txcmplq(phba); 1086 1087 /* At this point in time the HBA is either reset or DOA. Either 1088 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1089 * on the lpfc_els_sgl_list so that it can either be freed if the 1090 * driver is unloading or reposted if the driver is restarting 1091 * the port. 1092 */ 1093 1094 /* sgl_list_lock required because worker thread uses this 1095 * list. 1096 */ 1097 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 1098 list_for_each_entry(sglq_entry, 1099 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1100 sglq_entry->state = SGL_FREED; 1101 1102 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1103 &phba->sli4_hba.lpfc_els_sgl_list); 1104 1105 1106 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 1107 1108 /* abts_xxxx_buf_list_lock required because worker thread uses this 1109 * list. 1110 */ 1111 spin_lock_irq(&phba->hbalock); 1112 cnt = 0; 1113 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1114 qp = &phba->sli4_hba.hdwq[idx]; 1115 1116 spin_lock(&qp->abts_io_buf_list_lock); 1117 list_splice_init(&qp->lpfc_abts_io_buf_list, 1118 &aborts); 1119 1120 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1121 psb->pCmd = NULL; 1122 psb->status = IOSTAT_SUCCESS; 1123 cnt++; 1124 } 1125 spin_lock(&qp->io_buf_list_put_lock); 1126 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1127 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1128 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1129 qp->abts_scsi_io_bufs = 0; 1130 qp->abts_nvme_io_bufs = 0; 1131 spin_unlock(&qp->io_buf_list_put_lock); 1132 spin_unlock(&qp->abts_io_buf_list_lock); 1133 } 1134 spin_unlock_irq(&phba->hbalock); 1135 1136 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1137 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1138 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1139 &nvmet_aborts); 1140 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1141 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1142 ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP); 1143 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1144 } 1145 } 1146 1147 lpfc_sli4_free_sp_events(phba); 1148 return cnt; 1149 } 1150 1151 /** 1152 * lpfc_hba_down_post - Wrapper func for hba down post routine 1153 * @phba: pointer to lpfc HBA data structure. 1154 * 1155 * This routine wraps the actual SLI3 or SLI4 routine for performing 1156 * uninitialization after the HBA is reset when bring down the SLI Layer. 1157 * 1158 * Return codes 1159 * 0 - success. 1160 * Any other value - error. 1161 **/ 1162 int 1163 lpfc_hba_down_post(struct lpfc_hba *phba) 1164 { 1165 return (*phba->lpfc_hba_down_post)(phba); 1166 } 1167 1168 /** 1169 * lpfc_hb_timeout - The HBA-timer timeout handler 1170 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1171 * 1172 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1173 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1174 * work-port-events bitmap and the worker thread is notified. 
This timeout 1175 * event will be used by the worker thread to invoke the actual timeout 1176 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1177 * be performed in the timeout handler and the HBA timeout event bit shall 1178 * be cleared by the worker thread after it has taken the event bitmap out. 1179 **/ 1180 static void 1181 lpfc_hb_timeout(struct timer_list *t) 1182 { 1183 struct lpfc_hba *phba; 1184 uint32_t tmo_posted; 1185 unsigned long iflag; 1186 1187 phba = from_timer(phba, t, hb_tmofunc); 1188 1189 /* Check for heart beat timeout conditions */ 1190 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1191 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1192 if (!tmo_posted) 1193 phba->pport->work_port_events |= WORKER_HB_TMO; 1194 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1195 1196 /* Tell the worker thread there is work to do */ 1197 if (!tmo_posted) 1198 lpfc_worker_wake_up(phba); 1199 return; 1200 } 1201 1202 /** 1203 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1204 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1205 * 1206 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1207 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1208 * work-port-events bitmap and the worker thread is notified. This timeout 1209 * event will be used by the worker thread to invoke the actual timeout 1210 * handler routine, lpfc_rrq_handler. Any periodical operations will 1211 * be performed in the timeout handler and the RRQ timeout event bit shall 1212 * be cleared by the worker thread after it has taken the event bitmap out. 1213 **/ 1214 static void 1215 lpfc_rrq_timeout(struct timer_list *t) 1216 { 1217 struct lpfc_hba *phba; 1218 unsigned long iflag; 1219 1220 phba = from_timer(phba, t, rrq_tmr); 1221 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1222 if (!(phba->pport->load_flag & FC_UNLOADING)) 1223 phba->hba_flag |= HBA_RRQ_ACTIVE; 1224 else 1225 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1226 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1227 1228 if (!(phba->pport->load_flag & FC_UNLOADING)) 1229 lpfc_worker_wake_up(phba); 1230 } 1231 1232 /** 1233 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1234 * @phba: pointer to lpfc hba data structure. 1235 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1236 * 1237 * This is the callback function to the lpfc heart-beat mailbox command. 1238 * If configured, the lpfc driver issues the heart-beat mailbox command to 1239 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1240 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1241 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1242 * heart-beat outstanding state. Once the mailbox command comes back and 1243 * no error conditions detected, the heart-beat mailbox command timer is 1244 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1245 * state is cleared for the next heart-beat. If the timer expired with the 1246 * heart-beat outstanding state set, the driver will put the HBA offline. 
1247 **/ 1248 static void 1249 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1250 { 1251 unsigned long drvr_flag; 1252 1253 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1254 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 1255 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1256 1257 /* Check and reset heart-beat timer if necessary */ 1258 mempool_free(pmboxq, phba->mbox_mem_pool); 1259 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1260 !(phba->link_state == LPFC_HBA_ERROR) && 1261 !(phba->pport->load_flag & FC_UNLOADING)) 1262 mod_timer(&phba->hb_tmofunc, 1263 jiffies + 1264 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1265 return; 1266 } 1267 1268 /* 1269 * lpfc_idle_stat_delay_work - idle_stat tracking 1270 * 1271 * This routine tracks per-cq idle_stat and determines polling decisions. 1272 * 1273 * Return codes: 1274 * None 1275 **/ 1276 static void 1277 lpfc_idle_stat_delay_work(struct work_struct *work) 1278 { 1279 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1280 struct lpfc_hba, 1281 idle_stat_delay_work); 1282 struct lpfc_queue *cq; 1283 struct lpfc_sli4_hdw_queue *hdwq; 1284 struct lpfc_idle_stat *idle_stat; 1285 u32 i, idle_percent; 1286 u64 wall, wall_idle, diff_wall, diff_idle, busy_time; 1287 1288 if (phba->pport->load_flag & FC_UNLOADING) 1289 return; 1290 1291 if (phba->link_state == LPFC_HBA_ERROR || 1292 phba->pport->fc_flag & FC_OFFLINE_MODE || 1293 phba->cmf_active_mode != LPFC_CFG_OFF) 1294 goto requeue; 1295 1296 for_each_present_cpu(i) { 1297 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; 1298 cq = hdwq->io_cq; 1299 1300 /* Skip if we've already handled this cq's primary CPU */ 1301 if (cq->chann != i) 1302 continue; 1303 1304 idle_stat = &phba->sli4_hba.idle_stat[i]; 1305 1306 /* get_cpu_idle_time returns values as running counters. Thus, 1307 * to know the amount for this period, the prior counter values 1308 * need to be subtracted from the current counter values. 1309 * From there, the idle time stat can be calculated as a 1310 * percentage of 100 - the sum of the other consumption times. 
1311 */ 1312 wall_idle = get_cpu_idle_time(i, &wall, 1); 1313 diff_idle = wall_idle - idle_stat->prev_idle; 1314 diff_wall = wall - idle_stat->prev_wall; 1315 1316 if (diff_wall <= diff_idle) 1317 busy_time = 0; 1318 else 1319 busy_time = diff_wall - diff_idle; 1320 1321 idle_percent = div64_u64(100 * busy_time, diff_wall); 1322 idle_percent = 100 - idle_percent; 1323 1324 if (idle_percent < 15) 1325 cq->poll_mode = LPFC_QUEUE_WORK; 1326 else 1327 cq->poll_mode = LPFC_IRQ_POLL; 1328 1329 idle_stat->prev_idle = wall_idle; 1330 idle_stat->prev_wall = wall; 1331 } 1332 1333 requeue: 1334 schedule_delayed_work(&phba->idle_stat_delay_work, 1335 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); 1336 } 1337 1338 static void 1339 lpfc_hb_eq_delay_work(struct work_struct *work) 1340 { 1341 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1342 struct lpfc_hba, eq_delay_work); 1343 struct lpfc_eq_intr_info *eqi, *eqi_new; 1344 struct lpfc_queue *eq, *eq_next; 1345 unsigned char *ena_delay = NULL; 1346 uint32_t usdelay; 1347 int i; 1348 1349 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1350 return; 1351 1352 if (phba->link_state == LPFC_HBA_ERROR || 1353 phba->pport->fc_flag & FC_OFFLINE_MODE) 1354 goto requeue; 1355 1356 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1357 GFP_KERNEL); 1358 if (!ena_delay) 1359 goto requeue; 1360 1361 for (i = 0; i < phba->cfg_irq_chann; i++) { 1362 /* Get the EQ corresponding to the IRQ vector */ 1363 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1364 if (!eq) 1365 continue; 1366 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1367 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1368 ena_delay[eq->last_cpu] = 1; 1369 } 1370 } 1371 1372 for_each_present_cpu(i) { 1373 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1374 if (ena_delay[i]) { 1375 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1376 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1377 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1378 } else { 1379 usdelay = 0; 1380 } 1381 1382 eqi->icnt = 0; 1383 1384 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1385 if (unlikely(eq->last_cpu != i)) { 1386 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1387 eq->last_cpu); 1388 list_move_tail(&eq->cpu_list, &eqi_new->list); 1389 continue; 1390 } 1391 if (usdelay != eq->q_mode) 1392 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1393 usdelay); 1394 } 1395 } 1396 1397 kfree(ena_delay); 1398 1399 requeue: 1400 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1401 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1402 } 1403 1404 /** 1405 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1406 * @phba: pointer to lpfc hba data structure. 1407 * 1408 * For each heartbeat, this routine does some heuristic methods to adjust 1409 * XRI distribution. The goal is to fully utilize free XRIs. 1410 **/ 1411 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1412 { 1413 u32 i; 1414 u32 hwq_count; 1415 1416 hwq_count = phba->cfg_hdw_queue; 1417 for (i = 0; i < hwq_count; i++) { 1418 /* Adjust XRIs in private pool */ 1419 lpfc_adjust_pvt_pool_count(phba, i); 1420 1421 /* Adjust high watermark */ 1422 lpfc_adjust_high_watermark(phba, i); 1423 1424 #ifdef LPFC_MXP_STAT 1425 /* Snapshot pbl, pvt and busy count */ 1426 lpfc_snapshot_mxp(phba, i); 1427 #endif 1428 } 1429 } 1430 1431 /** 1432 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command 1433 * @phba: pointer to lpfc hba data structure. 
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer has fired and an HBA-timeout event has been
 * posted. This handler performs any periodic operations needed for the
 * device. If such a periodic event has already been attended to, either in
 * the interrupt handler or by processing slow-ring or fast-ring events
 * within the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler
 * simply resets the timer for the next timeout period. If the lpfc
 * heart-beat mailbox command is configured and there is no heart-beat
 * mailbox command outstanding, a heart-beat mailbox command is issued and
 * the timer is set properly. Otherwise, if a heart-beat mailbox command
 * has been outstanding, the HBA is put offline.
1500 **/ 1501 void 1502 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1503 { 1504 struct lpfc_vport **vports; 1505 struct lpfc_dmabuf *buf_ptr; 1506 int retval = 0; 1507 int i, tmo; 1508 struct lpfc_sli *psli = &phba->sli; 1509 LIST_HEAD(completions); 1510 1511 if (phba->cfg_xri_rebalancing) { 1512 /* Multi-XRI pools handler */ 1513 lpfc_hb_mxp_handler(phba); 1514 } 1515 1516 vports = lpfc_create_vport_work_array(phba); 1517 if (vports != NULL) 1518 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1519 lpfc_rcv_seq_check_edtov(vports[i]); 1520 lpfc_fdmi_change_check(vports[i]); 1521 } 1522 lpfc_destroy_vport_work_array(phba, vports); 1523 1524 if ((phba->link_state == LPFC_HBA_ERROR) || 1525 (phba->pport->load_flag & FC_UNLOADING) || 1526 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1527 return; 1528 1529 if (phba->elsbuf_cnt && 1530 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1531 spin_lock_irq(&phba->hbalock); 1532 list_splice_init(&phba->elsbuf, &completions); 1533 phba->elsbuf_cnt = 0; 1534 phba->elsbuf_prev_cnt = 0; 1535 spin_unlock_irq(&phba->hbalock); 1536 1537 while (!list_empty(&completions)) { 1538 list_remove_head(&completions, buf_ptr, 1539 struct lpfc_dmabuf, list); 1540 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1541 kfree(buf_ptr); 1542 } 1543 } 1544 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1545 1546 /* If there is no heart beat outstanding, issue a heartbeat command */ 1547 if (phba->cfg_enable_hba_heartbeat) { 1548 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ 1549 spin_lock_irq(&phba->pport->work_port_lock); 1550 if (time_after(phba->last_completion_time + 1551 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1552 jiffies)) { 1553 spin_unlock_irq(&phba->pport->work_port_lock); 1554 if (phba->hba_flag & HBA_HBEAT_INP) 1555 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1556 else 1557 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1558 goto out; 1559 } 1560 spin_unlock_irq(&phba->pport->work_port_lock); 1561 1562 /* Check if a MBX_HEARTBEAT is already in progress */ 1563 if (phba->hba_flag & HBA_HBEAT_INP) { 1564 /* 1565 * If heart beat timeout called with HBA_HBEAT_INP set 1566 * we need to give the hb mailbox cmd a chance to 1567 * complete or TMO. 
1568 */ 1569 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1570 "0459 Adapter heartbeat still outstanding: " 1571 "last compl time was %d ms.\n", 1572 jiffies_to_msecs(jiffies 1573 - phba->last_completion_time)); 1574 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1575 } else { 1576 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1577 (list_empty(&psli->mboxq))) { 1578 1579 retval = lpfc_issue_hb_mbox(phba); 1580 if (retval) { 1581 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1582 goto out; 1583 } 1584 phba->skipped_hb = 0; 1585 } else if (time_before_eq(phba->last_completion_time, 1586 phba->skipped_hb)) { 1587 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1588 "2857 Last completion time not " 1589 " updated in %d ms\n", 1590 jiffies_to_msecs(jiffies 1591 - phba->last_completion_time)); 1592 } else 1593 phba->skipped_hb = jiffies; 1594 1595 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1596 goto out; 1597 } 1598 } else { 1599 /* Check to see if we want to force a MBX_HEARTBEAT */ 1600 if (phba->hba_flag & HBA_HBEAT_TMO) { 1601 retval = lpfc_issue_hb_mbox(phba); 1602 if (retval) 1603 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1604 else 1605 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1606 goto out; 1607 } 1608 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1609 } 1610 out: 1611 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); 1612 } 1613 1614 /** 1615 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1616 * @phba: pointer to lpfc hba data structure. 1617 * 1618 * This routine is called to bring the HBA offline when HBA hardware error 1619 * other than Port Error 6 has been detected. 1620 **/ 1621 static void 1622 lpfc_offline_eratt(struct lpfc_hba *phba) 1623 { 1624 struct lpfc_sli *psli = &phba->sli; 1625 1626 spin_lock_irq(&phba->hbalock); 1627 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1628 spin_unlock_irq(&phba->hbalock); 1629 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1630 1631 lpfc_offline(phba); 1632 lpfc_reset_barrier(phba); 1633 spin_lock_irq(&phba->hbalock); 1634 lpfc_sli_brdreset(phba); 1635 spin_unlock_irq(&phba->hbalock); 1636 lpfc_hba_down_post(phba); 1637 lpfc_sli_brdready(phba, HS_MBRDY); 1638 lpfc_unblock_mgmt_io(phba); 1639 phba->link_state = LPFC_HBA_ERROR; 1640 return; 1641 } 1642 1643 /** 1644 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1645 * @phba: pointer to lpfc hba data structure. 1646 * 1647 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1648 * other than Port Error 6 has been detected. 1649 **/ 1650 void 1651 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1652 { 1653 spin_lock_irq(&phba->hbalock); 1654 if (phba->link_state == LPFC_HBA_ERROR && 1655 phba->hba_flag & HBA_PCI_ERR) { 1656 spin_unlock_irq(&phba->hbalock); 1657 return; 1658 } 1659 phba->link_state = LPFC_HBA_ERROR; 1660 spin_unlock_irq(&phba->hbalock); 1661 1662 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1663 lpfc_sli_flush_io_rings(phba); 1664 lpfc_offline(phba); 1665 lpfc_hba_down_post(phba); 1666 lpfc_unblock_mgmt_io(phba); 1667 } 1668 1669 /** 1670 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1671 * @phba: pointer to lpfc hba data structure. 1672 * 1673 * This routine is invoked to handle the deferred HBA hardware error 1674 * conditions. This type of error is indicated by HBA by setting ER1 1675 * and another ER bit in the host status register. The driver will 1676 * wait until the ER1 bit clears before handling the error condition. 
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The firmware stops when it triggers the error attention. That
	 * could cause I/Os to be dropped by the firmware. Error out the
	 * iocbs (I/Os) on the txcmplq and let the SCSI layer retry them
	 * after re-establishing the link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered the error attention
		 * with HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/O) on the txcmplq and let
		 * the SCSI layer retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not take the
		 * adapter offline twice. This is the adapter hardware error
		 * path.
1872 */ 1873 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1874 "0457 Adapter Hardware Error " 1875 "Data: x%x x%x x%x\n", 1876 phba->work_hs, 1877 phba->work_status[0], phba->work_status[1]); 1878 1879 event_data = FC_REG_DUMP_EVENT; 1880 shost = lpfc_shost_from_vport(vport); 1881 fc_host_post_vendor_event(shost, fc_get_event_number(), 1882 sizeof(event_data), (char *) &event_data, 1883 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1884 1885 lpfc_offline_eratt(phba); 1886 } 1887 return; 1888 } 1889 1890 /** 1891 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1892 * @phba: pointer to lpfc hba data structure. 1893 * @mbx_action: flag for mailbox shutdown action. 1894 * @en_rn_msg: send reset/port recovery message. 1895 * This routine is invoked to perform an SLI4 port PCI function reset in 1896 * response to port status register polling attention. It waits for port 1897 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1898 * During this process, interrupt vectors are freed and later requested 1899 * for handling possible port resource change. 1900 **/ 1901 static int 1902 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1903 bool en_rn_msg) 1904 { 1905 int rc; 1906 uint32_t intr_mode; 1907 LPFC_MBOXQ_t *mboxq; 1908 1909 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1910 LPFC_SLI_INTF_IF_TYPE_2) { 1911 /* 1912 * On error status condition, driver need to wait for port 1913 * ready before performing reset. 1914 */ 1915 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1916 if (rc) 1917 return rc; 1918 } 1919 1920 /* need reset: attempt for port recovery */ 1921 if (en_rn_msg) 1922 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1923 "2887 Reset Needed: Attempting Port " 1924 "Recovery...\n"); 1925 1926 /* If we are no wait, the HBA has been reset and is not 1927 * functional, thus we should clear 1928 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. 1929 */ 1930 if (mbx_action == LPFC_MBX_NO_WAIT) { 1931 spin_lock_irq(&phba->hbalock); 1932 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 1933 if (phba->sli.mbox_active) { 1934 mboxq = phba->sli.mbox_active; 1935 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 1936 __lpfc_mbox_cmpl_put(phba, mboxq); 1937 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1938 phba->sli.mbox_active = NULL; 1939 } 1940 spin_unlock_irq(&phba->hbalock); 1941 } 1942 1943 lpfc_offline_prep(phba, mbx_action); 1944 lpfc_sli_flush_io_rings(phba); 1945 lpfc_offline(phba); 1946 /* release interrupt for possible resource change */ 1947 lpfc_sli4_disable_intr(phba); 1948 rc = lpfc_sli_brdrestart(phba); 1949 if (rc) { 1950 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1951 "6309 Failed to restart board\n"); 1952 return rc; 1953 } 1954 /* request and enable interrupt */ 1955 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1956 if (intr_mode == LPFC_INTR_ERROR) { 1957 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1958 "3175 Failed to enable interrupt\n"); 1959 return -EIO; 1960 } 1961 phba->intr_mode = intr_mode; 1962 rc = lpfc_online(phba); 1963 if (rc == 0) 1964 lpfc_unblock_mgmt_io(phba); 1965 1966 return rc; 1967 } 1968 1969 /** 1970 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1971 * @phba: pointer to lpfc hba data structure. 1972 * 1973 * This routine is invoked to handle the SLI4 HBA hardware error attention 1974 * conditions. 
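 * For if_type 0 ports the unrecoverable-error registers are read and, when
 * the error is flagged as recoverable, the port is polled for the
 * UE_RECOVERABLE semaphore state and then for PORT_READY before a function
 * reset is attempted. For if_type 2/6 ports the port status register is
 * decoded to distinguish over-temperature, firmware-update, forced-dump and
 * provisioning conditions before attempting port recovery.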
1975 **/ 1976 static void 1977 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1978 { 1979 struct lpfc_vport *vport = phba->pport; 1980 uint32_t event_data; 1981 struct Scsi_Host *shost; 1982 uint32_t if_type; 1983 struct lpfc_register portstat_reg = {0}; 1984 uint32_t reg_err1, reg_err2; 1985 uint32_t uerrlo_reg, uemasklo_reg; 1986 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1987 bool en_rn_msg = true; 1988 struct temp_event temp_event_data; 1989 struct lpfc_register portsmphr_reg; 1990 int rc, i; 1991 1992 /* If the pci channel is offline, ignore possible errors, since 1993 * we cannot communicate with the pci card anyway. 1994 */ 1995 if (pci_channel_offline(phba->pcidev)) { 1996 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1997 "3166 pci channel is offline\n"); 1998 return; 1999 } 2000 2001 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 2002 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 2003 switch (if_type) { 2004 case LPFC_SLI_INTF_IF_TYPE_0: 2005 pci_rd_rc1 = lpfc_readl( 2006 phba->sli4_hba.u.if_type0.UERRLOregaddr, 2007 &uerrlo_reg); 2008 pci_rd_rc2 = lpfc_readl( 2009 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 2010 &uemasklo_reg); 2011 /* consider PCI bus read error as pci_channel_offline */ 2012 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 2013 return; 2014 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 2015 lpfc_sli4_offline_eratt(phba); 2016 return; 2017 } 2018 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2019 "7623 Checking UE recoverable"); 2020 2021 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 2022 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2023 &portsmphr_reg.word0)) 2024 continue; 2025 2026 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 2027 &portsmphr_reg); 2028 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2029 LPFC_PORT_SEM_UE_RECOVERABLE) 2030 break; 2031 /*Sleep for 1Sec, before checking SEMAPHORE */ 2032 msleep(1000); 2033 } 2034 2035 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2036 "4827 smphr_port_status x%x : Waited %dSec", 2037 smphr_port_status, i); 2038 2039 /* Recoverable UE, reset the HBA device */ 2040 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2041 LPFC_PORT_SEM_UE_RECOVERABLE) { 2042 for (i = 0; i < 20; i++) { 2043 msleep(1000); 2044 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2045 &portsmphr_reg.word0) && 2046 (LPFC_POST_STAGE_PORT_READY == 2047 bf_get(lpfc_port_smphr_port_status, 2048 &portsmphr_reg))) { 2049 rc = lpfc_sli4_port_sta_fn_reset(phba, 2050 LPFC_MBX_NO_WAIT, en_rn_msg); 2051 if (rc == 0) 2052 return; 2053 lpfc_printf_log(phba, KERN_ERR, 2054 LOG_TRACE_EVENT, 2055 "4215 Failed to recover UE"); 2056 break; 2057 } 2058 } 2059 } 2060 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2061 "7624 Firmware not ready: Failing UE recovery," 2062 " waited %dSec", i); 2063 phba->link_state = LPFC_HBA_ERROR; 2064 break; 2065 2066 case LPFC_SLI_INTF_IF_TYPE_2: 2067 case LPFC_SLI_INTF_IF_TYPE_6: 2068 pci_rd_rc1 = lpfc_readl( 2069 phba->sli4_hba.u.if_type2.STATUSregaddr, 2070 &portstat_reg.word0); 2071 /* consider PCI bus read error as pci_channel_offline */ 2072 if (pci_rd_rc1 == -EIO) { 2073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2074 "3151 PCI bus read access failure: x%x\n", 2075 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 2076 lpfc_sli4_offline_eratt(phba); 2077 return; 2078 } 2079 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 2080 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 2081 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 2082 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2083 "2889 Port Overtemperature event, " 2084 "taking port offline Data: x%x x%x\n", 2085 reg_err1, reg_err2); 2086 2087 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 2088 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 2089 temp_event_data.event_code = LPFC_CRIT_TEMP; 2090 temp_event_data.data = 0xFFFFFFFF; 2091 2092 shost = lpfc_shost_from_vport(phba->pport); 2093 fc_host_post_vendor_event(shost, fc_get_event_number(), 2094 sizeof(temp_event_data), 2095 (char *)&temp_event_data, 2096 SCSI_NL_VID_TYPE_PCI 2097 | PCI_VENDOR_ID_EMULEX); 2098 2099 spin_lock_irq(&phba->hbalock); 2100 phba->over_temp_state = HBA_OVER_TEMP; 2101 spin_unlock_irq(&phba->hbalock); 2102 lpfc_sli4_offline_eratt(phba); 2103 return; 2104 } 2105 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2106 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 2107 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2108 "3143 Port Down: Firmware Update " 2109 "Detected\n"); 2110 en_rn_msg = false; 2111 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2112 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2113 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2114 "3144 Port Down: Debug Dump\n"); 2115 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2116 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 2117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2118 "3145 Port Down: Provisioning\n"); 2119 2120 /* If resets are disabled then leave the HBA alone and return */ 2121 if (!phba->cfg_enable_hba_reset) 2122 return; 2123 2124 /* Check port status register for function reset */ 2125 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 2126 en_rn_msg); 2127 if (rc == 0) { 2128 /* don't report event on forced debug dump */ 2129 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2130 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2131 return; 2132 else 2133 break; 2134 } 2135 /* fall through for not able to recover */ 2136 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2137 "3152 Unrecoverable error\n"); 2138 phba->link_state = LPFC_HBA_ERROR; 2139 break; 2140 case LPFC_SLI_INTF_IF_TYPE_1: 2141 default: 2142 break; 2143 } 2144 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2145 "3123 Report dump event to upper layer\n"); 2146 /* Send an internal error event to mgmt application */ 2147 lpfc_board_errevt_to_mgmt(phba); 2148 2149 event_data = FC_REG_DUMP_EVENT; 2150 shost = lpfc_shost_from_vport(vport); 2151 fc_host_post_vendor_event(shost, fc_get_event_number(), 2152 sizeof(event_data), (char *) &event_data, 2153 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2154 } 2155 2156 /** 2157 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2158 * @phba: pointer to lpfc HBA data structure. 2159 * 2160 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2161 * routine from the API jump table function pointer from the lpfc_hba struct. 2162 * 2163 * Return codes 2164 * 0 - success. 2165 * Any other value - error. 2166 **/ 2167 void 2168 lpfc_handle_eratt(struct lpfc_hba *phba) 2169 { 2170 (*phba->lpfc_handle_eratt)(phba); 2171 } 2172 2173 /** 2174 * lpfc_handle_latt - The HBA link event handler 2175 * @phba: pointer to lpfc hba data structure. 2176 * 2177 * This routine is invoked from the worker thread to handle a HBA host 2178 * attention link event. SLI3 only. 
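 * A READ_TOPOLOGY mailbox command is issued with lpfc_mbx_cmpl_read_topology
 * as its completion handler; ELS IOCBs are blocked on the ELS ring until the
 * mailbox is accepted. On any failure the link attention interrupt is
 * re-enabled, the link is taken down and the HBA is marked in error.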
2179 **/ 2180 void 2181 lpfc_handle_latt(struct lpfc_hba *phba) 2182 { 2183 struct lpfc_vport *vport = phba->pport; 2184 struct lpfc_sli *psli = &phba->sli; 2185 LPFC_MBOXQ_t *pmb; 2186 volatile uint32_t control; 2187 struct lpfc_dmabuf *mp; 2188 int rc = 0; 2189 2190 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2191 if (!pmb) { 2192 rc = 1; 2193 goto lpfc_handle_latt_err_exit; 2194 } 2195 2196 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2197 if (!mp) { 2198 rc = 2; 2199 goto lpfc_handle_latt_free_pmb; 2200 } 2201 2202 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2203 if (!mp->virt) { 2204 rc = 3; 2205 goto lpfc_handle_latt_free_mp; 2206 } 2207 2208 /* Cleanup any outstanding ELS commands */ 2209 lpfc_els_flush_all_cmd(phba); 2210 2211 psli->slistat.link_event++; 2212 lpfc_read_topology(phba, pmb, mp); 2213 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2214 pmb->vport = vport; 2215 /* Block ELS IOCBs until we have processed this mbox command */ 2216 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2217 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2218 if (rc == MBX_NOT_FINISHED) { 2219 rc = 4; 2220 goto lpfc_handle_latt_free_mbuf; 2221 } 2222 2223 /* Clear Link Attention in HA REG */ 2224 spin_lock_irq(&phba->hbalock); 2225 writel(HA_LATT, phba->HAregaddr); 2226 readl(phba->HAregaddr); /* flush */ 2227 spin_unlock_irq(&phba->hbalock); 2228 2229 return; 2230 2231 lpfc_handle_latt_free_mbuf: 2232 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2233 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2234 lpfc_handle_latt_free_mp: 2235 kfree(mp); 2236 lpfc_handle_latt_free_pmb: 2237 mempool_free(pmb, phba->mbox_mem_pool); 2238 lpfc_handle_latt_err_exit: 2239 /* Enable Link attention interrupts */ 2240 spin_lock_irq(&phba->hbalock); 2241 psli->sli_flag |= LPFC_PROCESS_LA; 2242 control = readl(phba->HCregaddr); 2243 control |= HC_LAINT_ENA; 2244 writel(control, phba->HCregaddr); 2245 readl(phba->HCregaddr); /* flush */ 2246 2247 /* Clear Link Attention in HA REG */ 2248 writel(HA_LATT, phba->HAregaddr); 2249 readl(phba->HAregaddr); /* flush */ 2250 spin_unlock_irq(&phba->hbalock); 2251 lpfc_linkdown(phba); 2252 phba->link_state = LPFC_HBA_ERROR; 2253 2254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2255 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2256 2257 return; 2258 } 2259 2260 /** 2261 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2262 * @phba: pointer to lpfc hba data structure. 2263 * @vpd: pointer to the vital product data. 2264 * @len: length of the vital product data in bytes. 2265 * 2266 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2267 * an array of characters. In this routine, the ModelName, ProgramType, and 2268 * ModelDesc, etc. fields of the phba data structure will be populated. 
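 * The buffer is parsed as a sequence of tagged descriptors: tags 0x82 and
 * 0x91 are skipped by their encoded length, tag 0x90 (read-only VPD) is
 * scanned for the SN and V1-V4 keywords, and tag 0x78 ends the parse.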
2269 * 2270 * Return codes 2271 * 0 - pointer to the VPD passed in is NULL 2272 * 1 - success 2273 **/ 2274 int 2275 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2276 { 2277 uint8_t lenlo, lenhi; 2278 int Length; 2279 int i, j; 2280 int finished = 0; 2281 int index = 0; 2282 2283 if (!vpd) 2284 return 0; 2285 2286 /* Vital Product */ 2287 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2288 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2289 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2290 (uint32_t) vpd[3]); 2291 while (!finished && (index < (len - 4))) { 2292 switch (vpd[index]) { 2293 case 0x82: 2294 case 0x91: 2295 index += 1; 2296 lenlo = vpd[index]; 2297 index += 1; 2298 lenhi = vpd[index]; 2299 index += 1; 2300 i = ((((unsigned short)lenhi) << 8) + lenlo); 2301 index += i; 2302 break; 2303 case 0x90: 2304 index += 1; 2305 lenlo = vpd[index]; 2306 index += 1; 2307 lenhi = vpd[index]; 2308 index += 1; 2309 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2310 if (Length > len - index) 2311 Length = len - index; 2312 while (Length > 0) { 2313 /* Look for Serial Number */ 2314 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2315 index += 2; 2316 i = vpd[index]; 2317 index += 1; 2318 j = 0; 2319 Length -= (3+i); 2320 while(i--) { 2321 phba->SerialNumber[j++] = vpd[index++]; 2322 if (j == 31) 2323 break; 2324 } 2325 phba->SerialNumber[j] = 0; 2326 continue; 2327 } 2328 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2329 phba->vpd_flag |= VPD_MODEL_DESC; 2330 index += 2; 2331 i = vpd[index]; 2332 index += 1; 2333 j = 0; 2334 Length -= (3+i); 2335 while(i--) { 2336 phba->ModelDesc[j++] = vpd[index++]; 2337 if (j == 255) 2338 break; 2339 } 2340 phba->ModelDesc[j] = 0; 2341 continue; 2342 } 2343 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2344 phba->vpd_flag |= VPD_MODEL_NAME; 2345 index += 2; 2346 i = vpd[index]; 2347 index += 1; 2348 j = 0; 2349 Length -= (3+i); 2350 while(i--) { 2351 phba->ModelName[j++] = vpd[index++]; 2352 if (j == 79) 2353 break; 2354 } 2355 phba->ModelName[j] = 0; 2356 continue; 2357 } 2358 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2359 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2360 index += 2; 2361 i = vpd[index]; 2362 index += 1; 2363 j = 0; 2364 Length -= (3+i); 2365 while(i--) { 2366 phba->ProgramType[j++] = vpd[index++]; 2367 if (j == 255) 2368 break; 2369 } 2370 phba->ProgramType[j] = 0; 2371 continue; 2372 } 2373 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2374 phba->vpd_flag |= VPD_PORT; 2375 index += 2; 2376 i = vpd[index]; 2377 index += 1; 2378 j = 0; 2379 Length -= (3+i); 2380 while(i--) { 2381 if ((phba->sli_rev == LPFC_SLI_REV4) && 2382 (phba->sli4_hba.pport_name_sta == 2383 LPFC_SLI4_PPNAME_GET)) { 2384 j++; 2385 index++; 2386 } else 2387 phba->Port[j++] = vpd[index++]; 2388 if (j == 19) 2389 break; 2390 } 2391 if ((phba->sli_rev != LPFC_SLI_REV4) || 2392 (phba->sli4_hba.pport_name_sta == 2393 LPFC_SLI4_PPNAME_NON)) 2394 phba->Port[j] = 0; 2395 continue; 2396 } 2397 else { 2398 index += 2; 2399 i = vpd[index]; 2400 index += 1; 2401 index += i; 2402 Length -= (3 + i); 2403 } 2404 } 2405 finished = 0; 2406 break; 2407 case 0x78: 2408 finished = 1; 2409 break; 2410 default: 2411 index ++; 2412 break; 2413 } 2414 } 2415 2416 return(1); 2417 } 2418 2419 /** 2420 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2421 * @phba: pointer to lpfc hba data structure. 2422 * @mdp: pointer to the data structure to hold the derived model name. 
2423 * @descp: pointer to the data structure to hold the derived description. 2424 * 2425 * This routine retrieves HBA's description based on its registered PCI device 2426 * ID. The @descp passed into this function points to an array of 256 chars. It 2427 * shall be returned with the model name, maximum speed, and the host bus type. 2428 * The @mdp passed into this function points to an array of 80 chars. When the 2429 * function returns, the @mdp will be filled with the model name. 2430 **/ 2431 static void 2432 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2433 { 2434 lpfc_vpd_t *vp; 2435 uint16_t dev_id = phba->pcidev->device; 2436 int max_speed; 2437 int GE = 0; 2438 int oneConnect = 0; /* default is not a oneConnect */ 2439 struct { 2440 char *name; 2441 char *bus; 2442 char *function; 2443 } m = {"<Unknown>", "", ""}; 2444 2445 if (mdp && mdp[0] != '\0' 2446 && descp && descp[0] != '\0') 2447 return; 2448 2449 if (phba->lmt & LMT_64Gb) 2450 max_speed = 64; 2451 else if (phba->lmt & LMT_32Gb) 2452 max_speed = 32; 2453 else if (phba->lmt & LMT_16Gb) 2454 max_speed = 16; 2455 else if (phba->lmt & LMT_10Gb) 2456 max_speed = 10; 2457 else if (phba->lmt & LMT_8Gb) 2458 max_speed = 8; 2459 else if (phba->lmt & LMT_4Gb) 2460 max_speed = 4; 2461 else if (phba->lmt & LMT_2Gb) 2462 max_speed = 2; 2463 else if (phba->lmt & LMT_1Gb) 2464 max_speed = 1; 2465 else 2466 max_speed = 0; 2467 2468 vp = &phba->vpd; 2469 2470 switch (dev_id) { 2471 case PCI_DEVICE_ID_FIREFLY: 2472 m = (typeof(m)){"LP6000", "PCI", 2473 "Obsolete, Unsupported Fibre Channel Adapter"}; 2474 break; 2475 case PCI_DEVICE_ID_SUPERFLY: 2476 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2477 m = (typeof(m)){"LP7000", "PCI", ""}; 2478 else 2479 m = (typeof(m)){"LP7000E", "PCI", ""}; 2480 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2481 break; 2482 case PCI_DEVICE_ID_DRAGONFLY: 2483 m = (typeof(m)){"LP8000", "PCI", 2484 "Obsolete, Unsupported Fibre Channel Adapter"}; 2485 break; 2486 case PCI_DEVICE_ID_CENTAUR: 2487 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2488 m = (typeof(m)){"LP9002", "PCI", ""}; 2489 else 2490 m = (typeof(m)){"LP9000", "PCI", ""}; 2491 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2492 break; 2493 case PCI_DEVICE_ID_RFLY: 2494 m = (typeof(m)){"LP952", "PCI", 2495 "Obsolete, Unsupported Fibre Channel Adapter"}; 2496 break; 2497 case PCI_DEVICE_ID_PEGASUS: 2498 m = (typeof(m)){"LP9802", "PCI-X", 2499 "Obsolete, Unsupported Fibre Channel Adapter"}; 2500 break; 2501 case PCI_DEVICE_ID_THOR: 2502 m = (typeof(m)){"LP10000", "PCI-X", 2503 "Obsolete, Unsupported Fibre Channel Adapter"}; 2504 break; 2505 case PCI_DEVICE_ID_VIPER: 2506 m = (typeof(m)){"LPX1000", "PCI-X", 2507 "Obsolete, Unsupported Fibre Channel Adapter"}; 2508 break; 2509 case PCI_DEVICE_ID_PFLY: 2510 m = (typeof(m)){"LP982", "PCI-X", 2511 "Obsolete, Unsupported Fibre Channel Adapter"}; 2512 break; 2513 case PCI_DEVICE_ID_TFLY: 2514 m = (typeof(m)){"LP1050", "PCI-X", 2515 "Obsolete, Unsupported Fibre Channel Adapter"}; 2516 break; 2517 case PCI_DEVICE_ID_HELIOS: 2518 m = (typeof(m)){"LP11000", "PCI-X2", 2519 "Obsolete, Unsupported Fibre Channel Adapter"}; 2520 break; 2521 case PCI_DEVICE_ID_HELIOS_SCSP: 2522 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2523 "Obsolete, Unsupported Fibre Channel Adapter"}; 2524 break; 2525 case PCI_DEVICE_ID_HELIOS_DCSP: 2526 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2527 "Obsolete, Unsupported Fibre Channel Adapter"}; 2528 break; 2529 case 
PCI_DEVICE_ID_NEPTUNE: 2530 m = (typeof(m)){"LPe1000", "PCIe", 2531 "Obsolete, Unsupported Fibre Channel Adapter"}; 2532 break; 2533 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2534 m = (typeof(m)){"LPe1000-SP", "PCIe", 2535 "Obsolete, Unsupported Fibre Channel Adapter"}; 2536 break; 2537 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2538 m = (typeof(m)){"LPe1002-SP", "PCIe", 2539 "Obsolete, Unsupported Fibre Channel Adapter"}; 2540 break; 2541 case PCI_DEVICE_ID_BMID: 2542 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2543 break; 2544 case PCI_DEVICE_ID_BSMB: 2545 m = (typeof(m)){"LP111", "PCI-X2", 2546 "Obsolete, Unsupported Fibre Channel Adapter"}; 2547 break; 2548 case PCI_DEVICE_ID_ZEPHYR: 2549 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2550 break; 2551 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2552 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2553 break; 2554 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2555 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2556 GE = 1; 2557 break; 2558 case PCI_DEVICE_ID_ZMID: 2559 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2560 break; 2561 case PCI_DEVICE_ID_ZSMB: 2562 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2563 break; 2564 case PCI_DEVICE_ID_LP101: 2565 m = (typeof(m)){"LP101", "PCI-X", 2566 "Obsolete, Unsupported Fibre Channel Adapter"}; 2567 break; 2568 case PCI_DEVICE_ID_LP10000S: 2569 m = (typeof(m)){"LP10000-S", "PCI", 2570 "Obsolete, Unsupported Fibre Channel Adapter"}; 2571 break; 2572 case PCI_DEVICE_ID_LP11000S: 2573 m = (typeof(m)){"LP11000-S", "PCI-X2", 2574 "Obsolete, Unsupported Fibre Channel Adapter"}; 2575 break; 2576 case PCI_DEVICE_ID_LPE11000S: 2577 m = (typeof(m)){"LPe11000-S", "PCIe", 2578 "Obsolete, Unsupported Fibre Channel Adapter"}; 2579 break; 2580 case PCI_DEVICE_ID_SAT: 2581 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2582 break; 2583 case PCI_DEVICE_ID_SAT_MID: 2584 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2585 break; 2586 case PCI_DEVICE_ID_SAT_SMB: 2587 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2588 break; 2589 case PCI_DEVICE_ID_SAT_DCSP: 2590 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2591 break; 2592 case PCI_DEVICE_ID_SAT_SCSP: 2593 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2594 break; 2595 case PCI_DEVICE_ID_SAT_S: 2596 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2597 break; 2598 case PCI_DEVICE_ID_HORNET: 2599 m = (typeof(m)){"LP21000", "PCIe", 2600 "Obsolete, Unsupported FCoE Adapter"}; 2601 GE = 1; 2602 break; 2603 case PCI_DEVICE_ID_PROTEUS_VF: 2604 m = (typeof(m)){"LPev12000", "PCIe IOV", 2605 "Obsolete, Unsupported Fibre Channel Adapter"}; 2606 break; 2607 case PCI_DEVICE_ID_PROTEUS_PF: 2608 m = (typeof(m)){"LPev12000", "PCIe IOV", 2609 "Obsolete, Unsupported Fibre Channel Adapter"}; 2610 break; 2611 case PCI_DEVICE_ID_PROTEUS_S: 2612 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2613 "Obsolete, Unsupported Fibre Channel Adapter"}; 2614 break; 2615 case PCI_DEVICE_ID_TIGERSHARK: 2616 oneConnect = 1; 2617 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2618 break; 2619 case PCI_DEVICE_ID_TOMCAT: 2620 oneConnect = 1; 2621 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2622 break; 2623 case PCI_DEVICE_ID_FALCON: 2624 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2625 "EmulexSecure Fibre"}; 2626 break; 2627 case PCI_DEVICE_ID_BALIUS: 2628 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2629 "Obsolete, Unsupported Fibre Channel Adapter"}; 2630 break; 2631 
case PCI_DEVICE_ID_LANCER_FC: 2632 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2633 break; 2634 case PCI_DEVICE_ID_LANCER_FC_VF: 2635 m = (typeof(m)){"LPe16000", "PCIe", 2636 "Obsolete, Unsupported Fibre Channel Adapter"}; 2637 break; 2638 case PCI_DEVICE_ID_LANCER_FCOE: 2639 oneConnect = 1; 2640 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2641 break; 2642 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2643 oneConnect = 1; 2644 m = (typeof(m)){"OCe15100", "PCIe", 2645 "Obsolete, Unsupported FCoE"}; 2646 break; 2647 case PCI_DEVICE_ID_LANCER_G6_FC: 2648 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2649 break; 2650 case PCI_DEVICE_ID_LANCER_G7_FC: 2651 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2652 break; 2653 case PCI_DEVICE_ID_LANCER_G7P_FC: 2654 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; 2655 break; 2656 case PCI_DEVICE_ID_SKYHAWK: 2657 case PCI_DEVICE_ID_SKYHAWK_VF: 2658 oneConnect = 1; 2659 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2660 break; 2661 default: 2662 m = (typeof(m)){"Unknown", "", ""}; 2663 break; 2664 } 2665 2666 if (mdp && mdp[0] == '\0') 2667 snprintf(mdp, 79,"%s", m.name); 2668 /* 2669 * oneConnect hba requires special processing, they are all initiators 2670 * and we put the port number on the end 2671 */ 2672 if (descp && descp[0] == '\0') { 2673 if (oneConnect) 2674 snprintf(descp, 255, 2675 "Emulex OneConnect %s, %s Initiator %s", 2676 m.name, m.function, 2677 phba->Port); 2678 else if (max_speed == 0) 2679 snprintf(descp, 255, 2680 "Emulex %s %s %s", 2681 m.name, m.bus, m.function); 2682 else 2683 snprintf(descp, 255, 2684 "Emulex %s %d%s %s %s", 2685 m.name, max_speed, (GE) ? "GE" : "Gb", 2686 m.bus, m.function); 2687 } 2688 } 2689 2690 /** 2691 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2692 * @phba: pointer to lpfc hba data structure. 2693 * @pring: pointer to a IOCB ring. 2694 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2695 * 2696 * This routine posts a given number of IOCBs with the associated DMA buffer 2697 * descriptors specified by the cnt argument to the given IOCB ring. 2698 * 2699 * Return codes 2700 * The number of IOCBs NOT able to be posted to the IOCB ring. 
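 * Each IOCB is a QUE_RING_BUF64_CN command carrying one or two ELS-sized
 * DMA buffers. Buffers that cannot be allocated or posted are accounted for
 * in pring->missbufcnt and retried on the next call.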
2701 **/ 2702 int 2703 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2704 { 2705 IOCB_t *icmd; 2706 struct lpfc_iocbq *iocb; 2707 struct lpfc_dmabuf *mp1, *mp2; 2708 2709 cnt += pring->missbufcnt; 2710 2711 /* While there are buffers to post */ 2712 while (cnt > 0) { 2713 /* Allocate buffer for command iocb */ 2714 iocb = lpfc_sli_get_iocbq(phba); 2715 if (iocb == NULL) { 2716 pring->missbufcnt = cnt; 2717 return cnt; 2718 } 2719 icmd = &iocb->iocb; 2720 2721 /* 2 buffers can be posted per command */ 2722 /* Allocate buffer to post */ 2723 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2724 if (mp1) 2725 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2726 if (!mp1 || !mp1->virt) { 2727 kfree(mp1); 2728 lpfc_sli_release_iocbq(phba, iocb); 2729 pring->missbufcnt = cnt; 2730 return cnt; 2731 } 2732 2733 INIT_LIST_HEAD(&mp1->list); 2734 /* Allocate buffer to post */ 2735 if (cnt > 1) { 2736 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2737 if (mp2) 2738 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2739 &mp2->phys); 2740 if (!mp2 || !mp2->virt) { 2741 kfree(mp2); 2742 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2743 kfree(mp1); 2744 lpfc_sli_release_iocbq(phba, iocb); 2745 pring->missbufcnt = cnt; 2746 return cnt; 2747 } 2748 2749 INIT_LIST_HEAD(&mp2->list); 2750 } else { 2751 mp2 = NULL; 2752 } 2753 2754 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2755 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2756 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2757 icmd->ulpBdeCount = 1; 2758 cnt--; 2759 if (mp2) { 2760 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2761 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2762 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2763 cnt--; 2764 icmd->ulpBdeCount = 2; 2765 } 2766 2767 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2768 icmd->ulpLe = 1; 2769 2770 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2771 IOCB_ERROR) { 2772 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2773 kfree(mp1); 2774 cnt++; 2775 if (mp2) { 2776 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2777 kfree(mp2); 2778 cnt++; 2779 } 2780 lpfc_sli_release_iocbq(phba, iocb); 2781 pring->missbufcnt = cnt; 2782 return cnt; 2783 } 2784 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2785 if (mp2) 2786 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2787 } 2788 pring->missbufcnt = 0; 2789 return 0; 2790 } 2791 2792 /** 2793 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2794 * @phba: pointer to lpfc hba data structure. 2795 * 2796 * This routine posts initial receive IOCB buffers to the ELS ring. The 2797 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2798 * set to 64 IOCBs. SLI3 only. 2799 * 2800 * Return codes 2801 * 0 - success (currently always success) 2802 **/ 2803 static int 2804 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2805 { 2806 struct lpfc_sli *psli = &phba->sli; 2807 2808 /* Ring 0, ELS / CT buffers */ 2809 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2810 /* Ring 2 - FCP no buffers needed */ 2811 2812 return 0; 2813 } 2814 2815 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2816 2817 /** 2818 * lpfc_sha_init - Set up initial array of hash table entries 2819 * @HashResultPointer: pointer to an array as hash table. 2820 * 2821 * This routine sets up the initial values to the array of hash table entries 2822 * for the LC HBAs. 
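 * The five values are the standard SHA-1 initialization constants (H0-H4).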
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table and returned through @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred to by @HashWorking
 * from the challenge random numbers associated with the host, referred to by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
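 * An 80-word working array is seeded from the WWNN, XOR-ed with the
 * adapter's random challenge data, and folded into @hbainit with one
 * SHA-1 style iteration (lpfc_sha_init/lpfc_sha_iterate).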
2913 **/ 2914 void 2915 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2916 { 2917 int t; 2918 uint32_t *HashWorking; 2919 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2920 2921 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2922 if (!HashWorking) 2923 return; 2924 2925 HashWorking[0] = HashWorking[78] = *pwwnn++; 2926 HashWorking[1] = HashWorking[79] = *pwwnn; 2927 2928 for (t = 0; t < 7; t++) 2929 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2930 2931 lpfc_sha_init(hbainit); 2932 lpfc_sha_iterate(hbainit, HashWorking); 2933 kfree(HashWorking); 2934 } 2935 2936 /** 2937 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2938 * @vport: pointer to a virtual N_Port data structure. 2939 * 2940 * This routine performs the necessary cleanups before deleting the @vport. 2941 * It invokes the discovery state machine to perform necessary state 2942 * transitions and to release the ndlps associated with the @vport. Note, 2943 * the physical port is treated as @vport 0. 2944 **/ 2945 void 2946 lpfc_cleanup(struct lpfc_vport *vport) 2947 { 2948 struct lpfc_hba *phba = vport->phba; 2949 struct lpfc_nodelist *ndlp, *next_ndlp; 2950 int i = 0; 2951 2952 if (phba->link_state > LPFC_LINK_DOWN) 2953 lpfc_port_link_failure(vport); 2954 2955 /* Clean up VMID resources */ 2956 if (lpfc_is_vmid_enabled(phba)) 2957 lpfc_vmid_vport_cleanup(vport); 2958 2959 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2960 if (vport->port_type != LPFC_PHYSICAL_PORT && 2961 ndlp->nlp_DID == Fabric_DID) { 2962 /* Just free up ndlp with Fabric_DID for vports */ 2963 lpfc_nlp_put(ndlp); 2964 continue; 2965 } 2966 2967 if (ndlp->nlp_DID == Fabric_Cntl_DID && 2968 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2969 lpfc_nlp_put(ndlp); 2970 continue; 2971 } 2972 2973 /* Fabric Ports not in UNMAPPED state are cleaned up in the 2974 * DEVICE_RM event. 2975 */ 2976 if (ndlp->nlp_type & NLP_FABRIC && 2977 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 2978 lpfc_disc_state_machine(vport, ndlp, NULL, 2979 NLP_EVT_DEVICE_RECOVERY); 2980 2981 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD))) 2982 lpfc_disc_state_machine(vport, ndlp, NULL, 2983 NLP_EVT_DEVICE_RM); 2984 } 2985 2986 /* At this point, ALL ndlp's should be gone 2987 * because of the previous NLP_EVT_DEVICE_RM. 2988 * Lets wait for this to happen, if needed. 2989 */ 2990 while (!list_empty(&vport->fc_nodes)) { 2991 if (i++ > 3000) { 2992 lpfc_printf_vlog(vport, KERN_ERR, 2993 LOG_TRACE_EVENT, 2994 "0233 Nodelist not empty\n"); 2995 list_for_each_entry_safe(ndlp, next_ndlp, 2996 &vport->fc_nodes, nlp_listp) { 2997 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2998 LOG_TRACE_EVENT, 2999 "0282 did:x%x ndlp:x%px " 3000 "refcnt:%d xflags x%x nflag x%x\n", 3001 ndlp->nlp_DID, (void *)ndlp, 3002 kref_read(&ndlp->kref), 3003 ndlp->fc4_xpt_flags, 3004 ndlp->nlp_flag); 3005 } 3006 break; 3007 } 3008 3009 /* Wait for any activity on ndlps to settle */ 3010 msleep(10); 3011 } 3012 lpfc_cleanup_vports_rrqs(vport, NULL); 3013 } 3014 3015 /** 3016 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 3017 * @vport: pointer to a virtual N_Port data structure. 3018 * 3019 * This routine stops all the timers associated with a @vport. This function 3020 * is invoked before disabling or deleting a @vport. Note that the physical 3021 * port is treated as @vport 0. 
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}

/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_cmf_stop - Stop CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when the link goes down or if CMF mode is turned OFF.
 * It is also called when going offline or unloaded just before the
 * congestion info buffer is unregistered.
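 * The CMF hrtimer is cancelled, the per-CPU congestion statistics are
 * zeroed, and any I/O held back for bandwidth is released by queueing the
 * unblock work item.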
3080 **/ 3081 void 3082 lpfc_cmf_stop(struct lpfc_hba *phba) 3083 { 3084 int cpu; 3085 struct lpfc_cgn_stat *cgs; 3086 3087 /* We only do something if CMF is enabled */ 3088 if (!phba->sli4_hba.pc_sli4_params.cmf) 3089 return; 3090 3091 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3092 "6221 Stop CMF / Cancel Timer\n"); 3093 3094 /* Cancel the CMF timer */ 3095 hrtimer_cancel(&phba->cmf_timer); 3096 3097 /* Zero CMF counters */ 3098 atomic_set(&phba->cmf_busy, 0); 3099 for_each_present_cpu(cpu) { 3100 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3101 atomic64_set(&cgs->total_bytes, 0); 3102 atomic64_set(&cgs->rcv_bytes, 0); 3103 atomic_set(&cgs->rx_io_cnt, 0); 3104 atomic64_set(&cgs->rx_latency, 0); 3105 } 3106 atomic_set(&phba->cmf_bw_wait, 0); 3107 3108 /* Resume any blocked IO - Queue unblock on workqueue */ 3109 queue_work(phba->wq, &phba->unblock_request_work); 3110 } 3111 3112 static inline uint64_t 3113 lpfc_get_max_line_rate(struct lpfc_hba *phba) 3114 { 3115 uint64_t rate = lpfc_sli_port_speed_get(phba); 3116 3117 return ((((unsigned long)rate) * 1024 * 1024) / 10); 3118 } 3119 3120 void 3121 lpfc_cmf_signal_init(struct lpfc_hba *phba) 3122 { 3123 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3124 "6223 Signal CMF init\n"); 3125 3126 /* Use the new fc_linkspeed to recalculate */ 3127 phba->cmf_interval_rate = LPFC_CMF_INTERVAL; 3128 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); 3129 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 3130 phba->cmf_interval_rate, 1000); 3131 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; 3132 3133 /* This is a signal to firmware to sync up CMF BW with link speed */ 3134 lpfc_issue_cmf_sync_wqe(phba, 0, 0); 3135 } 3136 3137 /** 3138 * lpfc_cmf_start - Start CMF processing 3139 * @phba: pointer to lpfc hba data structure. 3140 * 3141 * This is called when the link comes up or if CMF mode is turned OFF 3142 * to Monitor or Managed. 3143 **/ 3144 void 3145 lpfc_cmf_start(struct lpfc_hba *phba) 3146 { 3147 struct lpfc_cgn_stat *cgs; 3148 int cpu; 3149 3150 /* We only do something if CMF is enabled */ 3151 if (!phba->sli4_hba.pc_sli4_params.cmf || 3152 phba->cmf_active_mode == LPFC_CFG_OFF) 3153 return; 3154 3155 /* Reinitialize congestion buffer info */ 3156 lpfc_init_congestion_buf(phba); 3157 3158 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 3159 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 3160 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 3161 atomic_set(&phba->cgn_sync_warn_cnt, 0); 3162 3163 atomic_set(&phba->cmf_busy, 0); 3164 for_each_present_cpu(cpu) { 3165 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3166 atomic64_set(&cgs->total_bytes, 0); 3167 atomic64_set(&cgs->rcv_bytes, 0); 3168 atomic_set(&cgs->rx_io_cnt, 0); 3169 atomic64_set(&cgs->rx_latency, 0); 3170 } 3171 phba->cmf_latency.tv_sec = 0; 3172 phba->cmf_latency.tv_nsec = 0; 3173 3174 lpfc_cmf_signal_init(phba); 3175 3176 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3177 "6222 Start CMF / Timer\n"); 3178 3179 phba->cmf_timer_cnt = 0; 3180 hrtimer_start(&phba->cmf_timer, 3181 ktime_set(0, LPFC_CMF_INTERVAL * 1000000), 3182 HRTIMER_MODE_REL); 3183 /* Setup for latency check in IO cmpl routines */ 3184 ktime_get_real_ts64(&phba->cmf_latency); 3185 3186 atomic_set(&phba->cmf_bw_wait, 0); 3187 atomic_set(&phba->cmf_stop_io, 0); 3188 } 3189 3190 /** 3191 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 3192 * @phba: pointer to lpfc hba data structure. 3193 * 3194 * This routine stops all the timers associated with a HBA. 
This function is
 * invoked before either putting an HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	if (phba->pport)
		lpfc_stop_vport_timers(phba->pport);
	cancel_delayed_work_sync(&phba->eq_delay_work);
	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox no wait action.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to the
 * HBA, whether from the sysfs interface or the libdfc interface, will be
 * blocked. The HBA is set to block the management interface when the driver
 * prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}

/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fix up the temporary RPI assignments.
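 * Vports that are unloading are skipped, and nodes for which no RPI can be
 * allocated are left untouched.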
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i, rpi;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		if (vports[i]->load_flag & FC_UNLOADING)
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp,
					 &vports[i]->fc_nodes,
					 nlp_listp) {
			rpi = lpfc_sli4_alloc_rpi(phba);
			if (rpi == LPFC_RPI_ALLOC_ERROR) {
				/* TODO print log? */
				continue;
			}
			ndlp->nlp_rpi = rpi;
			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "0009 Assign RPI x%x to ndlp x%px "
					 "DID:x%06x flg:x%x\n",
					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
 * to the expedite pool and marks them as expedite.
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_init(&epd_pool->lock);
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
 * of HWQ 0 and clears the expedite mark.
 **/
static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &epd_pool->list, list) {
		list_move_tail(&lpfc_ncmd->list,
			       &qp->lpfc_io_buf_list_put);
		lpfc_ncmd->flags = false;
		qp->put_io_bufs++;
		epd_pool->count--;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public and private pools for each HWQ, then
 * moves XRIs from lpfc_io_buf_list_put to the public pool. The high and low
 * watermarks are also initialized.
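 * The private pool high watermark is set to half of the per-HWQ XRI limit
 * and the low watermark to XRI_BATCH. If any per-HWQ pool allocation fails,
 * the pools created so far are freed and XRI rebalancing is disabled.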
3401 **/ 3402 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3403 { 3404 u32 i, j; 3405 u32 hwq_count; 3406 u32 count_per_hwq; 3407 struct lpfc_io_buf *lpfc_ncmd; 3408 struct lpfc_io_buf *lpfc_ncmd_next; 3409 unsigned long iflag; 3410 struct lpfc_sli4_hdw_queue *qp; 3411 struct lpfc_multixri_pool *multixri_pool; 3412 struct lpfc_pbl_pool *pbl_pool; 3413 struct lpfc_pvt_pool *pvt_pool; 3414 3415 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3416 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3417 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3418 phba->sli4_hba.io_xri_cnt); 3419 3420 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3421 lpfc_create_expedite_pool(phba); 3422 3423 hwq_count = phba->cfg_hdw_queue; 3424 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3425 3426 for (i = 0; i < hwq_count; i++) { 3427 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3428 3429 if (!multixri_pool) { 3430 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3431 "1238 Failed to allocate memory for " 3432 "multixri_pool\n"); 3433 3434 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3435 lpfc_destroy_expedite_pool(phba); 3436 3437 j = 0; 3438 while (j < i) { 3439 qp = &phba->sli4_hba.hdwq[j]; 3440 kfree(qp->p_multixri_pool); 3441 j++; 3442 } 3443 phba->cfg_xri_rebalancing = 0; 3444 return; 3445 } 3446 3447 qp = &phba->sli4_hba.hdwq[i]; 3448 qp->p_multixri_pool = multixri_pool; 3449 3450 multixri_pool->xri_limit = count_per_hwq; 3451 multixri_pool->rrb_next_hwqid = i; 3452 3453 /* Deal with public free xri pool */ 3454 pbl_pool = &multixri_pool->pbl_pool; 3455 spin_lock_init(&pbl_pool->lock); 3456 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3457 spin_lock(&pbl_pool->lock); 3458 INIT_LIST_HEAD(&pbl_pool->list); 3459 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3460 &qp->lpfc_io_buf_list_put, list) { 3461 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3462 qp->put_io_bufs--; 3463 pbl_pool->count++; 3464 } 3465 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3466 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3467 pbl_pool->count, i); 3468 spin_unlock(&pbl_pool->lock); 3469 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3470 3471 /* Deal with private free xri pool */ 3472 pvt_pool = &multixri_pool->pvt_pool; 3473 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3474 pvt_pool->low_watermark = XRI_BATCH; 3475 spin_lock_init(&pvt_pool->lock); 3476 spin_lock_irqsave(&pvt_pool->lock, iflag); 3477 INIT_LIST_HEAD(&pvt_pool->list); 3478 pvt_pool->count = 0; 3479 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3480 } 3481 } 3482 3483 /** 3484 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3485 * @phba: pointer to lpfc hba data structure. 3486 * 3487 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
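 * When the driver is not unloading, the I/O rings are flushed first so no
 * buffers remain in flight while the pools are dismantled.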
3488 **/ 3489 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3490 { 3491 u32 i; 3492 u32 hwq_count; 3493 struct lpfc_io_buf *lpfc_ncmd; 3494 struct lpfc_io_buf *lpfc_ncmd_next; 3495 unsigned long iflag; 3496 struct lpfc_sli4_hdw_queue *qp; 3497 struct lpfc_multixri_pool *multixri_pool; 3498 struct lpfc_pbl_pool *pbl_pool; 3499 struct lpfc_pvt_pool *pvt_pool; 3500 3501 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3502 lpfc_destroy_expedite_pool(phba); 3503 3504 if (!(phba->pport->load_flag & FC_UNLOADING)) 3505 lpfc_sli_flush_io_rings(phba); 3506 3507 hwq_count = phba->cfg_hdw_queue; 3508 3509 for (i = 0; i < hwq_count; i++) { 3510 qp = &phba->sli4_hba.hdwq[i]; 3511 multixri_pool = qp->p_multixri_pool; 3512 if (!multixri_pool) 3513 continue; 3514 3515 qp->p_multixri_pool = NULL; 3516 3517 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3518 3519 /* Deal with public free xri pool */ 3520 pbl_pool = &multixri_pool->pbl_pool; 3521 spin_lock(&pbl_pool->lock); 3522 3523 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3524 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3525 pbl_pool->count, i); 3526 3527 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3528 &pbl_pool->list, list) { 3529 list_move_tail(&lpfc_ncmd->list, 3530 &qp->lpfc_io_buf_list_put); 3531 qp->put_io_bufs++; 3532 pbl_pool->count--; 3533 } 3534 3535 INIT_LIST_HEAD(&pbl_pool->list); 3536 pbl_pool->count = 0; 3537 3538 spin_unlock(&pbl_pool->lock); 3539 3540 /* Deal with private free xri pool */ 3541 pvt_pool = &multixri_pool->pvt_pool; 3542 spin_lock(&pvt_pool->lock); 3543 3544 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3545 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3546 pvt_pool->count, i); 3547 3548 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3549 &pvt_pool->list, list) { 3550 list_move_tail(&lpfc_ncmd->list, 3551 &qp->lpfc_io_buf_list_put); 3552 qp->put_io_bufs++; 3553 pvt_pool->count--; 3554 } 3555 3556 INIT_LIST_HEAD(&pvt_pool->list); 3557 pvt_pool->count = 0; 3558 3559 spin_unlock(&pvt_pool->lock); 3560 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3561 3562 kfree(multixri_pool); 3563 } 3564 } 3565 3566 /** 3567 * lpfc_online - Initialize and bring a HBA online 3568 * @phba: pointer to lpfc hba data structure. 3569 * 3570 * This routine initializes the HBA and brings a HBA online. During this 3571 * process, the management interface is blocked to prevent user space access 3572 * to the HBA interfering with the driver initialization. 3573 * 3574 * Return codes 3575 * 0 - successful 3576 * 1 - failed 3577 **/ 3578 int 3579 lpfc_online(struct lpfc_hba *phba) 3580 { 3581 struct lpfc_vport *vport; 3582 struct lpfc_vport **vports; 3583 int i, error = 0; 3584 bool vpis_cleared = false; 3585 3586 if (!phba) 3587 return 0; 3588 vport = phba->pport; 3589 3590 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3591 return 0; 3592 3593 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3594 "0458 Bring Adapter online\n"); 3595 3596 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3597 3598 if (phba->sli_rev == LPFC_SLI_REV4) { 3599 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3600 lpfc_unblock_mgmt_io(phba); 3601 return 1; 3602 } 3603 spin_lock_irq(&phba->hbalock); 3604 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3605 vpis_cleared = true; 3606 spin_unlock_irq(&phba->hbalock); 3607 3608 /* Reestablish the local initiator port. 3609 * The offline process destroyed the previous lport. 
3610 */ 3611 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3612 !phba->nvmet_support) { 3613 error = lpfc_nvme_create_localport(phba->pport); 3614 if (error) 3615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3616 "6132 NVME restore reg failed " 3617 "on nvmei error x%x\n", error); 3618 } 3619 } else { 3620 lpfc_sli_queue_init(phba); 3621 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3622 lpfc_unblock_mgmt_io(phba); 3623 return 1; 3624 } 3625 } 3626 3627 vports = lpfc_create_vport_work_array(phba); 3628 if (vports != NULL) { 3629 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3630 struct Scsi_Host *shost; 3631 shost = lpfc_shost_from_vport(vports[i]); 3632 spin_lock_irq(shost->host_lock); 3633 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3634 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3635 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3636 if (phba->sli_rev == LPFC_SLI_REV4) { 3637 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3638 if ((vpis_cleared) && 3639 (vports[i]->port_type != 3640 LPFC_PHYSICAL_PORT)) 3641 vports[i]->vpi = 0; 3642 } 3643 spin_unlock_irq(shost->host_lock); 3644 } 3645 } 3646 lpfc_destroy_vport_work_array(phba, vports); 3647 3648 if (phba->cfg_xri_rebalancing) 3649 lpfc_create_multixri_pools(phba); 3650 3651 lpfc_cpuhp_add(phba); 3652 3653 lpfc_unblock_mgmt_io(phba); 3654 return 0; 3655 } 3656 3657 /** 3658 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3659 * @phba: pointer to lpfc hba data structure. 3660 * 3661 * This routine marks a HBA's management interface as not blocked. Once the 3662 * HBA's management interface is marked as not blocked, all the user space 3663 * access to the HBA, whether they are from sysfs interface or libdfc 3664 * interface will be allowed. The HBA is set to block the management interface 3665 * when the driver prepares the HBA interface for online or offline and then 3666 * set to unblock the management interface afterwards. 3667 **/ 3668 void 3669 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3670 { 3671 unsigned long iflag; 3672 3673 spin_lock_irqsave(&phba->hbalock, iflag); 3674 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3675 spin_unlock_irqrestore(&phba->hbalock, iflag); 3676 } 3677 3678 /** 3679 * lpfc_offline_prep - Prepare a HBA to be brought offline 3680 * @phba: pointer to lpfc hba data structure. 3681 * @mbx_action: flag for mailbox shutdown action. 3682 * 3683 * This routine is invoked to prepare a HBA to be brought offline. It performs 3684 * unregistration login to all the nodes on all vports and flushes the mailbox 3685 * queue to make it ready to be brought offline. 
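 * For SLI4 ports the RPI of each node is also freed so that a new RPI can be
 * allocated when the adapter port comes back online. Fabric nodes are run
 * through the discovery state machine, the mailbox subsystem is shut down
 * according to @mbx_action, and the driver workqueue is flushed.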
3686 **/ 3687 void 3688 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3689 { 3690 struct lpfc_vport *vport = phba->pport; 3691 struct lpfc_nodelist *ndlp, *next_ndlp; 3692 struct lpfc_vport **vports; 3693 struct Scsi_Host *shost; 3694 int i; 3695 int offline = 0; 3696 3697 if (vport->fc_flag & FC_OFFLINE_MODE) 3698 return; 3699 3700 lpfc_block_mgmt_io(phba, mbx_action); 3701 3702 lpfc_linkdown(phba); 3703 3704 offline = pci_channel_offline(phba->pcidev); 3705 3706 /* Issue an unreg_login to all nodes on all vports */ 3707 vports = lpfc_create_vport_work_array(phba); 3708 if (vports != NULL) { 3709 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3710 if (vports[i]->load_flag & FC_UNLOADING) 3711 continue; 3712 shost = lpfc_shost_from_vport(vports[i]); 3713 spin_lock_irq(shost->host_lock); 3714 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3715 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3716 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3717 spin_unlock_irq(shost->host_lock); 3718 3719 shost = lpfc_shost_from_vport(vports[i]); 3720 list_for_each_entry_safe(ndlp, next_ndlp, 3721 &vports[i]->fc_nodes, 3722 nlp_listp) { 3723 3724 spin_lock_irq(&ndlp->lock); 3725 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3726 spin_unlock_irq(&ndlp->lock); 3727 3728 if (offline) { 3729 spin_lock_irq(&ndlp->lock); 3730 ndlp->nlp_flag &= ~(NLP_UNREG_INP | 3731 NLP_RPI_REGISTERED); 3732 spin_unlock_irq(&ndlp->lock); 3733 } else { 3734 lpfc_unreg_rpi(vports[i], ndlp); 3735 } 3736 /* 3737 * Whenever an SLI4 port goes offline, free the 3738 * RPI. Get a new RPI when the adapter port 3739 * comes back online. 3740 */ 3741 if (phba->sli_rev == LPFC_SLI_REV4) { 3742 lpfc_printf_vlog(vports[i], KERN_INFO, 3743 LOG_NODE | LOG_DISCOVERY, 3744 "0011 Free RPI x%x on " 3745 "ndlp: x%px did x%x\n", 3746 ndlp->nlp_rpi, ndlp, 3747 ndlp->nlp_DID); 3748 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3749 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3750 } 3751 3752 if (ndlp->nlp_type & NLP_FABRIC) { 3753 lpfc_disc_state_machine(vports[i], ndlp, 3754 NULL, NLP_EVT_DEVICE_RECOVERY); 3755 3756 /* Don't remove the node unless the node 3757 * has been unregistered with the 3758 * transport, and we're not in recovery 3759 * before dev_loss_tmo triggered. 3760 * Otherwise, let dev_loss take care of 3761 * the node. 3762 */ 3763 if (!(ndlp->save_flags & 3764 NLP_IN_RECOV_POST_DEV_LOSS) && 3765 !(ndlp->fc4_xpt_flags & 3766 (NVME_XPT_REGD | SCSI_XPT_REGD))) 3767 lpfc_disc_state_machine 3768 (vports[i], ndlp, 3769 NULL, 3770 NLP_EVT_DEVICE_RM); 3771 } 3772 } 3773 } 3774 } 3775 lpfc_destroy_vport_work_array(phba, vports); 3776 3777 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3778 3779 if (phba->wq) 3780 flush_workqueue(phba->wq); 3781 } 3782 3783 /** 3784 * lpfc_offline - Bring a HBA offline 3785 * @phba: pointer to lpfc hba data structure. 3786 * 3787 * This routine actually brings a HBA offline. It stops all the timers 3788 * associated with the HBA, brings down the SLI layer, and eventually 3789 * marks the HBA as in offline state for the upper layer protocol. 3790 **/ 3791 void 3792 lpfc_offline(struct lpfc_hba *phba) 3793 { 3794 struct Scsi_Host *shost; 3795 struct lpfc_vport **vports; 3796 int i; 3797 3798 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3799 return; 3800 3801 /* stop port and all timers associated with this hba */ 3802 lpfc_stop_port(phba); 3803 3804 /* Tear down the local and target port registrations. The 3805 * nvme transports need to cleanup. 
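 * This must be done before the SLI layer is brought down.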
3806 */ 3807 lpfc_nvmet_destroy_targetport(phba); 3808 lpfc_nvme_destroy_localport(phba->pport); 3809 3810 vports = lpfc_create_vport_work_array(phba); 3811 if (vports != NULL) 3812 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3813 lpfc_stop_vport_timers(vports[i]); 3814 lpfc_destroy_vport_work_array(phba, vports); 3815 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3816 "0460 Bring Adapter offline\n"); 3817 /* Bring down the SLI Layer and cleanup. The HBA is offline 3818 now. */ 3819 lpfc_sli_hba_down(phba); 3820 spin_lock_irq(&phba->hbalock); 3821 phba->work_ha = 0; 3822 spin_unlock_irq(&phba->hbalock); 3823 vports = lpfc_create_vport_work_array(phba); 3824 if (vports != NULL) 3825 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3826 shost = lpfc_shost_from_vport(vports[i]); 3827 spin_lock_irq(shost->host_lock); 3828 vports[i]->work_port_events = 0; 3829 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3830 spin_unlock_irq(shost->host_lock); 3831 } 3832 lpfc_destroy_vport_work_array(phba, vports); 3833 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled 3834 * in hba_unset 3835 */ 3836 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3837 __lpfc_cpuhp_remove(phba); 3838 3839 if (phba->cfg_xri_rebalancing) 3840 lpfc_destroy_multixri_pools(phba); 3841 } 3842 3843 /** 3844 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3845 * @phba: pointer to lpfc hba data structure. 3846 * 3847 * This routine is to free all the SCSI buffers and IOCBs from the driver 3848 * list back to kernel. It is called from lpfc_pci_remove_one to free 3849 * the internal resources before the device is removed from the system. 3850 **/ 3851 static void 3852 lpfc_scsi_free(struct lpfc_hba *phba) 3853 { 3854 struct lpfc_io_buf *sb, *sb_next; 3855 3856 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3857 return; 3858 3859 spin_lock_irq(&phba->hbalock); 3860 3861 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3862 3863 spin_lock(&phba->scsi_buf_list_put_lock); 3864 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3865 list) { 3866 list_del(&sb->list); 3867 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3868 sb->dma_handle); 3869 kfree(sb); 3870 phba->total_scsi_bufs--; 3871 } 3872 spin_unlock(&phba->scsi_buf_list_put_lock); 3873 3874 spin_lock(&phba->scsi_buf_list_get_lock); 3875 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3876 list) { 3877 list_del(&sb->list); 3878 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3879 sb->dma_handle); 3880 kfree(sb); 3881 phba->total_scsi_bufs--; 3882 } 3883 spin_unlock(&phba->scsi_buf_list_get_lock); 3884 spin_unlock_irq(&phba->hbalock); 3885 } 3886 3887 /** 3888 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3889 * @phba: pointer to lpfc hba data structure. 3890 * 3891 * This routine is to free all the IO buffers and IOCBs from the driver 3892 * list back to kernel. It is called from lpfc_pci_remove_one to free 3893 * the internal resources before the device is removed from the system. 3894 **/ 3895 void 3896 lpfc_io_free(struct lpfc_hba *phba) 3897 { 3898 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3899 struct lpfc_sli4_hdw_queue *qp; 3900 int idx; 3901 3902 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3903 qp = &phba->sli4_hba.hdwq[idx]; 3904 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3905 spin_lock(&qp->io_buf_list_put_lock); 3906 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3907 &qp->lpfc_io_buf_list_put, 3908 list) { 3909 list_del(&lpfc_ncmd->list); 3910 qp->put_io_bufs--; 3911 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3912 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3913 if (phba->cfg_xpsgl && !phba->nvmet_support) 3914 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3915 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3916 kfree(lpfc_ncmd); 3917 qp->total_io_bufs--; 3918 } 3919 spin_unlock(&qp->io_buf_list_put_lock); 3920 3921 spin_lock(&qp->io_buf_list_get_lock); 3922 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3923 &qp->lpfc_io_buf_list_get, 3924 list) { 3925 list_del(&lpfc_ncmd->list); 3926 qp->get_io_bufs--; 3927 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3928 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3929 if (phba->cfg_xpsgl && !phba->nvmet_support) 3930 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3931 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3932 kfree(lpfc_ncmd); 3933 qp->total_io_bufs--; 3934 } 3935 spin_unlock(&qp->io_buf_list_get_lock); 3936 } 3937 } 3938 3939 /** 3940 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3941 * @phba: pointer to lpfc hba data structure. 3942 * 3943 * This routine first calculates the sizes of the current els and allocated 3944 * scsi sgl lists, and then goes through all sgls to updates the physical 3945 * XRIs assigned due to port function reset. During port initialization, the 3946 * current els and allocated scsi sgl lists are 0s. 3947 * 3948 * Return codes 3949 * 0 - successful (for now, it always returns 0) 3950 **/ 3951 int 3952 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3953 { 3954 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3955 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3956 LIST_HEAD(els_sgl_list); 3957 int rc; 3958 3959 /* 3960 * update on pci function's els xri-sgl list 3961 */ 3962 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3963 3964 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3965 /* els xri-sgl expanded */ 3966 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3967 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3968 "3157 ELS xri-sgl count increased from " 3969 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3970 els_xri_cnt); 3971 /* allocate the additional els sgls */ 3972 for (i = 0; i < xri_cnt; i++) { 3973 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3974 GFP_KERNEL); 3975 if (sglq_entry == NULL) { 3976 lpfc_printf_log(phba, KERN_ERR, 3977 LOG_TRACE_EVENT, 3978 "2562 Failure to allocate an " 3979 "ELS sgl entry:%d\n", i); 3980 rc = -ENOMEM; 3981 goto out_free_mem; 3982 } 3983 sglq_entry->buff_type = GEN_BUFF_TYPE; 3984 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3985 &sglq_entry->phys); 3986 if (sglq_entry->virt == NULL) { 3987 kfree(sglq_entry); 3988 lpfc_printf_log(phba, KERN_ERR, 3989 LOG_TRACE_EVENT, 3990 "2563 Failure to allocate an " 3991 "ELS mbuf:%d\n", i); 3992 rc = -ENOMEM; 3993 goto out_free_mem; 3994 } 3995 sglq_entry->sgl = sglq_entry->virt; 3996 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3997 sglq_entry->state = SGL_FREED; 3998 list_add_tail(&sglq_entry->list, &els_sgl_list); 3999 } 4000 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 4001 list_splice_init(&els_sgl_list, 4002 &phba->sli4_hba.lpfc_els_sgl_list); 4003 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 4004 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 4005 /* els xri-sgl shrinked */ 4006 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 4007 lpfc_printf_log(phba, KERN_INFO, 
LOG_SLI, 4008 "3158 ELS xri-sgl count decreased from "
4009 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4010 els_xri_cnt);
4011 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4012 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4013 &els_sgl_list);
4014 /* release extra els sgls from list */
4015 for (i = 0; i < xri_cnt; i++) {
4016 list_remove_head(&els_sgl_list,
4017 sglq_entry, struct lpfc_sglq, list);
4018 if (sglq_entry) {
4019 __lpfc_mbuf_free(phba, sglq_entry->virt,
4020 sglq_entry->phys);
4021 kfree(sglq_entry);
4022 }
4023 }
4024 list_splice_init(&els_sgl_list,
4025 &phba->sli4_hba.lpfc_els_sgl_list);
4026 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4027 } else
4028 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4029 "3163 ELS xri-sgl count unchanged: %d\n",
4030 els_xri_cnt);
4031 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4032
4033 /* update xris to els sgls on the list */
4034 sglq_entry = NULL;
4035 sglq_entry_next = NULL;
4036 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4037 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4038 lxri = lpfc_sli4_next_xritag(phba);
4039 if (lxri == NO_XRI) {
4040 lpfc_printf_log(phba, KERN_ERR,
4041 LOG_TRACE_EVENT,
4042 "2400 Failed to allocate xri for "
4043 "ELS sgl\n");
4044 rc = -ENOMEM;
4045 goto out_free_mem;
4046 }
4047 sglq_entry->sli4_lxritag = lxri;
4048 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4049 }
4050 return 0;
4051
4052 out_free_mem:
4053 lpfc_free_els_sgl_list(phba);
4054 return rc;
4055 }
4056
4057 /**
4058 * lpfc_sli4_nvmet_sgl_update - update nvmet xri-sgl sizing and mapping
4059 * @phba: pointer to lpfc hba data structure.
4060 *
4061 * This routine first calculates the size of the current nvmet xri-sgl list,
4062 * and then goes through all sgls to update the physical XRIs assigned due
4063 * to port function reset. During port initialization, the current nvmet
4064 * xri-sgl list is empty.
4065 * 4066 * Return codes 4067 * 0 - successful (for now, it always returns 0) 4068 **/ 4069 int 4070 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 4071 { 4072 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 4073 uint16_t i, lxri, xri_cnt, els_xri_cnt; 4074 uint16_t nvmet_xri_cnt; 4075 LIST_HEAD(nvmet_sgl_list); 4076 int rc; 4077 4078 /* 4079 * update on pci function's nvmet xri-sgl list 4080 */ 4081 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4082 4083 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 4084 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4085 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 4086 /* els xri-sgl expanded */ 4087 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 4088 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4089 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 4090 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 4091 /* allocate the additional nvmet sgls */ 4092 for (i = 0; i < xri_cnt; i++) { 4093 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 4094 GFP_KERNEL); 4095 if (sglq_entry == NULL) { 4096 lpfc_printf_log(phba, KERN_ERR, 4097 LOG_TRACE_EVENT, 4098 "6303 Failure to allocate an " 4099 "NVMET sgl entry:%d\n", i); 4100 rc = -ENOMEM; 4101 goto out_free_mem; 4102 } 4103 sglq_entry->buff_type = NVMET_BUFF_TYPE; 4104 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 4105 &sglq_entry->phys); 4106 if (sglq_entry->virt == NULL) { 4107 kfree(sglq_entry); 4108 lpfc_printf_log(phba, KERN_ERR, 4109 LOG_TRACE_EVENT, 4110 "6304 Failure to allocate an " 4111 "NVMET buf:%d\n", i); 4112 rc = -ENOMEM; 4113 goto out_free_mem; 4114 } 4115 sglq_entry->sgl = sglq_entry->virt; 4116 memset(sglq_entry->sgl, 0, 4117 phba->cfg_sg_dma_buf_size); 4118 sglq_entry->state = SGL_FREED; 4119 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 4120 } 4121 spin_lock_irq(&phba->hbalock); 4122 spin_lock(&phba->sli4_hba.sgl_list_lock); 4123 list_splice_init(&nvmet_sgl_list, 4124 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4125 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4126 spin_unlock_irq(&phba->hbalock); 4127 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 4128 /* nvmet xri-sgl shrunk */ 4129 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 4130 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4131 "6305 NVMET xri-sgl count decreased from " 4132 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 4133 nvmet_xri_cnt); 4134 spin_lock_irq(&phba->hbalock); 4135 spin_lock(&phba->sli4_hba.sgl_list_lock); 4136 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 4137 &nvmet_sgl_list); 4138 /* release extra nvmet sgls from list */ 4139 for (i = 0; i < xri_cnt; i++) { 4140 list_remove_head(&nvmet_sgl_list, 4141 sglq_entry, struct lpfc_sglq, list); 4142 if (sglq_entry) { 4143 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 4144 sglq_entry->phys); 4145 kfree(sglq_entry); 4146 } 4147 } 4148 list_splice_init(&nvmet_sgl_list, 4149 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4150 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4151 spin_unlock_irq(&phba->hbalock); 4152 } else 4153 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4154 "6306 NVMET xri-sgl count unchanged: %d\n", 4155 nvmet_xri_cnt); 4156 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 4157 4158 /* update xris to nvmet sgls on the list */ 4159 sglq_entry = NULL; 4160 sglq_entry_next = NULL; 4161 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 4162 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 4163 lxri = lpfc_sli4_next_xritag(phba); 4164 if (lxri == NO_XRI) { 4165 lpfc_printf_log(phba, KERN_ERR, 
4166 LOG_TRACE_EVENT, 4167 "6307 Failed to allocate xri for " 4168 "NVMET sgl\n"); 4169 rc = -ENOMEM; 4170 goto out_free_mem; 4171 } 4172 sglq_entry->sli4_lxritag = lxri; 4173 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4174 } 4175 return 0; 4176 4177 out_free_mem: 4178 lpfc_free_nvmet_sgl_list(phba); 4179 return rc; 4180 } 4181 4182 int 4183 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 4184 { 4185 LIST_HEAD(blist); 4186 struct lpfc_sli4_hdw_queue *qp; 4187 struct lpfc_io_buf *lpfc_cmd; 4188 struct lpfc_io_buf *iobufp, *prev_iobufp; 4189 int idx, cnt, xri, inserted; 4190 4191 cnt = 0; 4192 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4193 qp = &phba->sli4_hba.hdwq[idx]; 4194 spin_lock_irq(&qp->io_buf_list_get_lock); 4195 spin_lock(&qp->io_buf_list_put_lock); 4196 4197 /* Take everything off the get and put lists */ 4198 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 4199 list_splice(&qp->lpfc_io_buf_list_put, &blist); 4200 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 4201 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 4202 cnt += qp->get_io_bufs + qp->put_io_bufs; 4203 qp->get_io_bufs = 0; 4204 qp->put_io_bufs = 0; 4205 qp->total_io_bufs = 0; 4206 spin_unlock(&qp->io_buf_list_put_lock); 4207 spin_unlock_irq(&qp->io_buf_list_get_lock); 4208 } 4209 4210 /* 4211 * Take IO buffers off blist and put on cbuf sorted by XRI. 4212 * This is because POST_SGL takes a sequential range of XRIs 4213 * to post to the firmware. 4214 */ 4215 for (idx = 0; idx < cnt; idx++) { 4216 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 4217 if (!lpfc_cmd) 4218 return cnt; 4219 if (idx == 0) { 4220 list_add_tail(&lpfc_cmd->list, cbuf); 4221 continue; 4222 } 4223 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 4224 inserted = 0; 4225 prev_iobufp = NULL; 4226 list_for_each_entry(iobufp, cbuf, list) { 4227 if (xri < iobufp->cur_iocbq.sli4_xritag) { 4228 if (prev_iobufp) 4229 list_add(&lpfc_cmd->list, 4230 &prev_iobufp->list); 4231 else 4232 list_add(&lpfc_cmd->list, cbuf); 4233 inserted = 1; 4234 break; 4235 } 4236 prev_iobufp = iobufp; 4237 } 4238 if (!inserted) 4239 list_add_tail(&lpfc_cmd->list, cbuf); 4240 } 4241 return cnt; 4242 } 4243 4244 int 4245 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 4246 { 4247 struct lpfc_sli4_hdw_queue *qp; 4248 struct lpfc_io_buf *lpfc_cmd; 4249 int idx, cnt; 4250 4251 qp = phba->sli4_hba.hdwq; 4252 cnt = 0; 4253 while (!list_empty(cbuf)) { 4254 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4255 list_remove_head(cbuf, lpfc_cmd, 4256 struct lpfc_io_buf, list); 4257 if (!lpfc_cmd) 4258 return cnt; 4259 cnt++; 4260 qp = &phba->sli4_hba.hdwq[idx]; 4261 lpfc_cmd->hdwq_no = idx; 4262 lpfc_cmd->hdwq = qp; 4263 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 4264 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 4265 spin_lock(&qp->io_buf_list_put_lock); 4266 list_add_tail(&lpfc_cmd->list, 4267 &qp->lpfc_io_buf_list_put); 4268 qp->put_io_bufs++; 4269 qp->total_io_bufs++; 4270 spin_unlock(&qp->io_buf_list_put_lock); 4271 } 4272 } 4273 return cnt; 4274 } 4275 4276 /** 4277 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 4278 * @phba: pointer to lpfc hba data structure. 4279 * 4280 * This routine first calculates the sizes of the current els and allocated 4281 * scsi sgl lists, and then goes through all sgls to updates the physical 4282 * XRIs assigned due to port function reset. During port initialization, the 4283 * current els and allocated scsi sgl lists are 0s. 
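 * The ELS xri count is recomputed first and all remaining XRIs are reserved
 * for SCSI/NVMe IO buffers; any excess buffers are freed, and the surviving
 * buffers are re-tagged and redistributed across the hardware queues.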
4284 *
4285 * Return codes
4286 * 0 - successful (for now, it always returns 0)
4287 **/
4288 int
4289 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4290 {
4291 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4292 uint16_t i, lxri, els_xri_cnt;
4293 uint16_t io_xri_cnt, io_xri_max;
4294 LIST_HEAD(io_sgl_list);
4295 int rc, cnt;
4296
4297 /*
4298 * update on pci function's allocated nvme xri-sgl list
4299 */
4300
4301 /* maximum number of xris available for nvme buffers */
4302 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4303 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4304 phba->sli4_hba.io_xri_max = io_xri_max;
4305
4306 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4307 "6074 Current allocated XRI sgl count:%d, "
4308 "maximum XRI count:%d\n",
4309 phba->sli4_hba.io_xri_cnt,
4310 phba->sli4_hba.io_xri_max);
4311
4312 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4313
4314 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4315 /* max nvme xri shrunk below the allocated nvme buffers */
4316 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4317 phba->sli4_hba.io_xri_max;
4318 /* release the extra allocated nvme buffers */
4319 for (i = 0; i < io_xri_cnt; i++) {
4320 list_remove_head(&io_sgl_list, lpfc_ncmd,
4321 struct lpfc_io_buf, list);
4322 if (lpfc_ncmd) {
4323 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4324 lpfc_ncmd->data,
4325 lpfc_ncmd->dma_handle);
4326 kfree(lpfc_ncmd);
4327 }
4328 }
4329 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4330 }
4331
4332 /* update xris associated to remaining allocated nvme buffers */
4333 lpfc_ncmd = NULL;
4334 lpfc_ncmd_next = NULL;
4335 phba->sli4_hba.io_xri_cnt = cnt;
4336 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4337 &io_sgl_list, list) {
4338 lxri = lpfc_sli4_next_xritag(phba);
4339 if (lxri == NO_XRI) {
4340 lpfc_printf_log(phba, KERN_ERR,
4341 LOG_TRACE_EVENT,
4342 "6075 Failed to allocate xri for "
4343 "nvme buffer\n");
4344 rc = -ENOMEM;
4345 goto out_free_mem;
4346 }
4347 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4348 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4349 }
4350 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4351 return 0;
4352
4353 out_free_mem:
4354 lpfc_io_free(phba);
4355 return rc;
4356 }
4357
4358 /**
4359 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4360 * @phba: Pointer to lpfc hba data structure.
4361 * @num_to_alloc: The requested number of buffers to allocate.
4362 *
4363 * This routine allocates nvme buffers for a device with the SLI-4 interface
4364 * spec; each nvme buffer contains all the information needed to initiate
4365 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4366 * them on a list, it posts them to the port by using SGL block post.
4367 *
4368 * Return codes:
4369 * int - number of IO buffers that were allocated and posted.
4370 * 0 = failure, less than num_to_alloc is a partial failure.
4371 **/
4372 int
4373 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4374 {
4375 struct lpfc_io_buf *lpfc_ncmd;
4376 struct lpfc_iocbq *pwqeq;
4377 uint16_t iotag, lxri = 0;
4378 int bcnt, num_posted;
4379 LIST_HEAD(prep_nblist);
4380 LIST_HEAD(post_nblist);
4381 LIST_HEAD(nvme_nblist);
4382
4383 phba->sli4_hba.io_xri_cnt = 0;
4384 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4385 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4386 if (!lpfc_ncmd)
4387 break;
4388 /*
4389 * Get memory from the pci pool to map the virt space to
4390 * pci bus space for an I/O.
The DMA buffer includes the 4391 * number of SGE's necessary to support the sg_tablesize. 4392 */ 4393 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4394 GFP_KERNEL, 4395 &lpfc_ncmd->dma_handle); 4396 if (!lpfc_ncmd->data) { 4397 kfree(lpfc_ncmd); 4398 break; 4399 } 4400 4401 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4402 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4403 } else { 4404 /* 4405 * 4K Page alignment is CRITICAL to BlockGuard, double 4406 * check to be sure. 4407 */ 4408 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4409 (((unsigned long)(lpfc_ncmd->data) & 4410 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4411 lpfc_printf_log(phba, KERN_ERR, 4412 LOG_TRACE_EVENT, 4413 "3369 Memory alignment err: " 4414 "addr=%lx\n", 4415 (unsigned long)lpfc_ncmd->data); 4416 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4417 lpfc_ncmd->data, 4418 lpfc_ncmd->dma_handle); 4419 kfree(lpfc_ncmd); 4420 break; 4421 } 4422 } 4423 4424 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4425 4426 lxri = lpfc_sli4_next_xritag(phba); 4427 if (lxri == NO_XRI) { 4428 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4429 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4430 kfree(lpfc_ncmd); 4431 break; 4432 } 4433 pwqeq = &lpfc_ncmd->cur_iocbq; 4434 4435 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4436 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4437 if (iotag == 0) { 4438 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4439 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4440 kfree(lpfc_ncmd); 4441 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4442 "6121 Failed to allocate IOTAG for" 4443 " XRI:0x%x\n", lxri); 4444 lpfc_sli4_free_xri(phba, lxri); 4445 break; 4446 } 4447 pwqeq->sli4_lxritag = lxri; 4448 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4449 pwqeq->context1 = lpfc_ncmd; 4450 4451 /* Initialize local short-hand pointers. 
*/ 4452 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4453 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4454 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4455 spin_lock_init(&lpfc_ncmd->buf_lock); 4456 4457 /* add the nvme buffer to a post list */ 4458 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4459 phba->sli4_hba.io_xri_cnt++; 4460 } 4461 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4462 "6114 Allocate %d out of %d requested new NVME " 4463 "buffers\n", bcnt, num_to_alloc); 4464 4465 /* post the list of nvme buffer sgls to port if available */ 4466 if (!list_empty(&post_nblist)) 4467 num_posted = lpfc_sli4_post_io_sgl_list( 4468 phba, &post_nblist, bcnt); 4469 else 4470 num_posted = 0; 4471 4472 return num_posted; 4473 } 4474 4475 static uint64_t 4476 lpfc_get_wwpn(struct lpfc_hba *phba) 4477 { 4478 uint64_t wwn; 4479 int rc; 4480 LPFC_MBOXQ_t *mboxq; 4481 MAILBOX_t *mb; 4482 4483 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4484 GFP_KERNEL); 4485 if (!mboxq) 4486 return (uint64_t)-1; 4487 4488 /* First get WWN of HBA instance */ 4489 lpfc_read_nv(phba, mboxq); 4490 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4491 if (rc != MBX_SUCCESS) { 4492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4493 "6019 Mailbox failed , mbxCmd x%x " 4494 "READ_NV, mbxStatus x%x\n", 4495 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4496 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4497 mempool_free(mboxq, phba->mbox_mem_pool); 4498 return (uint64_t) -1; 4499 } 4500 mb = &mboxq->u.mb; 4501 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4502 /* wwn is WWPN of HBA instance */ 4503 mempool_free(mboxq, phba->mbox_mem_pool); 4504 if (phba->sli_rev == LPFC_SLI_REV4) 4505 return be64_to_cpu(wwn); 4506 else 4507 return rol64(wwn, 32); 4508 } 4509 4510 /** 4511 * lpfc_vmid_res_alloc - Allocates resources for VMID 4512 * @phba: pointer to lpfc hba data structure. 4513 * @vport: pointer to vport data structure 4514 * 4515 * This routine allocated the resources needed for the VMID. 4516 * 4517 * Return codes 4518 * 0 on Success 4519 * Non-0 on Failure 4520 */ 4521 static int 4522 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) 4523 { 4524 /* VMID feature is supported only on SLI4 */ 4525 if (phba->sli_rev == LPFC_SLI_REV3) { 4526 phba->cfg_vmid_app_header = 0; 4527 phba->cfg_vmid_priority_tagging = 0; 4528 } 4529 4530 if (lpfc_is_vmid_enabled(phba)) { 4531 vport->vmid = 4532 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), 4533 GFP_KERNEL); 4534 if (!vport->vmid) 4535 return -ENOMEM; 4536 4537 rwlock_init(&vport->vmid_lock); 4538 4539 /* Set the VMID parameters for the vport */ 4540 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; 4541 vport->vmid_inactivity_timeout = 4542 phba->cfg_vmid_inactivity_timeout; 4543 vport->max_vmid = phba->cfg_max_vmid; 4544 vport->cur_vmid_cnt = 0; 4545 4546 vport->vmid_priority_range = bitmap_zalloc 4547 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); 4548 4549 if (!vport->vmid_priority_range) { 4550 kfree(vport->vmid); 4551 return -ENOMEM; 4552 } 4553 4554 hash_init(vport->hash_table); 4555 } 4556 return 0; 4557 } 4558 4559 /** 4560 * lpfc_create_port - Create an FC port 4561 * @phba: pointer to lpfc hba data structure. 4562 * @instance: a unique integer ID to this FC port. 4563 * @dev: pointer to the device data structure. 4564 * 4565 * This routine creates a FC port for the upper layer protocol. The FC port 4566 * can be created on top of either a physical port or a virtual port provided 4567 * by the HBA. 
This routine also allocates a SCSI host data structure (shost) 4568 * and associates the FC port created before adding the shost into the SCSI 4569 * layer. 4570 * 4571 * Return codes 4572 * @vport - pointer to the virtual N_Port data structure. 4573 * NULL - port create failed. 4574 **/ 4575 struct lpfc_vport * 4576 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4577 { 4578 struct lpfc_vport *vport; 4579 struct Scsi_Host *shost = NULL; 4580 struct scsi_host_template *template; 4581 int error = 0; 4582 int i; 4583 uint64_t wwn; 4584 bool use_no_reset_hba = false; 4585 int rc; 4586 4587 if (lpfc_no_hba_reset_cnt) { 4588 if (phba->sli_rev < LPFC_SLI_REV4 && 4589 dev == &phba->pcidev->dev) { 4590 /* Reset the port first */ 4591 lpfc_sli_brdrestart(phba); 4592 rc = lpfc_sli_chipset_init(phba); 4593 if (rc) 4594 return NULL; 4595 } 4596 wwn = lpfc_get_wwpn(phba); 4597 } 4598 4599 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4600 if (wwn == lpfc_no_hba_reset[i]) { 4601 lpfc_printf_log(phba, KERN_ERR, 4602 LOG_TRACE_EVENT, 4603 "6020 Setting use_no_reset port=%llx\n", 4604 wwn); 4605 use_no_reset_hba = true; 4606 break; 4607 } 4608 } 4609 4610 /* Seed template for SCSI host registration */ 4611 if (dev == &phba->pcidev->dev) { 4612 template = &phba->port_template; 4613 4614 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4615 /* Seed physical port template */ 4616 memcpy(template, &lpfc_template, sizeof(*template)); 4617 4618 if (use_no_reset_hba) 4619 /* template is for a no reset SCSI Host */ 4620 template->eh_host_reset_handler = NULL; 4621 4622 /* Template for all vports this physical port creates */ 4623 memcpy(&phba->vport_template, &lpfc_template, 4624 sizeof(*template)); 4625 phba->vport_template.shost_groups = lpfc_vport_groups; 4626 phba->vport_template.eh_bus_reset_handler = NULL; 4627 phba->vport_template.eh_host_reset_handler = NULL; 4628 phba->vport_template.vendor_id = 0; 4629 4630 /* Initialize the host templates with updated value */ 4631 if (phba->sli_rev == LPFC_SLI_REV4) { 4632 template->sg_tablesize = phba->cfg_scsi_seg_cnt; 4633 phba->vport_template.sg_tablesize = 4634 phba->cfg_scsi_seg_cnt; 4635 } else { 4636 template->sg_tablesize = phba->cfg_sg_seg_cnt; 4637 phba->vport_template.sg_tablesize = 4638 phba->cfg_sg_seg_cnt; 4639 } 4640 4641 } else { 4642 /* NVMET is for physical port only */ 4643 memcpy(template, &lpfc_template_nvme, 4644 sizeof(*template)); 4645 } 4646 } else { 4647 template = &phba->vport_template; 4648 } 4649 4650 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4651 if (!shost) 4652 goto out; 4653 4654 vport = (struct lpfc_vport *) shost->hostdata; 4655 vport->phba = phba; 4656 vport->load_flag |= FC_LOADING; 4657 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4658 vport->fc_rscn_flush = 0; 4659 lpfc_get_vport_cfgparam(vport); 4660 4661 /* Adjust value in vport */ 4662 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4663 4664 shost->unique_id = instance; 4665 shost->max_id = LPFC_MAX_TARGET; 4666 shost->max_lun = vport->cfg_max_luns; 4667 shost->this_id = -1; 4668 shost->max_cmd_len = 16; 4669 4670 if (phba->sli_rev == LPFC_SLI_REV4) { 4671 if (!phba->cfg_fcp_mq_threshold || 4672 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4673 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4674 4675 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4676 phba->cfg_fcp_mq_threshold); 4677 4678 shost->dma_boundary = 4679 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4680 4681 if (phba->cfg_xpsgl && 
!phba->nvmet_support) 4682 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4683 else 4684 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4685 } else 4686 /* SLI-3 has a limited number of hardware queues (3), 4687 * thus there is only one for FCP processing. 4688 */ 4689 shost->nr_hw_queues = 1; 4690 4691 /* 4692 * Set initial can_queue value since 0 is no longer supported and 4693 * scsi_add_host will fail. This will be adjusted later based on the 4694 * max xri value determined in hba setup. 4695 */ 4696 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4697 if (dev != &phba->pcidev->dev) { 4698 shost->transportt = lpfc_vport_transport_template; 4699 vport->port_type = LPFC_NPIV_PORT; 4700 } else { 4701 shost->transportt = lpfc_transport_template; 4702 vport->port_type = LPFC_PHYSICAL_PORT; 4703 } 4704 4705 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4706 "9081 CreatePort TMPLATE type %x TBLsize %d " 4707 "SEGcnt %d/%d\n", 4708 vport->port_type, shost->sg_tablesize, 4709 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4710 4711 /* Allocate the resources for VMID */ 4712 rc = lpfc_vmid_res_alloc(phba, vport); 4713 4714 if (rc) 4715 goto out; 4716 4717 /* Initialize all internally managed lists. */ 4718 INIT_LIST_HEAD(&vport->fc_nodes); 4719 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4720 spin_lock_init(&vport->work_port_lock); 4721 4722 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4723 4724 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4725 4726 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4727 4728 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4729 lpfc_setup_bg(phba, shost); 4730 4731 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4732 if (error) 4733 goto out_put_shost; 4734 4735 spin_lock_irq(&phba->port_list_lock); 4736 list_add_tail(&vport->listentry, &phba->port_list); 4737 spin_unlock_irq(&phba->port_list_lock); 4738 return vport; 4739 4740 out_put_shost: 4741 kfree(vport->vmid); 4742 bitmap_free(vport->vmid_priority_range); 4743 scsi_host_put(shost); 4744 out: 4745 return NULL; 4746 } 4747 4748 /** 4749 * destroy_port - destroy an FC port 4750 * @vport: pointer to an lpfc virtual N_Port data structure. 4751 * 4752 * This routine destroys a FC port from the upper layer protocol. All the 4753 * resources associated with the port are released. 4754 **/ 4755 void 4756 destroy_port(struct lpfc_vport *vport) 4757 { 4758 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4759 struct lpfc_hba *phba = vport->phba; 4760 4761 lpfc_debugfs_terminate(vport); 4762 fc_remove_host(shost); 4763 scsi_remove_host(shost); 4764 4765 spin_lock_irq(&phba->port_list_lock); 4766 list_del_init(&vport->listentry); 4767 spin_unlock_irq(&phba->port_list_lock); 4768 4769 lpfc_cleanup(vport); 4770 return; 4771 } 4772 4773 /** 4774 * lpfc_get_instance - Get a unique integer ID 4775 * 4776 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4777 * uses the kernel idr facility to perform the task. 4778 * 4779 * Return codes: 4780 * instance - a unique integer ID allocated as the new instance. 4781 * -1 - lpfc get instance failed. 4782 **/ 4783 int 4784 lpfc_get_instance(void) 4785 { 4786 int ret; 4787 4788 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4789 return ret < 0 ? -1 : ret; 4790 } 4791 4792 /** 4793 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4794 * @shost: pointer to SCSI host data structure. 4795 * @time: elapsed time of the scan in jiffies. 
4796 * 4797 * This routine is called by the SCSI layer with a SCSI host to determine 4798 * whether the scan host is finished. 4799 * 4800 * Note: there is no scan_start function as adapter initialization will have 4801 * asynchronously kicked off the link initialization. 4802 * 4803 * Return codes 4804 * 0 - SCSI host scan is not over yet. 4805 * 1 - SCSI host scan is over. 4806 **/ 4807 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4808 { 4809 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4810 struct lpfc_hba *phba = vport->phba; 4811 int stat = 0; 4812 4813 spin_lock_irq(shost->host_lock); 4814 4815 if (vport->load_flag & FC_UNLOADING) { 4816 stat = 1; 4817 goto finished; 4818 } 4819 if (time >= msecs_to_jiffies(30 * 1000)) { 4820 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4821 "0461 Scanning longer than 30 " 4822 "seconds. Continuing initialization\n"); 4823 stat = 1; 4824 goto finished; 4825 } 4826 if (time >= msecs_to_jiffies(15 * 1000) && 4827 phba->link_state <= LPFC_LINK_DOWN) { 4828 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4829 "0465 Link down longer than 15 " 4830 "seconds. Continuing initialization\n"); 4831 stat = 1; 4832 goto finished; 4833 } 4834 4835 if (vport->port_state != LPFC_VPORT_READY) 4836 goto finished; 4837 if (vport->num_disc_nodes || vport->fc_prli_sent) 4838 goto finished; 4839 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4840 goto finished; 4841 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4842 goto finished; 4843 4844 stat = 1; 4845 4846 finished: 4847 spin_unlock_irq(shost->host_lock); 4848 return stat; 4849 } 4850 4851 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4852 { 4853 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4854 struct lpfc_hba *phba = vport->phba; 4855 4856 fc_host_supported_speeds(shost) = 0; 4857 /* 4858 * Avoid reporting supported link speed for FCoE as it can't be 4859 * controlled via FCoE. 4860 */ 4861 if (phba->hba_flag & HBA_FCOE_MODE) 4862 return; 4863 4864 if (phba->lmt & LMT_256Gb) 4865 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; 4866 if (phba->lmt & LMT_128Gb) 4867 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4868 if (phba->lmt & LMT_64Gb) 4869 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4870 if (phba->lmt & LMT_32Gb) 4871 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4872 if (phba->lmt & LMT_16Gb) 4873 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4874 if (phba->lmt & LMT_10Gb) 4875 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4876 if (phba->lmt & LMT_8Gb) 4877 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4878 if (phba->lmt & LMT_4Gb) 4879 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4880 if (phba->lmt & LMT_2Gb) 4881 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4882 if (phba->lmt & LMT_1Gb) 4883 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4884 } 4885 4886 /** 4887 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4888 * @shost: pointer to SCSI host data structure. 4889 * 4890 * This routine initializes a given SCSI host attributes on a FC port. The 4891 * SCSI host can be either on top of a physical port or a virtual port. 4892 **/ 4893 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4894 { 4895 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4896 struct lpfc_hba *phba = vport->phba; 4897 /* 4898 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
4899 */ 4900 4901 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4902 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4903 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4904 4905 memset(fc_host_supported_fc4s(shost), 0, 4906 sizeof(fc_host_supported_fc4s(shost))); 4907 fc_host_supported_fc4s(shost)[2] = 1; 4908 fc_host_supported_fc4s(shost)[7] = 1; 4909 4910 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4911 sizeof fc_host_symbolic_name(shost)); 4912 4913 lpfc_host_supported_speeds_set(shost); 4914 4915 fc_host_maxframe_size(shost) = 4916 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4917 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4918 4919 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4920 4921 /* This value is also unchanging */ 4922 memset(fc_host_active_fc4s(shost), 0, 4923 sizeof(fc_host_active_fc4s(shost))); 4924 fc_host_active_fc4s(shost)[2] = 1; 4925 fc_host_active_fc4s(shost)[7] = 1; 4926 4927 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4928 spin_lock_irq(shost->host_lock); 4929 vport->load_flag &= ~FC_LOADING; 4930 spin_unlock_irq(shost->host_lock); 4931 } 4932 4933 /** 4934 * lpfc_stop_port_s3 - Stop SLI3 device port 4935 * @phba: pointer to lpfc hba data structure. 4936 * 4937 * This routine is invoked to stop an SLI3 device port, it stops the device 4938 * from generating interrupts and stops the device driver's timers for the 4939 * device. 4940 **/ 4941 static void 4942 lpfc_stop_port_s3(struct lpfc_hba *phba) 4943 { 4944 /* Clear all interrupt enable conditions */ 4945 writel(0, phba->HCregaddr); 4946 readl(phba->HCregaddr); /* flush */ 4947 /* Clear all pending interrupts */ 4948 writel(0xffffffff, phba->HAregaddr); 4949 readl(phba->HAregaddr); /* flush */ 4950 4951 /* Reset some HBA SLI setup states */ 4952 lpfc_stop_hba_timers(phba); 4953 phba->pport->work_port_events = 0; 4954 } 4955 4956 /** 4957 * lpfc_stop_port_s4 - Stop SLI4 device port 4958 * @phba: pointer to lpfc hba data structure. 4959 * 4960 * This routine is invoked to stop an SLI4 device port, it stops the device 4961 * from generating interrupts and stops the device driver's timers for the 4962 * device. 4963 **/ 4964 static void 4965 lpfc_stop_port_s4(struct lpfc_hba *phba) 4966 { 4967 /* Reset some HBA SLI4 setup states */ 4968 lpfc_stop_hba_timers(phba); 4969 if (phba->pport) 4970 phba->pport->work_port_events = 0; 4971 phba->sli4_hba.intr_enable = 0; 4972 } 4973 4974 /** 4975 * lpfc_stop_port - Wrapper function for stopping hba port 4976 * @phba: Pointer to HBA context object. 4977 * 4978 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4979 * the API jump table function pointer from the lpfc_hba struct. 4980 **/ 4981 void 4982 lpfc_stop_port(struct lpfc_hba *phba) 4983 { 4984 phba->lpfc_stop_port(phba); 4985 4986 if (phba->wq) 4987 flush_workqueue(phba->wq); 4988 } 4989 4990 /** 4991 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4992 * @phba: Pointer to hba for which this call is being executed. 4993 * 4994 * This routine starts the timer waiting for the FCF rediscovery to complete. 
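 * The redisc_wait timer is armed for LPFC_FCF_REDISCOVER_WAIT_TMO
 * milliseconds, FCF_AVAILABLE and FCF_SCAN_DONE are cleared so that new FCF
 * asynchronous events can be acted on, and FCF_REDISC_PEND is set to mark
 * the rediscovery as pending.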
4995 **/
4996 void
4997 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4998 {
4999 unsigned long fcf_redisc_wait_tmo =
5000 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5001 /* Start fcf rediscovery wait period timer */
5002 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5003 spin_lock_irq(&phba->hbalock);
5004 /* Allow action to new fcf asynchronous event */
5005 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5006 /* Mark the FCF rediscovery pending state */
5007 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5008 spin_unlock_irq(&phba->hbalock);
5009 }
5010
5011 /**
5012 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5013 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5014 *
5015 * This routine is invoked when the wait for FCF table rediscovery has
5016 * timed out. If new FCF record(s) have been discovered during the
5017 * wait period, a new FCF event is added to the FCoE async event
5018 * list and the worker thread is woken up to process it from the
5019 * worker thread context.
5020 **/
5021 static void
5022 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5023 {
5024 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5025
5026 /* Don't send FCF rediscovery event if timer cancelled */
5027 spin_lock_irq(&phba->hbalock);
5028 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5029 spin_unlock_irq(&phba->hbalock);
5030 return;
5031 }
5032 /* Clear FCF rediscovery timer pending flag */
5033 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5034 /* FCF rediscovery event to worker thread */
5035 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5036 spin_unlock_irq(&phba->hbalock);
5037 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5038 "2776 FCF rediscover quiescent timer expired\n");
5039 /* wake up worker thread */
5040 lpfc_worker_wake_up(phba);
5041 }
5042
5043 /**
5044 * lpfc_vmid_poll - VMID timeout detection
5045 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5046 *
5047 * This routine is invoked when a VM has issued no I/O for the specified
5048 * amount of time. When this situation is detected, the VMID has to be
5049 * deregistered from the switch and all the local resources freed. The VMID
5050 * will be reassigned to the VM once the I/O begins.
5051 **/
5052 static void
5053 lpfc_vmid_poll(struct timer_list *t)
5054 {
5055 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5056 u32 wake_up = 0;
5057
5058 /* check if there is a need to issue QFPA */
5059 if (phba->pport->vmid_priority_tagging) {
5060 wake_up = 1;
5061 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5062 }
5063
5064 /* Is the vmid inactivity timer enabled */
5065 if (phba->pport->vmid_inactivity_timeout ||
5066 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5067 wake_up = 1;
5068 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5069 }
5070
5071 if (wake_up)
5072 lpfc_worker_wake_up(phba);
5073
5074 /* restart the timer for the next iteration */
5075 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5076 LPFC_VMID_TIMER));
5077 }
5078
5079 /**
5080 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5081 * @phba: pointer to lpfc hba data structure.
5082 * @acqe_link: pointer to the async link completion queue entry.
5083 *
5084 * This routine is to parse the SLI4 link-attention link fault code.
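 * Known fault codes (none, local, remote and LR/LRR) are accepted silently;
 * any other value is logged as an unknown link fault.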
5085 **/ 5086 static void 5087 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 5088 struct lpfc_acqe_link *acqe_link) 5089 { 5090 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 5091 case LPFC_ASYNC_LINK_FAULT_NONE: 5092 case LPFC_ASYNC_LINK_FAULT_LOCAL: 5093 case LPFC_ASYNC_LINK_FAULT_REMOTE: 5094 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 5095 break; 5096 default: 5097 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5098 "0398 Unknown link fault code: x%x\n", 5099 bf_get(lpfc_acqe_link_fault, acqe_link)); 5100 break; 5101 } 5102 } 5103 5104 /** 5105 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 5106 * @phba: pointer to lpfc hba data structure. 5107 * @acqe_link: pointer to the async link completion queue entry. 5108 * 5109 * This routine is to parse the SLI4 link attention type and translate it 5110 * into the base driver's link attention type coding. 5111 * 5112 * Return: Link attention type in terms of base driver's coding. 5113 **/ 5114 static uint8_t 5115 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 5116 struct lpfc_acqe_link *acqe_link) 5117 { 5118 uint8_t att_type; 5119 5120 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 5121 case LPFC_ASYNC_LINK_STATUS_DOWN: 5122 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 5123 att_type = LPFC_ATT_LINK_DOWN; 5124 break; 5125 case LPFC_ASYNC_LINK_STATUS_UP: 5126 /* Ignore physical link up events - wait for logical link up */ 5127 att_type = LPFC_ATT_RESERVED; 5128 break; 5129 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 5130 att_type = LPFC_ATT_LINK_UP; 5131 break; 5132 default: 5133 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5134 "0399 Invalid link attention type: x%x\n", 5135 bf_get(lpfc_acqe_link_status, acqe_link)); 5136 att_type = LPFC_ATT_RESERVED; 5137 break; 5138 } 5139 return att_type; 5140 } 5141 5142 /** 5143 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 5144 * @phba: pointer to lpfc hba data structure. 5145 * 5146 * This routine is to get an SLI3 FC port's link speed in Mbps. 5147 * 5148 * Return: link speed in terms of Mbps. 5149 **/ 5150 uint32_t 5151 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 5152 { 5153 uint32_t link_speed; 5154 5155 if (!lpfc_is_link_up(phba)) 5156 return 0; 5157 5158 if (phba->sli_rev <= LPFC_SLI_REV3) { 5159 switch (phba->fc_linkspeed) { 5160 case LPFC_LINK_SPEED_1GHZ: 5161 link_speed = 1000; 5162 break; 5163 case LPFC_LINK_SPEED_2GHZ: 5164 link_speed = 2000; 5165 break; 5166 case LPFC_LINK_SPEED_4GHZ: 5167 link_speed = 4000; 5168 break; 5169 case LPFC_LINK_SPEED_8GHZ: 5170 link_speed = 8000; 5171 break; 5172 case LPFC_LINK_SPEED_10GHZ: 5173 link_speed = 10000; 5174 break; 5175 case LPFC_LINK_SPEED_16GHZ: 5176 link_speed = 16000; 5177 break; 5178 default: 5179 link_speed = 0; 5180 } 5181 } else { 5182 if (phba->sli4_hba.link_state.logical_speed) 5183 link_speed = 5184 phba->sli4_hba.link_state.logical_speed; 5185 else 5186 link_speed = phba->sli4_hba.link_state.speed; 5187 } 5188 return link_speed; 5189 } 5190 5191 /** 5192 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 5193 * @phba: pointer to lpfc hba data structure. 5194 * @evt_code: asynchronous event code. 5195 * @speed_code: asynchronous event link speed code. 5196 * 5197 * This routine is to parse the giving SLI4 async event link speed code into 5198 * value of Mbps for the link speed. 5199 * 5200 * Return: link speed in terms of Mbps. 
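 * An unrecognized speed code, or an event code other than link or FC,
 * translates to a speed of 0.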
5201 **/ 5202 static uint32_t 5203 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 5204 uint8_t speed_code) 5205 { 5206 uint32_t port_speed; 5207 5208 switch (evt_code) { 5209 case LPFC_TRAILER_CODE_LINK: 5210 switch (speed_code) { 5211 case LPFC_ASYNC_LINK_SPEED_ZERO: 5212 port_speed = 0; 5213 break; 5214 case LPFC_ASYNC_LINK_SPEED_10MBPS: 5215 port_speed = 10; 5216 break; 5217 case LPFC_ASYNC_LINK_SPEED_100MBPS: 5218 port_speed = 100; 5219 break; 5220 case LPFC_ASYNC_LINK_SPEED_1GBPS: 5221 port_speed = 1000; 5222 break; 5223 case LPFC_ASYNC_LINK_SPEED_10GBPS: 5224 port_speed = 10000; 5225 break; 5226 case LPFC_ASYNC_LINK_SPEED_20GBPS: 5227 port_speed = 20000; 5228 break; 5229 case LPFC_ASYNC_LINK_SPEED_25GBPS: 5230 port_speed = 25000; 5231 break; 5232 case LPFC_ASYNC_LINK_SPEED_40GBPS: 5233 port_speed = 40000; 5234 break; 5235 case LPFC_ASYNC_LINK_SPEED_100GBPS: 5236 port_speed = 100000; 5237 break; 5238 default: 5239 port_speed = 0; 5240 } 5241 break; 5242 case LPFC_TRAILER_CODE_FC: 5243 switch (speed_code) { 5244 case LPFC_FC_LA_SPEED_UNKNOWN: 5245 port_speed = 0; 5246 break; 5247 case LPFC_FC_LA_SPEED_1G: 5248 port_speed = 1000; 5249 break; 5250 case LPFC_FC_LA_SPEED_2G: 5251 port_speed = 2000; 5252 break; 5253 case LPFC_FC_LA_SPEED_4G: 5254 port_speed = 4000; 5255 break; 5256 case LPFC_FC_LA_SPEED_8G: 5257 port_speed = 8000; 5258 break; 5259 case LPFC_FC_LA_SPEED_10G: 5260 port_speed = 10000; 5261 break; 5262 case LPFC_FC_LA_SPEED_16G: 5263 port_speed = 16000; 5264 break; 5265 case LPFC_FC_LA_SPEED_32G: 5266 port_speed = 32000; 5267 break; 5268 case LPFC_FC_LA_SPEED_64G: 5269 port_speed = 64000; 5270 break; 5271 case LPFC_FC_LA_SPEED_128G: 5272 port_speed = 128000; 5273 break; 5274 case LPFC_FC_LA_SPEED_256G: 5275 port_speed = 256000; 5276 break; 5277 default: 5278 port_speed = 0; 5279 } 5280 break; 5281 default: 5282 port_speed = 0; 5283 } 5284 return port_speed; 5285 } 5286 5287 /** 5288 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 5289 * @phba: pointer to lpfc hba data structure. 5290 * @acqe_link: pointer to the async link completion queue entry. 5291 * 5292 * This routine is to handle the SLI4 asynchronous FCoE link event. 
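 * Link attention events other than link-up and link-down are ignored. For a
 * native FC port a READ_TOPOLOGY mailbox command is issued to fetch the
 * topology; for an FCoE port the read-topology fields are filled in from the
 * ACQE and the read-topology completion handler is invoked directly.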
5293 **/ 5294 static void 5295 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 5296 struct lpfc_acqe_link *acqe_link) 5297 { 5298 struct lpfc_dmabuf *mp; 5299 LPFC_MBOXQ_t *pmb; 5300 MAILBOX_t *mb; 5301 struct lpfc_mbx_read_top *la; 5302 uint8_t att_type; 5303 int rc; 5304 5305 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 5306 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 5307 return; 5308 phba->fcoe_eventtag = acqe_link->event_tag; 5309 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5310 if (!pmb) { 5311 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5312 "0395 The mboxq allocation failed\n"); 5313 return; 5314 } 5315 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5316 if (!mp) { 5317 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5318 "0396 The lpfc_dmabuf allocation failed\n"); 5319 goto out_free_pmb; 5320 } 5321 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5322 if (!mp->virt) { 5323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5324 "0397 The mbuf allocation failed\n"); 5325 goto out_free_dmabuf; 5326 } 5327 5328 /* Cleanup any outstanding ELS commands */ 5329 lpfc_els_flush_all_cmd(phba); 5330 5331 /* Block ELS IOCBs until we have done process link event */ 5332 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5333 5334 /* Update link event statistics */ 5335 phba->sli.slistat.link_event++; 5336 5337 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5338 lpfc_read_topology(phba, pmb, mp); 5339 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5340 pmb->vport = phba->pport; 5341 5342 /* Keep the link status for extra SLI4 state machine reference */ 5343 phba->sli4_hba.link_state.speed = 5344 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 5345 bf_get(lpfc_acqe_link_speed, acqe_link)); 5346 phba->sli4_hba.link_state.duplex = 5347 bf_get(lpfc_acqe_link_duplex, acqe_link); 5348 phba->sli4_hba.link_state.status = 5349 bf_get(lpfc_acqe_link_status, acqe_link); 5350 phba->sli4_hba.link_state.type = 5351 bf_get(lpfc_acqe_link_type, acqe_link); 5352 phba->sli4_hba.link_state.number = 5353 bf_get(lpfc_acqe_link_number, acqe_link); 5354 phba->sli4_hba.link_state.fault = 5355 bf_get(lpfc_acqe_link_fault, acqe_link); 5356 phba->sli4_hba.link_state.logical_speed = 5357 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 5358 5359 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5360 "2900 Async FC/FCoE Link event - Speed:%dGBit " 5361 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 5362 "Logical speed:%dMbps Fault:%d\n", 5363 phba->sli4_hba.link_state.speed, 5364 phba->sli4_hba.link_state.topology, 5365 phba->sli4_hba.link_state.status, 5366 phba->sli4_hba.link_state.type, 5367 phba->sli4_hba.link_state.number, 5368 phba->sli4_hba.link_state.logical_speed, 5369 phba->sli4_hba.link_state.fault); 5370 /* 5371 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 5372 * topology info. Note: Optional for non FC-AL ports. 5373 */ 5374 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5375 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5376 if (rc == MBX_NOT_FINISHED) { 5377 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5378 goto out_free_dmabuf; 5379 } 5380 return; 5381 } 5382 /* 5383 * For FCoE Mode: fill in all the topology information we need and call 5384 * the READ_TOPOLOGY completion routine to continue without actually 5385 * sending the READ_TOPOLOGY mailbox command to the port. 
5386 */ 5387 /* Initialize completion status */ 5388 mb = &pmb->u.mb; 5389 mb->mbxStatus = MBX_SUCCESS; 5390 5391 /* Parse port fault information field */ 5392 lpfc_sli4_parse_latt_fault(phba, acqe_link); 5393 5394 /* Parse and translate link attention fields */ 5395 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 5396 la->eventTag = acqe_link->event_tag; 5397 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 5398 bf_set(lpfc_mbx_read_top_link_spd, la, 5399 (bf_get(lpfc_acqe_link_speed, acqe_link))); 5400 5401 /* Fake the following irrelevant fields */ 5402 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 5403 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 5404 bf_set(lpfc_mbx_read_top_il, la, 0); 5405 bf_set(lpfc_mbx_read_top_pb, la, 0); 5406 bf_set(lpfc_mbx_read_top_fa, la, 0); 5407 bf_set(lpfc_mbx_read_top_mm, la, 0); 5408 5409 /* Invoke the lpfc_handle_latt mailbox command callback function */ 5410 lpfc_mbx_cmpl_read_topology(phba, pmb); 5411 5412 return; 5413 5414 out_free_dmabuf: 5415 kfree(mp); 5416 out_free_pmb: 5417 mempool_free(pmb, phba->mbox_mem_pool); 5418 } 5419 5420 /** 5421 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read 5422 * topology. 5423 * @phba: pointer to lpfc hba data structure. 5424 * @speed_code: asynchronous event link speed code. 5425 * 5426 * This routine is to parse the given SLI4 async event link speed code into 5427 * value of Read topology link speed. 5428 * 5429 * Return: link speed in terms of Read topology. 5430 **/ 5431 static uint8_t 5432 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) 5433 { 5434 uint8_t port_speed; 5435 5436 switch (speed_code) { 5437 case LPFC_FC_LA_SPEED_1G: 5438 port_speed = LPFC_LINK_SPEED_1GHZ; 5439 break; 5440 case LPFC_FC_LA_SPEED_2G: 5441 port_speed = LPFC_LINK_SPEED_2GHZ; 5442 break; 5443 case LPFC_FC_LA_SPEED_4G: 5444 port_speed = LPFC_LINK_SPEED_4GHZ; 5445 break; 5446 case LPFC_FC_LA_SPEED_8G: 5447 port_speed = LPFC_LINK_SPEED_8GHZ; 5448 break; 5449 case LPFC_FC_LA_SPEED_16G: 5450 port_speed = LPFC_LINK_SPEED_16GHZ; 5451 break; 5452 case LPFC_FC_LA_SPEED_32G: 5453 port_speed = LPFC_LINK_SPEED_32GHZ; 5454 break; 5455 case LPFC_FC_LA_SPEED_64G: 5456 port_speed = LPFC_LINK_SPEED_64GHZ; 5457 break; 5458 case LPFC_FC_LA_SPEED_128G: 5459 port_speed = LPFC_LINK_SPEED_128GHZ; 5460 break; 5461 case LPFC_FC_LA_SPEED_256G: 5462 port_speed = LPFC_LINK_SPEED_256GHZ; 5463 break; 5464 default: 5465 port_speed = 0; 5466 break; 5467 } 5468 5469 return port_speed; 5470 } 5471 5472 void 5473 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba) 5474 { 5475 struct rxtable_entry *entry; 5476 int cnt = 0, head, tail, last, start; 5477 5478 head = atomic_read(&phba->rxtable_idx_head); 5479 tail = atomic_read(&phba->rxtable_idx_tail); 5480 if (!phba->rxtable || head == tail) { 5481 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 5482 "4411 Rxtable is empty\n"); 5483 return; 5484 } 5485 last = tail; 5486 start = head; 5487 5488 /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */ 5489 while (start != last) { 5490 if (start) 5491 start--; 5492 else 5493 start = LPFC_MAX_RXMONITOR_ENTRY - 1; 5494 entry = &phba->rxtable[start]; 5495 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5496 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld " 5497 "Lat %lld ASz %lld Info %02d BWUtil %d " 5498 "Int %d slot %d\n", 5499 cnt, entry->max_bytes_per_interval, 5500 entry->total_bytes, entry->rcv_bytes, 5501 entry->avg_io_latency, entry->avg_io_size, 5502 entry->cmf_info,
entry->timer_utilization, 5503 entry->timer_interval, start); 5504 cnt++; 5505 if (cnt >= LPFC_MAX_RXMONITOR_DUMP) 5506 return; 5507 } 5508 } 5509 5510 /** 5511 * lpfc_cgn_update_stat - Save data into congestion stats buffer 5512 * @phba: pointer to lpfc hba data structure. 5513 * @dtag: FPIN descriptor received 5514 * 5515 * Increment the FPIN received counter/time when it happens. 5516 */ 5517 void 5518 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) 5519 { 5520 struct lpfc_cgn_info *cp; 5521 struct tm broken; 5522 struct timespec64 cur_time; 5523 u32 cnt; 5524 u16 value; 5525 5526 /* Make sure we have a congestion info buffer */ 5527 if (!phba->cgn_i) 5528 return; 5529 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5530 ktime_get_real_ts64(&cur_time); 5531 time64_to_tm(cur_time.tv_sec, 0, &broken); 5532 5533 /* Update congestion statistics */ 5534 switch (dtag) { 5535 case ELS_DTAG_LNK_INTEGRITY: 5536 cnt = le32_to_cpu(cp->link_integ_notification); 5537 cnt++; 5538 cp->link_integ_notification = cpu_to_le32(cnt); 5539 5540 cp->cgn_stat_lnk_month = broken.tm_mon + 1; 5541 cp->cgn_stat_lnk_day = broken.tm_mday; 5542 cp->cgn_stat_lnk_year = broken.tm_year - 100; 5543 cp->cgn_stat_lnk_hour = broken.tm_hour; 5544 cp->cgn_stat_lnk_min = broken.tm_min; 5545 cp->cgn_stat_lnk_sec = broken.tm_sec; 5546 break; 5547 case ELS_DTAG_DELIVERY: 5548 cnt = le32_to_cpu(cp->delivery_notification); 5549 cnt++; 5550 cp->delivery_notification = cpu_to_le32(cnt); 5551 5552 cp->cgn_stat_del_month = broken.tm_mon + 1; 5553 cp->cgn_stat_del_day = broken.tm_mday; 5554 cp->cgn_stat_del_year = broken.tm_year - 100; 5555 cp->cgn_stat_del_hour = broken.tm_hour; 5556 cp->cgn_stat_del_min = broken.tm_min; 5557 cp->cgn_stat_del_sec = broken.tm_sec; 5558 break; 5559 case ELS_DTAG_PEER_CONGEST: 5560 cnt = le32_to_cpu(cp->cgn_peer_notification); 5561 cnt++; 5562 cp->cgn_peer_notification = cpu_to_le32(cnt); 5563 5564 cp->cgn_stat_peer_month = broken.tm_mon + 1; 5565 cp->cgn_stat_peer_day = broken.tm_mday; 5566 cp->cgn_stat_peer_year = broken.tm_year - 100; 5567 cp->cgn_stat_peer_hour = broken.tm_hour; 5568 cp->cgn_stat_peer_min = broken.tm_min; 5569 cp->cgn_stat_peer_sec = broken.tm_sec; 5570 break; 5571 case ELS_DTAG_CONGESTION: 5572 cnt = le32_to_cpu(cp->cgn_notification); 5573 cnt++; 5574 cp->cgn_notification = cpu_to_le32(cnt); 5575 5576 cp->cgn_stat_cgn_month = broken.tm_mon + 1; 5577 cp->cgn_stat_cgn_day = broken.tm_mday; 5578 cp->cgn_stat_cgn_year = broken.tm_year - 100; 5579 cp->cgn_stat_cgn_hour = broken.tm_hour; 5580 cp->cgn_stat_cgn_min = broken.tm_min; 5581 cp->cgn_stat_cgn_sec = broken.tm_sec; 5582 } 5583 if (phba->cgn_fpin_frequency && 5584 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5585 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5586 cp->cgn_stat_npm = value; 5587 } 5588 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5589 LPFC_CGN_CRC32_SEED); 5590 cp->cgn_info_crc = cpu_to_le32(value); 5591 } 5592 5593 /** 5594 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer 5595 * @phba: pointer to lpfc hba data structure. 5596 * 5597 * Save the congestion event data every minute. 5598 * On the hour collapse all the minute data into hour data. Every day 5599 * collapse all the hour data into daily data. Separate driver 5600 * and fabrc congestion event counters that will be saved out 5601 * to the registered congestion buffer every minute. 
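 * Bandwidth example (illustrative only, assuming LPFC_SEC_MIN is 60 seconds): a minute that counted 12,288,000 512-byte blocks works out to 12,288,000 / 60 * 512 = 104,857,600 bytes/sec, which is stored as 104,857,600 / (1024 * 1024) = 100 MB/sec in that minute's bandwidth slot.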
5602 */ 5603 static void 5604 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) 5605 { 5606 struct lpfc_cgn_info *cp; 5607 struct tm broken; 5608 struct timespec64 cur_time; 5609 uint32_t i, index; 5610 uint16_t value, mvalue; 5611 uint64_t bps; 5612 uint32_t mbps; 5613 uint32_t dvalue, wvalue, lvalue, avalue; 5614 uint64_t latsum; 5615 __le16 *ptr; 5616 __le32 *lptr; 5617 __le16 *mptr; 5618 5619 /* Make sure we have a congestion info buffer */ 5620 if (!phba->cgn_i) 5621 return; 5622 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5623 5624 if (time_before(jiffies, phba->cgn_evt_timestamp)) 5625 return; 5626 phba->cgn_evt_timestamp = jiffies + 5627 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 5628 phba->cgn_evt_minute++; 5629 5630 /* We should get to this point in the routine on 1 minute intervals */ 5631 5632 ktime_get_real_ts64(&cur_time); 5633 time64_to_tm(cur_time.tv_sec, 0, &broken); 5634 5635 if (phba->cgn_fpin_frequency && 5636 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5637 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5638 cp->cgn_stat_npm = value; 5639 } 5640 5641 /* Read and clear the latency counters for this minute */ 5642 lvalue = atomic_read(&phba->cgn_latency_evt_cnt); 5643 latsum = atomic64_read(&phba->cgn_latency_evt); 5644 atomic_set(&phba->cgn_latency_evt_cnt, 0); 5645 atomic64_set(&phba->cgn_latency_evt, 0); 5646 5647 /* We need to store MB/sec bandwidth in the congestion information. 5648 * block_cnt is count of 512 byte blocks for the entire minute, 5649 * bps will get bytes per sec before finally converting to MB/sec. 5650 */ 5651 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; 5652 phba->rx_block_cnt = 0; 5653 mvalue = bps / (1024 * 1024); /* convert to MB/sec */ 5654 5655 /* Every minute */ 5656 /* cgn parameters */ 5657 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 5658 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 5659 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 5660 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 5661 5662 /* Fill in default LUN qdepth */ 5663 value = (uint16_t)(phba->pport->cfg_lun_queue_depth); 5664 cp->cgn_lunq = cpu_to_le16(value); 5665 5666 /* Record congestion buffer info - every minute 5667 * cgn_driver_evt_cnt (Driver events) 5668 * cgn_fabric_warn_cnt (Congestion Warnings) 5669 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) 5670 * cgn_fabric_alarm_cnt (Congestion Alarms) 5671 */ 5672 index = ++cp->cgn_index_minute; 5673 if (cp->cgn_index_minute == LPFC_MIN_HOUR) { 5674 cp->cgn_index_minute = 0; 5675 index = 0; 5676 } 5677 5678 /* Get the number of driver events in this sample and reset counter */ 5679 dvalue = atomic_read(&phba->cgn_driver_evt_cnt); 5680 atomic_set(&phba->cgn_driver_evt_cnt, 0); 5681 5682 /* Get the number of warning events - FPIN and Signal for this minute */ 5683 wvalue = 0; 5684 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || 5685 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5686 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5687 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); 5688 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 5689 5690 /* Get the number of alarm events - FPIN and Signal for this minute */ 5691 avalue = 0; 5692 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || 5693 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5694 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); 5695 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 5696 5697 /* Collect the driver, warning, alarm and latency counts for this 5698 * minute into the driver congestion buffer. 
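 * The minute index wraps at LPFC_MIN_HOUR entries. The latency slot holds the average latency for the minute (latsum / lvalue, or 0 if there were no samples), the bandwidth slot holds the MB/sec value computed above, and all slots are written as little-endian values into the congestion info buffer.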
5699 */ 5700 ptr = &cp->cgn_drvr_min[index]; 5701 value = (uint16_t)dvalue; 5702 *ptr = cpu_to_le16(value); 5703 5704 ptr = &cp->cgn_warn_min[index]; 5705 value = (uint16_t)wvalue; 5706 *ptr = cpu_to_le16(value); 5707 5708 ptr = &cp->cgn_alarm_min[index]; 5709 value = (uint16_t)avalue; 5710 *ptr = cpu_to_le16(value); 5711 5712 lptr = &cp->cgn_latency_min[index]; 5713 if (lvalue) { 5714 lvalue = (uint32_t)div_u64(latsum, lvalue); 5715 *lptr = cpu_to_le32(lvalue); 5716 } else { 5717 *lptr = 0; 5718 } 5719 5720 /* Collect the bandwidth value into the driver's congestion buffer. */ 5721 mptr = &cp->cgn_bw_min[index]; 5722 *mptr = cpu_to_le16(mvalue); 5723 5724 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5725 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n", 5726 index, dvalue, wvalue, *lptr, mvalue, avalue); 5727 5728 /* Every hour */ 5729 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) { 5730 /* Record congestion buffer info - every hour 5731 * Collapse all minutes into an hour 5732 */ 5733 index = ++cp->cgn_index_hour; 5734 if (cp->cgn_index_hour == LPFC_HOUR_DAY) { 5735 cp->cgn_index_hour = 0; 5736 index = 0; 5737 } 5738 5739 dvalue = 0; 5740 wvalue = 0; 5741 lvalue = 0; 5742 avalue = 0; 5743 mvalue = 0; 5744 mbps = 0; 5745 for (i = 0; i < LPFC_MIN_HOUR; i++) { 5746 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]); 5747 wvalue += le16_to_cpu(cp->cgn_warn_min[i]); 5748 lvalue += le32_to_cpu(cp->cgn_latency_min[i]); 5749 mbps += le16_to_cpu(cp->cgn_bw_min[i]); 5750 avalue += le16_to_cpu(cp->cgn_alarm_min[i]); 5751 } 5752 if (lvalue) /* Avg of latency averages */ 5753 lvalue /= LPFC_MIN_HOUR; 5754 if (mbps) /* Avg of Bandwidth averages */ 5755 mvalue = mbps / LPFC_MIN_HOUR; 5756 5757 lptr = &cp->cgn_drvr_hr[index]; 5758 *lptr = cpu_to_le32(dvalue); 5759 lptr = &cp->cgn_warn_hr[index]; 5760 *lptr = cpu_to_le32(wvalue); 5761 lptr = &cp->cgn_latency_hr[index]; 5762 *lptr = cpu_to_le32(lvalue); 5763 mptr = &cp->cgn_bw_hr[index]; 5764 *mptr = cpu_to_le16(mvalue); 5765 lptr = &cp->cgn_alarm_hr[index]; 5766 *lptr = cpu_to_le32(avalue); 5767 5768 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5769 "2419 Congestion Info - hour " 5770 "(%d): %d %d %d %d %d\n", 5771 index, dvalue, wvalue, lvalue, mvalue, avalue); 5772 } 5773 5774 /* Every day */ 5775 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) { 5776 /* Record congestion buffer info - every day 5777 * Collapse all hours into a day. Rotate days 5778 * after LPFC_MAX_CGN_DAYS. 5779 */ 5780 index = ++cp->cgn_index_day; 5781 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { 5782 cp->cgn_index_day = 0; 5783 index = 0; 5784 } 5785 5786 /* Anytime we overwrite daily index 0, after we wrap, 5787 * we will be overwriting the oldest day, so we must 5788 * update the congestion data start time for that day. 5789 * That start time should have previously been saved after 5790 * we wrote the last day's worth of data.
5791 */ 5792 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { 5793 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); 5794 5795 cp->cgn_info_month = broken.tm_mon + 1; 5796 cp->cgn_info_day = broken.tm_mday; 5797 cp->cgn_info_year = broken.tm_year - 100; 5798 cp->cgn_info_hour = broken.tm_hour; 5799 cp->cgn_info_minute = broken.tm_min; 5800 cp->cgn_info_second = broken.tm_sec; 5801 5802 lpfc_printf_log 5803 (phba, KERN_INFO, LOG_CGN_MGMT, 5804 "2646 CGNInfo idx0 Start Time: " 5805 "%d/%d/%d %d:%d:%d\n", 5806 cp->cgn_info_day, cp->cgn_info_month, 5807 cp->cgn_info_year, cp->cgn_info_hour, 5808 cp->cgn_info_minute, cp->cgn_info_second); 5809 } 5810 5811 dvalue = 0; 5812 wvalue = 0; 5813 lvalue = 0; 5814 mvalue = 0; 5815 mbps = 0; 5816 avalue = 0; 5817 for (i = 0; i < LPFC_HOUR_DAY; i++) { 5818 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); 5819 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); 5820 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); 5821 mbps += le16_to_cpu(cp->cgn_bw_hr[i]); 5822 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); 5823 } 5824 if (lvalue) /* Avg of latency averages */ 5825 lvalue /= LPFC_HOUR_DAY; 5826 if (mbps) /* Avg of Bandwidth averages */ 5827 mvalue = mbps / LPFC_HOUR_DAY; 5828 5829 lptr = &cp->cgn_drvr_day[index]; 5830 *lptr = cpu_to_le32(dvalue); 5831 lptr = &cp->cgn_warn_day[index]; 5832 *lptr = cpu_to_le32(wvalue); 5833 lptr = &cp->cgn_latency_day[index]; 5834 *lptr = cpu_to_le32(lvalue); 5835 mptr = &cp->cgn_bw_day[index]; 5836 *mptr = cpu_to_le16(mvalue); 5837 lptr = &cp->cgn_alarm_day[index]; 5838 *lptr = cpu_to_le32(avalue); 5839 5840 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5841 "2420 Congestion Info - daily (%d): " 5842 "%d %d %d %d %d\n", 5843 index, dvalue, wvalue, lvalue, mvalue, avalue); 5844 5845 /* We just wrote LPFC_MAX_CGN_DAYS of data, 5846 * so we are wrapped on any data after this. 5847 * Save this as the start time for the next day. 5848 */ 5849 if (index == (LPFC_MAX_CGN_DAYS - 1)) { 5850 phba->hba_flag |= HBA_CGN_DAY_WRAP; 5851 ktime_get_real_ts64(&phba->cgn_daily_ts); 5852 } 5853 } 5854 5855 /* Use the frequency found in the last rcv'ed FPIN */ 5856 value = phba->cgn_fpin_frequency; 5857 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) 5858 cp->cgn_warn_freq = cpu_to_le16(value); 5859 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) 5860 cp->cgn_alarm_freq = cpu_to_le16(value); 5861 5862 /* Frequency (in ms) at which Signal Warning/Signal Congestion 5863 * Notifications are received by the HBA 5864 */ 5865 value = phba->cgn_sig_freq; 5866 5867 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5868 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5869 cp->cgn_warn_freq = cpu_to_le16(value); 5870 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5871 cp->cgn_alarm_freq = cpu_to_le16(value); 5872 5873 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5874 LPFC_CGN_CRC32_SEED); 5875 cp->cgn_info_crc = cpu_to_le32(lvalue); 5876 } 5877 5878 /** 5879 * lpfc_calc_cmf_latency - latency from start of the CMF timer interval 5880 * @phba: The Hba for which this call is being executed. 5881 * 5882 * The routine calculates the latency from the beginning of the CMF timer 5883 * interval to the current point in time. It is called from IO completion 5884 * when we exceed our Bandwidth limitation for the time interval.
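 * Example (illustrative): if the interval started at 10 s + 999,500,000 ns and the completion occurs at 11 s + 500,000 ns, the second-boundary branch below computes (11 - 10 - 1) * 1000 + ((1,000,000,000 - 999,500,000) + 500,000) / 1,000,000 = 1 ms.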
5885 */ 5886 uint32_t 5887 lpfc_calc_cmf_latency(struct lpfc_hba *phba) 5888 { 5889 struct timespec64 cmpl_time; 5890 uint32_t msec = 0; 5891 5892 ktime_get_real_ts64(&cmpl_time); 5893 5894 /* This routine works on a ms granularity so sec and usec are 5895 * converted accordingly. 5896 */ 5897 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { 5898 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / 5899 NSEC_PER_MSEC; 5900 } else { 5901 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { 5902 msec = (cmpl_time.tv_sec - 5903 phba->cmf_latency.tv_sec) * MSEC_PER_SEC; 5904 msec += ((cmpl_time.tv_nsec - 5905 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); 5906 } else { 5907 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - 5908 1) * MSEC_PER_SEC; 5909 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + 5910 cmpl_time.tv_nsec) / NSEC_PER_MSEC); 5911 } 5912 } 5913 return msec; 5914 } 5915 5916 /** 5917 * lpfc_cmf_timer - This is the timer function for one congestion 5918 * rate interval. 5919 * @timer: Pointer to the high resolution timer that expired 5920 */ 5921 static enum hrtimer_restart 5922 lpfc_cmf_timer(struct hrtimer *timer) 5923 { 5924 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, 5925 cmf_timer); 5926 struct rxtable_entry *entry; 5927 uint32_t io_cnt; 5928 uint32_t head, tail; 5929 uint32_t busy, max_read; 5930 uint64_t total, rcv, lat, mbpi, extra, cnt; 5931 int timer_interval = LPFC_CMF_INTERVAL; 5932 uint32_t ms; 5933 struct lpfc_cgn_stat *cgs; 5934 int cpu; 5935 5936 /* Only restart the timer if congestion mgmt is on */ 5937 if (phba->cmf_active_mode == LPFC_CFG_OFF || 5938 !phba->cmf_latency.tv_sec) { 5939 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5940 "6224 CMF timer exit: %d %lld\n", 5941 phba->cmf_active_mode, 5942 (uint64_t)phba->cmf_latency.tv_sec); 5943 return HRTIMER_NORESTART; 5944 } 5945 5946 /* If pport is not ready yet, just exit and wait for 5947 * the next timer cycle to hit. 5948 */ 5949 if (!phba->pport) 5950 goto skip; 5951 5952 /* Do not block SCSI IO while in the timer routine since 5953 * total_bytes will be cleared 5954 */ 5955 atomic_set(&phba->cmf_stop_io, 1); 5956 5957 /* First we need to calculate the actual ms between 5958 * the last timer interrupt and this one. We ask for 5959 * LPFC_CMF_INTERVAL, however the actual time may 5960 * vary depending on system overhead. 5961 */ 5962 ms = lpfc_calc_cmf_latency(phba); 5963 5964 5965 /* Immediately after we calculate the time since the last 5966 * timer interrupt, set the start time for the next 5967 * interrupt 5968 */ 5969 ktime_get_real_ts64(&phba->cmf_latency); 5970 5971 phba->cmf_link_byte_count = 5972 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); 5973 5974 /* Collect all the stats from the prior timer interval */ 5975 total = 0; 5976 io_cnt = 0; 5977 lat = 0; 5978 rcv = 0; 5979 for_each_present_cpu(cpu) { 5980 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 5981 total += atomic64_xchg(&cgs->total_bytes, 0); 5982 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); 5983 lat += atomic64_xchg(&cgs->rx_latency, 0); 5984 rcv += atomic64_xchg(&cgs->rcv_bytes, 0); 5985 } 5986 5987 /* Before we issue another CMF_SYNC_WQE, retrieve the BW 5988 * returned from the last CMF_SYNC_WQE issued, from 5989 * cmf_last_sync_bw. This will be the target BW for 5990 * this next timer interval. 
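 * If the measured interval (ms) came up short of LPFC_CMF_INTERVAL, total is extrapolated below: cnt = (total / ms) * LPFC_CMF_INTERVAL and extra = cnt - total, with cnt capped at mbpi when the interval was deliberately shortened (HBA_SHORT_CMF), so the CMF_SYNC_WQE reports a full interval's worth of traffic.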
5991 */ 5992 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 5993 phba->link_state != LPFC_LINK_DOWN && 5994 phba->hba_flag & HBA_SETUP) { 5995 mbpi = phba->cmf_last_sync_bw; 5996 phba->cmf_last_sync_bw = 0; 5997 extra = 0; 5998 5999 /* Calculate any extra bytes needed to account for the 6000 * timer accuracy. If we are less than LPFC_CMF_INTERVAL 6001 * calculate the adjustment needed for total to reflect 6002 * a full LPFC_CMF_INTERVAL. 6003 */ 6004 if (ms && ms < LPFC_CMF_INTERVAL) { 6005 cnt = div_u64(total, ms); /* bytes per ms */ 6006 cnt *= LPFC_CMF_INTERVAL; /* what total should be */ 6007 6008 /* If the timeout is scheduled to be shorter, 6009 * this value may skew the data, so cap it at mbpi. 6010 */ 6011 if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi) 6012 cnt = mbpi; 6013 6014 extra = cnt - total; 6015 } 6016 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); 6017 } else { 6018 /* For Monitor mode or link down we want mbpi 6019 * to be the full link speed 6020 */ 6021 mbpi = phba->cmf_link_byte_count; 6022 extra = 0; 6023 } 6024 phba->cmf_timer_cnt++; 6025 6026 if (io_cnt) { 6027 /* Update congestion info buffer latency in us */ 6028 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); 6029 atomic64_add(lat, &phba->cgn_latency_evt); 6030 } 6031 busy = atomic_xchg(&phba->cmf_busy, 0); 6032 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); 6033 6034 /* Calculate MBPI for the next timer interval */ 6035 if (mbpi) { 6036 if (mbpi > phba->cmf_link_byte_count || 6037 phba->cmf_active_mode == LPFC_CFG_MONITOR) 6038 mbpi = phba->cmf_link_byte_count; 6039 6040 /* Change max_bytes_per_interval to what the prior 6041 * CMF_SYNC_WQE cmpl indicated. 6042 */ 6043 if (mbpi != phba->cmf_max_bytes_per_interval) 6044 phba->cmf_max_bytes_per_interval = mbpi; 6045 } 6046 6047 /* Save rxmonitor information for debug */ 6048 if (phba->rxtable) { 6049 head = atomic_xchg(&phba->rxtable_idx_head, 6050 LPFC_RXMONITOR_TABLE_IN_USE); 6051 entry = &phba->rxtable[head]; 6052 entry->total_bytes = total; 6053 entry->cmf_bytes = total + extra; 6054 entry->rcv_bytes = rcv; 6055 entry->cmf_busy = busy; 6056 entry->cmf_info = phba->cmf_active_info; 6057 if (io_cnt) { 6058 entry->avg_io_latency = div_u64(lat, io_cnt); 6059 entry->avg_io_size = div_u64(rcv, io_cnt); 6060 } else { 6061 entry->avg_io_latency = 0; 6062 entry->avg_io_size = 0; 6063 } 6064 entry->max_read_cnt = max_read; 6065 entry->io_cnt = io_cnt; 6066 entry->max_bytes_per_interval = mbpi; 6067 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 6068 entry->timer_utilization = phba->cmf_last_ts; 6069 else 6070 entry->timer_utilization = ms; 6071 entry->timer_interval = ms; 6072 phba->cmf_last_ts = 0; 6073 6074 /* Increment rxtable index */ 6075 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY; 6076 tail = atomic_read(&phba->rxtable_idx_tail); 6077 if (head == tail) { 6078 tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY; 6079 atomic_set(&phba->rxtable_idx_tail, tail); 6080 } 6081 atomic_set(&phba->rxtable_idx_head, head); 6082 } 6083 6084 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { 6085 /* If Monitor mode, check if we are oversubscribed 6086 * against the full line rate. 
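 * Exceeding mbpi in Monitor mode only increments cgn_driver_evt_cnt; lpfc_cgn_save_evt_cnt() later folds that counter into the per-minute driver event slot of the congestion info buffer.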
6087 */ 6088 if (mbpi && total > mbpi) 6089 atomic_inc(&phba->cgn_driver_evt_cnt); 6090 } 6091 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ 6092 6093 /* Each minute save Fabric and Driver congestion information */ 6094 lpfc_cgn_save_evt_cnt(phba); 6095 6096 phba->hba_flag &= ~HBA_SHORT_CMF; 6097 6098 /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the 6099 * minute, adjust our next timer interval, if needed, to ensure a 6100 * 1 minute granularity when we get the next timer interrupt. 6101 */ 6102 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL), 6103 phba->cgn_evt_timestamp)) { 6104 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp - 6105 jiffies); 6106 if (timer_interval <= 0) 6107 timer_interval = LPFC_CMF_INTERVAL; 6108 else 6109 phba->hba_flag |= HBA_SHORT_CMF; 6110 6111 /* If we adjust timer_interval, max_bytes_per_interval 6112 * needs to be adjusted as well. 6113 */ 6114 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 6115 timer_interval, 1000); 6116 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) 6117 phba->cmf_max_bytes_per_interval = 6118 phba->cmf_link_byte_count; 6119 } 6120 6121 /* Since total_bytes has already been zero'ed, its okay to unblock 6122 * after max_bytes_per_interval is setup. 6123 */ 6124 if (atomic_xchg(&phba->cmf_bw_wait, 0)) 6125 queue_work(phba->wq, &phba->unblock_request_work); 6126 6127 /* SCSI IO is now unblocked */ 6128 atomic_set(&phba->cmf_stop_io, 0); 6129 6130 skip: 6131 hrtimer_forward_now(timer, 6132 ktime_set(0, timer_interval * NSEC_PER_MSEC)); 6133 return HRTIMER_RESTART; 6134 } 6135 6136 #define trunk_link_status(__idx)\ 6137 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6138 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ 6139 "Link up" : "Link down") : "NA" 6140 /* Did port __idx reported an error */ 6141 #define trunk_port_fault(__idx)\ 6142 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6143 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA" 6144 6145 static void 6146 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 6147 struct lpfc_acqe_fc_la *acqe_fc) 6148 { 6149 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 6150 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 6151 6152 phba->sli4_hba.link_state.speed = 6153 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6154 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6155 6156 phba->sli4_hba.link_state.logical_speed = 6157 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6158 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 6159 phba->fc_linkspeed = 6160 lpfc_async_link_speed_to_read_top( 6161 phba, 6162 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6163 6164 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 6165 phba->trunk_link.link0.state = 6166 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 6167 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6168 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 6169 } 6170 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 6171 phba->trunk_link.link1.state = 6172 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 6173 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6174 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 6175 } 6176 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 6177 phba->trunk_link.link2.state = 6178 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 6179 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6180 phba->trunk_link.link2.fault = port_fault & 0x4 ? 
err : 0; 6181 } 6182 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 6183 phba->trunk_link.link3.state = 6184 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 6185 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6186 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 6187 } 6188 6189 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6190 "2910 Async FC Trunking Event - Speed:%d\n" 6191 "\tLogical speed:%d " 6192 "port0: %s port1: %s port2: %s port3: %s\n", 6193 phba->sli4_hba.link_state.speed, 6194 phba->sli4_hba.link_state.logical_speed, 6195 trunk_link_status(0), trunk_link_status(1), 6196 trunk_link_status(2), trunk_link_status(3)); 6197 6198 if (phba->cmf_active_mode != LPFC_CFG_OFF) 6199 lpfc_cmf_signal_init(phba); 6200 6201 if (port_fault) 6202 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6203 "3202 trunk error:0x%x (%s) seen on port0:%s " 6204 /* 6205 * SLI-4: We have only 0xA error codes 6206 * defined as of now. print an appropriate 6207 * message in case driver needs to be updated. 6208 */ 6209 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 6210 "UNDEFINED. update driver." : trunk_errmsg[err], 6211 trunk_port_fault(0), trunk_port_fault(1), 6212 trunk_port_fault(2), trunk_port_fault(3)); 6213 } 6214 6215 6216 /** 6217 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 6218 * @phba: pointer to lpfc hba data structure. 6219 * @acqe_fc: pointer to the async fc completion queue entry. 6220 * 6221 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 6222 * that the event was received and then issue a read_topology mailbox command so 6223 * that the rest of the driver will treat it the same as SLI3. 6224 **/ 6225 static void 6226 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 6227 { 6228 struct lpfc_dmabuf *mp; 6229 LPFC_MBOXQ_t *pmb; 6230 MAILBOX_t *mb; 6231 struct lpfc_mbx_read_top *la; 6232 int rc; 6233 6234 if (bf_get(lpfc_trailer_type, acqe_fc) != 6235 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 6236 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6237 "2895 Non FC link Event detected.(%d)\n", 6238 bf_get(lpfc_trailer_type, acqe_fc)); 6239 return; 6240 } 6241 6242 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6243 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 6244 lpfc_update_trunk_link_status(phba, acqe_fc); 6245 return; 6246 } 6247 6248 /* Keep the link status for extra SLI4 state machine reference */ 6249 phba->sli4_hba.link_state.speed = 6250 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6251 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6252 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 6253 phba->sli4_hba.link_state.topology = 6254 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 6255 phba->sli4_hba.link_state.status = 6256 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 6257 phba->sli4_hba.link_state.type = 6258 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 6259 phba->sli4_hba.link_state.number = 6260 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 6261 phba->sli4_hba.link_state.fault = 6262 bf_get(lpfc_acqe_link_fault, acqe_fc); 6263 6264 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6265 LPFC_FC_LA_TYPE_LINK_DOWN) 6266 phba->sli4_hba.link_state.logical_speed = 0; 6267 else if (!phba->sli4_hba.conf_trunk) 6268 phba->sli4_hba.link_state.logical_speed = 6269 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6270 6271 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6272 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 6273 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 6274 "%dMbps 
Fault:%d\n", 6275 phba->sli4_hba.link_state.speed, 6276 phba->sli4_hba.link_state.topology, 6277 phba->sli4_hba.link_state.status, 6278 phba->sli4_hba.link_state.type, 6279 phba->sli4_hba.link_state.number, 6280 phba->sli4_hba.link_state.logical_speed, 6281 phba->sli4_hba.link_state.fault); 6282 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6283 if (!pmb) { 6284 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6285 "2897 The mboxq allocation failed\n"); 6286 return; 6287 } 6288 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6289 if (!mp) { 6290 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6291 "2898 The lpfc_dmabuf allocation failed\n"); 6292 goto out_free_pmb; 6293 } 6294 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 6295 if (!mp->virt) { 6296 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6297 "2899 The mbuf allocation failed\n"); 6298 goto out_free_dmabuf; 6299 } 6300 6301 /* Cleanup any outstanding ELS commands */ 6302 lpfc_els_flush_all_cmd(phba); 6303 6304 /* Block ELS IOCBs until we have done process link event */ 6305 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 6306 6307 /* Update link event statistics */ 6308 phba->sli.slistat.link_event++; 6309 6310 /* Create lpfc_handle_latt mailbox command from link ACQE */ 6311 lpfc_read_topology(phba, pmb, mp); 6312 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 6313 pmb->vport = phba->pport; 6314 6315 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 6316 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 6317 6318 switch (phba->sli4_hba.link_state.status) { 6319 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 6320 phba->link_flag |= LS_MDS_LINK_DOWN; 6321 break; 6322 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 6323 phba->link_flag |= LS_MDS_LOOPBACK; 6324 break; 6325 default: 6326 break; 6327 } 6328 6329 /* Initialize completion status */ 6330 mb = &pmb->u.mb; 6331 mb->mbxStatus = MBX_SUCCESS; 6332 6333 /* Parse port fault information field */ 6334 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 6335 6336 /* Parse and translate link attention fields */ 6337 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 6338 la->eventTag = acqe_fc->event_tag; 6339 6340 if (phba->sli4_hba.link_state.status == 6341 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 6342 bf_set(lpfc_mbx_read_top_att_type, la, 6343 LPFC_FC_LA_TYPE_UNEXP_WWPN); 6344 } else { 6345 bf_set(lpfc_mbx_read_top_att_type, la, 6346 LPFC_FC_LA_TYPE_LINK_DOWN); 6347 } 6348 /* Invoke the mailbox command callback function */ 6349 lpfc_mbx_cmpl_read_topology(phba, pmb); 6350 6351 return; 6352 } 6353 6354 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 6355 if (rc == MBX_NOT_FINISHED) { 6356 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6357 goto out_free_dmabuf; 6358 } 6359 return; 6360 6361 out_free_dmabuf: 6362 kfree(mp); 6363 out_free_pmb: 6364 mempool_free(pmb, phba->mbox_mem_pool); 6365 } 6366 6367 /** 6368 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 6369 * @phba: pointer to lpfc hba data structure. 6370 * @acqe_sli: pointer to the async SLI completion queue entry. 6371 * 6372 * This routine is to handle the SLI4 asynchronous SLI events. 
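 * The event types handled below include over/normal temperature, misconfigured optics, remote D_Port test initiation, port parameter (congestion) changes, misconfigured FA-WWN, EEPROM failure and congestion signal events; unrecognized types are only logged.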
6373 **/ 6374 static void 6375 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 6376 { 6377 char port_name; 6378 char message[128]; 6379 uint8_t status; 6380 uint8_t evt_type; 6381 uint8_t operational = 0; 6382 struct temp_event temp_event_data; 6383 struct lpfc_acqe_misconfigured_event *misconfigured; 6384 struct lpfc_acqe_cgn_signal *cgn_signal; 6385 struct Scsi_Host *shost; 6386 struct lpfc_vport **vports; 6387 int rc, i, cnt; 6388 6389 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 6390 6391 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6392 "2901 Async SLI event - Type:%d, Event Data: x%08x " 6393 "x%08x x%08x x%08x\n", evt_type, 6394 acqe_sli->event_data1, acqe_sli->event_data2, 6395 acqe_sli->reserved, acqe_sli->trailer); 6396 6397 port_name = phba->Port[0]; 6398 if (port_name == 0x00) 6399 port_name = '?'; /* get port name is empty */ 6400 6401 switch (evt_type) { 6402 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 6403 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6404 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 6405 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6406 6407 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6408 "3190 Over Temperature:%d Celsius- Port Name %c\n", 6409 acqe_sli->event_data1, port_name); 6410 6411 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 6412 shost = lpfc_shost_from_vport(phba->pport); 6413 fc_host_post_vendor_event(shost, fc_get_event_number(), 6414 sizeof(temp_event_data), 6415 (char *)&temp_event_data, 6416 SCSI_NL_VID_TYPE_PCI 6417 | PCI_VENDOR_ID_EMULEX); 6418 break; 6419 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 6420 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6421 temp_event_data.event_code = LPFC_NORMAL_TEMP; 6422 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6423 6424 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6425 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 6426 acqe_sli->event_data1, port_name); 6427 6428 shost = lpfc_shost_from_vport(phba->pport); 6429 fc_host_post_vendor_event(shost, fc_get_event_number(), 6430 sizeof(temp_event_data), 6431 (char *)&temp_event_data, 6432 SCSI_NL_VID_TYPE_PCI 6433 | PCI_VENDOR_ID_EMULEX); 6434 break; 6435 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 6436 misconfigured = (struct lpfc_acqe_misconfigured_event *) 6437 &acqe_sli->event_data1; 6438 6439 /* fetch the status for this port */ 6440 switch (phba->sli4_hba.lnk_info.lnk_no) { 6441 case LPFC_LINK_NUMBER_0: 6442 status = bf_get(lpfc_sli_misconfigured_port0_state, 6443 &misconfigured->theEvent); 6444 operational = bf_get(lpfc_sli_misconfigured_port0_op, 6445 &misconfigured->theEvent); 6446 break; 6447 case LPFC_LINK_NUMBER_1: 6448 status = bf_get(lpfc_sli_misconfigured_port1_state, 6449 &misconfigured->theEvent); 6450 operational = bf_get(lpfc_sli_misconfigured_port1_op, 6451 &misconfigured->theEvent); 6452 break; 6453 case LPFC_LINK_NUMBER_2: 6454 status = bf_get(lpfc_sli_misconfigured_port2_state, 6455 &misconfigured->theEvent); 6456 operational = bf_get(lpfc_sli_misconfigured_port2_op, 6457 &misconfigured->theEvent); 6458 break; 6459 case LPFC_LINK_NUMBER_3: 6460 status = bf_get(lpfc_sli_misconfigured_port3_state, 6461 &misconfigured->theEvent); 6462 operational = bf_get(lpfc_sli_misconfigured_port3_op, 6463 &misconfigured->theEvent); 6464 break; 6465 default: 6466 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6467 "3296 " 6468 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 6469 "event: Invalid link %d", 6470 phba->sli4_hba.lnk_info.lnk_no); 6471 return; 6472 } 6473 6474 /* Skip if optic 
state unchanged */ 6475 if (phba->sli4_hba.lnk_info.optic_state == status) 6476 return; 6477 6478 switch (status) { 6479 case LPFC_SLI_EVENT_STATUS_VALID: 6480 sprintf(message, "Physical Link is functional"); 6481 break; 6482 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 6483 sprintf(message, "Optics faulted/incorrectly " 6484 "installed/not installed - Reseat optics, " 6485 "if issue not resolved, replace."); 6486 break; 6487 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 6488 sprintf(message, 6489 "Optics of two types installed - Remove one " 6490 "optic or install matching pair of optics."); 6491 break; 6492 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 6493 sprintf(message, "Incompatible optics - Replace with " 6494 "compatible optics for card to function."); 6495 break; 6496 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 6497 sprintf(message, "Unqualified optics - Replace with " 6498 "Avago optics for Warranty and Technical " 6499 "Support - Link is%s operational", 6500 (operational) ? " not" : ""); 6501 break; 6502 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 6503 sprintf(message, "Uncertified optics - Replace with " 6504 "Avago-certified optics to enable link " 6505 "operation - Link is%s operational", 6506 (operational) ? " not" : ""); 6507 break; 6508 default: 6509 /* firmware is reporting a status we don't know about */ 6510 sprintf(message, "Unknown event status x%02x", status); 6511 break; 6512 } 6513 6514 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 6515 rc = lpfc_sli4_read_config(phba); 6516 if (rc) { 6517 phba->lmt = 0; 6518 lpfc_printf_log(phba, KERN_ERR, 6519 LOG_TRACE_EVENT, 6520 "3194 Unable to retrieve supported " 6521 "speeds, rc = 0x%x\n", rc); 6522 } 6523 rc = lpfc_sli4_refresh_params(phba); 6524 if (rc) { 6525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6526 "3174 Unable to update pls support, " 6527 "rc x%x\n", rc); 6528 } 6529 vports = lpfc_create_vport_work_array(phba); 6530 if (vports != NULL) { 6531 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6532 i++) { 6533 shost = lpfc_shost_from_vport(vports[i]); 6534 lpfc_host_supported_speeds_set(shost); 6535 } 6536 } 6537 lpfc_destroy_vport_work_array(phba, vports); 6538 6539 phba->sli4_hba.lnk_info.optic_state = status; 6540 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6541 "3176 Port Name %c %s\n", port_name, message); 6542 break; 6543 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 6544 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6545 "3192 Remote DPort Test Initiated - " 6546 "Event Data1:x%08x Event Data2: x%08x\n", 6547 acqe_sli->event_data1, acqe_sli->event_data2); 6548 break; 6549 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: 6550 /* Call FW to obtain active parms */ 6551 lpfc_sli4_cgn_parm_chg_evt(phba); 6552 break; 6553 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 6554 /* Misconfigured WWN. Reports that the SLI Port is configured 6555 * to use FA-WWN, but the attached device doesn’t support it. 6556 * No driver action is required. 6557 * Event Data1 - N.A, Event Data2 - N.A 6558 */ 6559 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, 6560 "2699 Misconfigured FA-WWN - Attached device does " 6561 "not support FA-WWN\n"); 6562 break; 6563 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 6564 /* EEPROM failure. 
No driver action is required */ 6565 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6566 "2518 EEPROM failure - " 6567 "Event Data1: x%08x Event Data2: x%08x\n", 6568 acqe_sli->event_data1, acqe_sli->event_data2); 6569 break; 6570 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: 6571 if (phba->cmf_active_mode == LPFC_CFG_OFF) 6572 break; 6573 cgn_signal = (struct lpfc_acqe_cgn_signal *) 6574 &acqe_sli->event_data1; 6575 phba->cgn_acqe_cnt++; 6576 6577 cnt = bf_get(lpfc_warn_acqe, cgn_signal); 6578 atomic64_add(cnt, &phba->cgn_acqe_stat.warn); 6579 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); 6580 6581 /* no threshold for CMF, even 1 signal will trigger an event */ 6582 6583 /* Alarm overrides warning, so check that first */ 6584 if (cgn_signal->alarm_cnt) { 6585 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6586 /* Keep track of alarm cnt for cgn_info */ 6587 atomic_add(cgn_signal->alarm_cnt, 6588 &phba->cgn_fabric_alarm_cnt); 6589 /* Keep track of alarm cnt for CMF_SYNC_WQE */ 6590 atomic_add(cgn_signal->alarm_cnt, 6591 &phba->cgn_sync_alarm_cnt); 6592 } 6593 } else if (cnt) { 6594 /* signal action needs to be taken */ 6595 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 6596 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6597 /* Keep track of warning cnt for cgn_info */ 6598 atomic_add(cnt, &phba->cgn_fabric_warn_cnt); 6599 /* Keep track of warning cnt for CMF_SYNC_WQE */ 6600 atomic_add(cnt, &phba->cgn_sync_warn_cnt); 6601 } 6602 } 6603 break; 6604 default: 6605 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6606 "3193 Unrecognized SLI event, type: 0x%x", 6607 evt_type); 6608 break; 6609 } 6610 } 6611 6612 /** 6613 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 6614 * @vport: pointer to vport data structure. 6615 * 6616 * This routine is to perform Clear Virtual Link (CVL) on a vport in 6617 * response to a CVL event. 6618 * 6619 * Return the pointer to the ndlp with the vport if successful, otherwise 6620 * return NULL. 6621 **/ 6622 static struct lpfc_nodelist * 6623 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 6624 { 6625 struct lpfc_nodelist *ndlp; 6626 struct Scsi_Host *shost; 6627 struct lpfc_hba *phba; 6628 6629 if (!vport) 6630 return NULL; 6631 phba = vport->phba; 6632 if (!phba) 6633 return NULL; 6634 ndlp = lpfc_findnode_did(vport, Fabric_DID); 6635 if (!ndlp) { 6636 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6637 ndlp = lpfc_nlp_init(vport, Fabric_DID); 6638 if (!ndlp) 6639 return NULL; 6640 /* Set the node type */ 6641 ndlp->nlp_type |= NLP_FABRIC; 6642 /* Put ndlp onto node list */ 6643 lpfc_enqueue_node(vport, ndlp); 6644 } 6645 if ((phba->pport->port_state < LPFC_FLOGI) && 6646 (phba->pport->port_state != LPFC_VPORT_FAILED)) 6647 return NULL; 6648 /* If virtual link is not yet instantiated ignore CVL */ 6649 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 6650 && (vport->port_state != LPFC_VPORT_FAILED)) 6651 return NULL; 6652 shost = lpfc_shost_from_vport(vport); 6653 if (!shost) 6654 return NULL; 6655 lpfc_linkdown_port(vport); 6656 lpfc_cleanup_pending_mbox(vport); 6657 spin_lock_irq(shost->host_lock); 6658 vport->fc_flag |= FC_VPORT_CVL_RCVD; 6659 spin_unlock_irq(shost->host_lock); 6660 6661 return ndlp; 6662 } 6663 6664 /** 6665 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 6666 * @phba: pointer to lpfc hba data structure. 6667 * 6668 * This routine is to perform Clear Virtual Link (CVL) on all vports in 6669 * response to a FCF dead event. 
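 * It simply walks the active vport work array, applies lpfc_sli4_perform_vport_cvl() to each vport, and then releases the array.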
6670 **/ 6671 static void 6672 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 6673 { 6674 struct lpfc_vport **vports; 6675 int i; 6676 6677 vports = lpfc_create_vport_work_array(phba); 6678 if (vports) 6679 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 6680 lpfc_sli4_perform_vport_cvl(vports[i]); 6681 lpfc_destroy_vport_work_array(phba, vports); 6682 } 6683 6684 /** 6685 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 6686 * @phba: pointer to lpfc hba data structure. 6687 * @acqe_fip: pointer to the async fcoe completion queue entry. 6688 * 6689 * This routine is to handle the SLI4 asynchronous fcoe event. 6690 **/ 6691 static void 6692 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 6693 struct lpfc_acqe_fip *acqe_fip) 6694 { 6695 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 6696 int rc; 6697 struct lpfc_vport *vport; 6698 struct lpfc_nodelist *ndlp; 6699 int active_vlink_present; 6700 struct lpfc_vport **vports; 6701 int i; 6702 6703 phba->fc_eventTag = acqe_fip->event_tag; 6704 phba->fcoe_eventtag = acqe_fip->event_tag; 6705 switch (event_type) { 6706 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 6707 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 6708 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 6709 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6710 "2546 New FCF event, evt_tag:x%x, " 6711 "index:x%x\n", 6712 acqe_fip->event_tag, 6713 acqe_fip->index); 6714 else 6715 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 6716 LOG_DISCOVERY, 6717 "2788 FCF param modified event, " 6718 "evt_tag:x%x, index:x%x\n", 6719 acqe_fip->event_tag, 6720 acqe_fip->index); 6721 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6722 /* 6723 * During period of FCF discovery, read the FCF 6724 * table record indexed by the event to update 6725 * FCF roundrobin failover eligible FCF bmask. 6726 */ 6727 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6728 LOG_DISCOVERY, 6729 "2779 Read FCF (x%x) for updating " 6730 "roundrobin FCF failover bmask\n", 6731 acqe_fip->index); 6732 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 6733 } 6734 6735 /* If the FCF discovery is in progress, do nothing. */ 6736 spin_lock_irq(&phba->hbalock); 6737 if (phba->hba_flag & FCF_TS_INPROG) { 6738 spin_unlock_irq(&phba->hbalock); 6739 break; 6740 } 6741 /* If fast FCF failover rescan event is pending, do nothing */ 6742 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 6743 spin_unlock_irq(&phba->hbalock); 6744 break; 6745 } 6746 6747 /* If the FCF has been in discovered state, do nothing. 
*/ 6748 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 6749 spin_unlock_irq(&phba->hbalock); 6750 break; 6751 } 6752 spin_unlock_irq(&phba->hbalock); 6753 6754 /* Otherwise, scan the entire FCF table and re-discover SAN */ 6755 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6756 "2770 Start FCF table scan per async FCF " 6757 "event, evt_tag:x%x, index:x%x\n", 6758 acqe_fip->event_tag, acqe_fip->index); 6759 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 6760 LPFC_FCOE_FCF_GET_FIRST); 6761 if (rc) 6762 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6763 "2547 Issue FCF scan read FCF mailbox " 6764 "command failed (x%x)\n", rc); 6765 break; 6766 6767 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 6768 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6769 "2548 FCF Table full count 0x%x tag 0x%x\n", 6770 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 6771 acqe_fip->event_tag); 6772 break; 6773 6774 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 6775 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6776 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6777 "2549 FCF (x%x) disconnected from network, " 6778 "tag:x%x\n", acqe_fip->index, 6779 acqe_fip->event_tag); 6780 /* 6781 * If we are in the middle of FCF failover process, clear 6782 * the corresponding FCF bit in the roundrobin bitmap. 6783 */ 6784 spin_lock_irq(&phba->hbalock); 6785 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 6786 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 6787 spin_unlock_irq(&phba->hbalock); 6788 /* Update FLOGI FCF failover eligible FCF bmask */ 6789 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 6790 break; 6791 } 6792 spin_unlock_irq(&phba->hbalock); 6793 6794 /* If the event is not for currently used fcf do nothing */ 6795 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 6796 break; 6797 6798 /* 6799 * Otherwise, request the port to rediscover the entire FCF 6800 * table for a fast recovery from case that the current FCF 6801 * is no longer valid as we are not in the middle of FCF 6802 * failover process already. 6803 */ 6804 spin_lock_irq(&phba->hbalock); 6805 /* Mark the fast failover process in progress */ 6806 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 6807 spin_unlock_irq(&phba->hbalock); 6808 6809 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6810 "2771 Start FCF fast failover process due to " 6811 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 6812 "\n", acqe_fip->event_tag, acqe_fip->index); 6813 rc = lpfc_sli4_redisc_fcf_table(phba); 6814 if (rc) { 6815 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6816 LOG_TRACE_EVENT, 6817 "2772 Issue FCF rediscover mailbox " 6818 "command failed, fail through to FCF " 6819 "dead event\n"); 6820 spin_lock_irq(&phba->hbalock); 6821 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 6822 spin_unlock_irq(&phba->hbalock); 6823 /* 6824 * Last resort will fail over by treating this 6825 * as a link down to FCF registration. 6826 */ 6827 lpfc_sli4_fcf_dead_failthrough(phba); 6828 } else { 6829 /* Reset FCF roundrobin bmask for new discovery */ 6830 lpfc_sli4_clear_fcf_rr_bmask(phba); 6831 /* 6832 * Handling fast FCF failover to a DEAD FCF event is 6833 * considered equalivant to receiving CVL to all vports. 
6834 */ 6835 lpfc_sli4_perform_all_vport_cvl(phba); 6836 } 6837 break; 6838 case LPFC_FIP_EVENT_TYPE_CVL: 6839 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6840 lpfc_printf_log(phba, KERN_ERR, 6841 LOG_TRACE_EVENT, 6842 "2718 Clear Virtual Link Received for VPI 0x%x" 6843 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 6844 6845 vport = lpfc_find_vport_by_vpid(phba, 6846 acqe_fip->index); 6847 ndlp = lpfc_sli4_perform_vport_cvl(vport); 6848 if (!ndlp) 6849 break; 6850 active_vlink_present = 0; 6851 6852 vports = lpfc_create_vport_work_array(phba); 6853 if (vports) { 6854 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6855 i++) { 6856 if ((!(vports[i]->fc_flag & 6857 FC_VPORT_CVL_RCVD)) && 6858 (vports[i]->port_state > LPFC_FDISC)) { 6859 active_vlink_present = 1; 6860 break; 6861 } 6862 } 6863 lpfc_destroy_vport_work_array(phba, vports); 6864 } 6865 6866 /* 6867 * Don't re-instantiate if vport is marked for deletion. 6868 * If we are here first then vport_delete is going to wait 6869 * for discovery to complete. 6870 */ 6871 if (!(vport->load_flag & FC_UNLOADING) && 6872 active_vlink_present) { 6873 /* 6874 * If there are other active VLinks present, 6875 * re-instantiate the Vlink using FDISC. 6876 */ 6877 mod_timer(&ndlp->nlp_delayfunc, 6878 jiffies + msecs_to_jiffies(1000)); 6879 spin_lock_irq(&ndlp->lock); 6880 ndlp->nlp_flag |= NLP_DELAY_TMO; 6881 spin_unlock_irq(&ndlp->lock); 6882 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 6883 vport->port_state = LPFC_FDISC; 6884 } else { 6885 /* 6886 * Otherwise, we request port to rediscover 6887 * the entire FCF table for a fast recovery 6888 * from possible case that the current FCF 6889 * is no longer valid if we are not already 6890 * in the FCF failover process. 6891 */ 6892 spin_lock_irq(&phba->hbalock); 6893 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6894 spin_unlock_irq(&phba->hbalock); 6895 break; 6896 } 6897 /* Mark the fast failover process in progress */ 6898 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 6899 spin_unlock_irq(&phba->hbalock); 6900 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6901 LOG_DISCOVERY, 6902 "2773 Start FCF failover per CVL, " 6903 "evt_tag:x%x\n", acqe_fip->event_tag); 6904 rc = lpfc_sli4_redisc_fcf_table(phba); 6905 if (rc) { 6906 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6907 LOG_TRACE_EVENT, 6908 "2774 Issue FCF rediscover " 6909 "mailbox command failed, " 6910 "through to CVL event\n"); 6911 spin_lock_irq(&phba->hbalock); 6912 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 6913 spin_unlock_irq(&phba->hbalock); 6914 /* 6915 * Last resort will be re-try on the 6916 * the current registered FCF entry. 6917 */ 6918 lpfc_retry_pport_discovery(phba); 6919 } else 6920 /* 6921 * Reset FCF roundrobin bmask for new 6922 * discovery. 6923 */ 6924 lpfc_sli4_clear_fcf_rr_bmask(phba); 6925 } 6926 break; 6927 default: 6928 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6929 "0288 Unknown FCoE event type 0x%x event tag " 6930 "0x%x\n", event_type, acqe_fip->event_tag); 6931 break; 6932 } 6933 } 6934 6935 /** 6936 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 6937 * @phba: pointer to lpfc hba data structure. 6938 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 6939 * 6940 * This routine is to handle the SLI4 asynchronous dcbx event. 
6941 **/ 6942 static void 6943 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 6944 struct lpfc_acqe_dcbx *acqe_dcbx) 6945 { 6946 phba->fc_eventTag = acqe_dcbx->event_tag; 6947 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6948 "0290 The SLI4 DCBX asynchronous event is not " 6949 "handled yet\n"); 6950 } 6951 6952 /** 6953 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 6954 * @phba: pointer to lpfc hba data structure. 6955 * @acqe_grp5: pointer to the async grp5 completion queue entry. 6956 * 6957 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 6958 * is an asynchronous notification of a logical link speed change. The Port 6959 * reports the logical link speed in units of 10Mbps. 6960 **/ 6961 static void 6962 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 6963 struct lpfc_acqe_grp5 *acqe_grp5) 6964 { 6965 uint16_t prev_ll_spd; 6966 6967 phba->fc_eventTag = acqe_grp5->event_tag; 6968 phba->fcoe_eventtag = acqe_grp5->event_tag; 6969 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 6970 phba->sli4_hba.link_state.logical_speed = 6971 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 6972 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6973 "2789 GRP5 Async Event: Updating logical link speed " 6974 "from %dMbps to %dMbps\n", prev_ll_spd, 6975 phba->sli4_hba.link_state.logical_speed); 6976 } 6977 6978 /** 6979 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event 6980 * @phba: pointer to lpfc hba data structure. 6981 * 6982 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event 6983 * is an asynchronous notification of a request to reset CM stats. 6984 **/ 6985 static void 6986 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) 6987 { 6988 if (!phba->cgn_i) 6989 return; 6990 lpfc_init_congestion_stat(phba); 6991 } 6992 6993 /** 6994 * lpfc_cgn_params_val - Validate FW congestion parameters. 6995 * @phba: pointer to lpfc hba data structure. 6996 * @p_cfg_param: pointer to FW provided congestion parameters. 6997 * 6998 * This routine validates the congestion parameters passed 6999 * by the FW to the driver via an ACQE event. 7000 **/ 7001 static void 7002 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) 7003 { 7004 spin_lock_irq(&phba->hbalock); 7005 7006 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, 7007 LPFC_CFG_MONITOR)) { 7008 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 7009 "6225 CMF mode param out of range: %d\n", 7010 p_cfg_param->cgn_param_mode); 7011 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; 7012 } 7013 7014 spin_unlock_irq(&phba->hbalock); 7015 } 7016 7017 /** 7018 * lpfc_cgn_params_parse - Process a FW cong parm change event 7019 * @phba: pointer to lpfc hba data structure. 7020 * @p_cgn_param: pointer to a data buffer with the FW cong params. 7021 * @len: the size of pdata in bytes. 7022 * 7023 * This routine validates the congestion management buffer signature 7024 * from the FW, validates the contents and makes corrections for 7025 * valid, in-range values. If the signature magic is correct, then after 7026 * parameter validation the contents are copied to the driver's 7027 * @phba structure. If the magic is incorrect, an error message is 7028 * logged.
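 * A change of cgn_param_mode also drives the CMF state machine below: turning congestion management on from OFF starts CMF (and may issue an EDC ELS if the link is up), dropping MANAGED or MONITOR to OFF stops CMF, MANAGED to MONITOR removes the bandwidth cap and unblocks queued requests, and MONITOR to MANAGED re-initializes congestion signaling.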
7029 **/ 7030 static void 7031 lpfc_cgn_params_parse(struct lpfc_hba *phba, 7032 struct lpfc_cgn_param *p_cgn_param, uint32_t len) 7033 { 7034 struct lpfc_cgn_info *cp; 7035 uint32_t crc, oldmode; 7036 7037 /* Make sure the FW has encoded the correct magic number to 7038 * validate the congestion parameter in FW memory. 7039 */ 7040 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { 7041 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7042 "4668 FW cgn parm buffer data: " 7043 "magic 0x%x version %d mode %d " 7044 "level0 %d level1 %d " 7045 "level2 %d byte13 %d " 7046 "byte14 %d byte15 %d " 7047 "byte11 %d byte12 %d activeMode %d\n", 7048 p_cgn_param->cgn_param_magic, 7049 p_cgn_param->cgn_param_version, 7050 p_cgn_param->cgn_param_mode, 7051 p_cgn_param->cgn_param_level0, 7052 p_cgn_param->cgn_param_level1, 7053 p_cgn_param->cgn_param_level2, 7054 p_cgn_param->byte13, 7055 p_cgn_param->byte14, 7056 p_cgn_param->byte15, 7057 p_cgn_param->byte11, 7058 p_cgn_param->byte12, 7059 phba->cmf_active_mode); 7060 7061 oldmode = phba->cmf_active_mode; 7062 7063 /* Any parameters out of range are corrected to defaults 7064 * by this routine. No need to fail. 7065 */ 7066 lpfc_cgn_params_val(phba, p_cgn_param); 7067 7068 /* Parameters are verified, move them into driver storage */ 7069 spin_lock_irq(&phba->hbalock); 7070 memcpy(&phba->cgn_p, p_cgn_param, 7071 sizeof(struct lpfc_cgn_param)); 7072 7073 /* Update parameters in congestion info buffer now */ 7074 if (phba->cgn_i) { 7075 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 7076 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 7077 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 7078 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 7079 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 7080 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 7081 LPFC_CGN_CRC32_SEED); 7082 cp->cgn_info_crc = cpu_to_le32(crc); 7083 } 7084 spin_unlock_irq(&phba->hbalock); 7085 7086 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; 7087 7088 switch (oldmode) { 7089 case LPFC_CFG_OFF: 7090 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { 7091 /* Turning CMF on */ 7092 lpfc_cmf_start(phba); 7093 7094 if (phba->link_state >= LPFC_LINK_UP) { 7095 phba->cgn_reg_fpin = 7096 phba->cgn_init_reg_fpin; 7097 phba->cgn_reg_signal = 7098 phba->cgn_init_reg_signal; 7099 lpfc_issue_els_edc(phba->pport, 0); 7100 } 7101 } 7102 break; 7103 case LPFC_CFG_MANAGED: 7104 switch (phba->cgn_p.cgn_param_mode) { 7105 case LPFC_CFG_OFF: 7106 /* Turning CMF off */ 7107 lpfc_cmf_stop(phba); 7108 if (phba->link_state >= LPFC_LINK_UP) 7109 lpfc_issue_els_edc(phba->pport, 0); 7110 break; 7111 case LPFC_CFG_MONITOR: 7112 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 7113 "4661 Switch from MANAGED to " 7114 "`MONITOR mode\n"); 7115 phba->cmf_max_bytes_per_interval = 7116 phba->cmf_link_byte_count; 7117 7118 /* Resume blocked IO - unblock on workqueue */ 7119 queue_work(phba->wq, 7120 &phba->unblock_request_work); 7121 break; 7122 } 7123 break; 7124 case LPFC_CFG_MONITOR: 7125 switch (phba->cgn_p.cgn_param_mode) { 7126 case LPFC_CFG_OFF: 7127 /* Turning CMF off */ 7128 lpfc_cmf_stop(phba); 7129 if (phba->link_state >= LPFC_LINK_UP) 7130 lpfc_issue_els_edc(phba->pport, 0); 7131 break; 7132 case LPFC_CFG_MANAGED: 7133 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 7134 "4662 Switch from MONITOR to " 7135 "MANAGED mode\n"); 7136 lpfc_cmf_signal_init(phba); 7137 break; 7138 } 7139 break; 7140 } 7141 } else { 7142 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 
7143 "4669 FW cgn parm buf wrong magic 0x%x " 7144 "version %d\n", p_cgn_param->cgn_param_magic, 7145 p_cgn_param->cgn_param_version); 7146 } 7147 } 7148 7149 /** 7150 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters. 7151 * @phba: pointer to lpfc hba data structure. 7152 * 7153 * This routine issues a read_object mailbox command to 7154 * get the congestion management parameters from the FW 7155 * parses it and updates the driver maintained values. 7156 * 7157 * Returns 7158 * 0 if the object was empty 7159 * -Eval if an error was encountered 7160 * Count if bytes were read from object 7161 **/ 7162 int 7163 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba) 7164 { 7165 int ret = 0; 7166 struct lpfc_cgn_param *p_cgn_param = NULL; 7167 u32 *pdata = NULL; 7168 u32 len = 0; 7169 7170 /* Find out if the FW has a new set of congestion parameters. */ 7171 len = sizeof(struct lpfc_cgn_param); 7172 pdata = kzalloc(len, GFP_KERNEL); 7173 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME, 7174 pdata, len); 7175 7176 /* 0 means no data. A negative means error. A positive means 7177 * bytes were copied. 7178 */ 7179 if (!ret) { 7180 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7181 "4670 CGN RD OBJ returns no data\n"); 7182 goto rd_obj_err; 7183 } else if (ret < 0) { 7184 /* Some error. Just exit and return it to the caller.*/ 7185 goto rd_obj_err; 7186 } 7187 7188 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7189 "6234 READ CGN PARAMS Successful %d\n", len); 7190 7191 /* Parse data pointer over len and update the phba congestion 7192 * parameters with values passed back. The receive rate values 7193 * may have been altered in FW, but take no action here. 7194 */ 7195 p_cgn_param = (struct lpfc_cgn_param *)pdata; 7196 lpfc_cgn_params_parse(phba, p_cgn_param, len); 7197 7198 rd_obj_err: 7199 kfree(pdata); 7200 return ret; 7201 } 7202 7203 /** 7204 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event 7205 * @phba: pointer to lpfc hba data structure. 7206 * 7207 * The FW generated Async ACQE SLI event calls this routine when 7208 * the event type is an SLI Internal Port Event and the Event Code 7209 * indicates a change to the FW maintained congestion parameters. 7210 * 7211 * This routine executes a Read_Object mailbox call to obtain the 7212 * current congestion parameters maintained in FW and corrects 7213 * the driver's active congestion parameters. 7214 * 7215 * The acqe event is not passed because there is no further data 7216 * required. 7217 * 7218 * Returns nonzero error if event processing encountered an error. 7219 * Zero otherwise for success. 7220 **/ 7221 static int 7222 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba) 7223 { 7224 int ret = 0; 7225 7226 if (!phba->sli4_hba.pc_sli4_params.cmf) { 7227 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7228 "4664 Cgn Evt when E2E off. Drop event\n"); 7229 return -EACCES; 7230 } 7231 7232 /* If the event is claiming an empty object, it's ok. A write 7233 * could have cleared it. Only error is a negative return 7234 * status. 
7235 */ 7236 ret = lpfc_sli4_cgn_params_read(phba); 7237 if (ret < 0) { 7238 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7239 "4667 Error reading Cgn Params (%d)\n", 7240 ret); 7241 } else if (!ret) { 7242 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7243 "4673 CGN Event empty object.\n"); 7244 } 7245 return ret; 7246 } 7247 7248 /** 7249 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 7250 * @phba: pointer to lpfc hba data structure. 7251 * 7252 * This routine is invoked by the worker thread to process all the pending 7253 * SLI4 asynchronous events. 7254 **/ 7255 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 7256 { 7257 struct lpfc_cq_event *cq_event; 7258 unsigned long iflags; 7259 7260 /* First, declare the async event has been handled */ 7261 spin_lock_irqsave(&phba->hbalock, iflags); 7262 phba->hba_flag &= ~ASYNC_EVENT; 7263 spin_unlock_irqrestore(&phba->hbalock, iflags); 7264 7265 /* Now, handle all the async events */ 7266 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7267 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 7268 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 7269 cq_event, struct lpfc_cq_event, list); 7270 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, 7271 iflags); 7272 7273 /* Process the asynchronous event */ 7274 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 7275 case LPFC_TRAILER_CODE_LINK: 7276 lpfc_sli4_async_link_evt(phba, 7277 &cq_event->cqe.acqe_link); 7278 break; 7279 case LPFC_TRAILER_CODE_FCOE: 7280 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 7281 break; 7282 case LPFC_TRAILER_CODE_DCBX: 7283 lpfc_sli4_async_dcbx_evt(phba, 7284 &cq_event->cqe.acqe_dcbx); 7285 break; 7286 case LPFC_TRAILER_CODE_GRP5: 7287 lpfc_sli4_async_grp5_evt(phba, 7288 &cq_event->cqe.acqe_grp5); 7289 break; 7290 case LPFC_TRAILER_CODE_FC: 7291 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 7292 break; 7293 case LPFC_TRAILER_CODE_SLI: 7294 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 7295 break; 7296 case LPFC_TRAILER_CODE_CMSTAT: 7297 lpfc_sli4_async_cmstat_evt(phba); 7298 break; 7299 default: 7300 lpfc_printf_log(phba, KERN_ERR, 7301 LOG_TRACE_EVENT, 7302 "1804 Invalid asynchronous event code: " 7303 "x%x\n", bf_get(lpfc_trailer_code, 7304 &cq_event->cqe.mcqe_cmpl)); 7305 break; 7306 } 7307 7308 /* Free the completion event processed to the free pool */ 7309 lpfc_sli4_cq_event_release(phba, cq_event); 7310 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7311 } 7312 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 7313 } 7314 7315 /** 7316 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 7317 * @phba: pointer to lpfc hba data structure. 7318 * 7319 * This routine is invoked by the worker thread to process FCF table 7320 * rediscovery pending completion event. 
7321 **/ 7322 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 7323 { 7324 int rc; 7325 7326 spin_lock_irq(&phba->hbalock); 7327 /* Clear FCF rediscovery timeout event */ 7328 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 7329 /* Clear driver fast failover FCF record flag */ 7330 phba->fcf.failover_rec.flag = 0; 7331 /* Set state for FCF fast failover */ 7332 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 7333 spin_unlock_irq(&phba->hbalock); 7334 7335 /* Scan FCF table from the first entry to re-discover SAN */ 7336 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 7337 "2777 Start post-quiescent FCF table scan\n"); 7338 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 7339 if (rc) 7340 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7341 "2747 Issue FCF scan read FCF mailbox " 7342 "command failed 0x%x\n", rc); 7343 } 7344 7345 /** 7346 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 7347 * @phba: pointer to lpfc hba data structure. 7348 * @dev_grp: The HBA PCI-Device group number. 7349 * 7350 * This routine is invoked to set up the per HBA PCI-Device group function 7351 * API jump table entries. 7352 * 7353 * Return: 0 if success, otherwise -ENODEV 7354 **/ 7355 int 7356 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7357 { 7358 int rc; 7359 7360 /* Set up lpfc PCI-device group */ 7361 phba->pci_dev_grp = dev_grp; 7362 7363 /* The LPFC_PCI_DEV_OC uses SLI4 */ 7364 if (dev_grp == LPFC_PCI_DEV_OC) 7365 phba->sli_rev = LPFC_SLI_REV4; 7366 7367 /* Set up device INIT API function jump table */ 7368 rc = lpfc_init_api_table_setup(phba, dev_grp); 7369 if (rc) 7370 return -ENODEV; 7371 /* Set up SCSI API function jump table */ 7372 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 7373 if (rc) 7374 return -ENODEV; 7375 /* Set up SLI API function jump table */ 7376 rc = lpfc_sli_api_table_setup(phba, dev_grp); 7377 if (rc) 7378 return -ENODEV; 7379 /* Set up MBOX API function jump table */ 7380 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 7381 if (rc) 7382 return -ENODEV; 7383 7384 return 0; 7385 } 7386 7387 /** 7388 * lpfc_log_intr_mode - Log the active interrupt mode 7389 * @phba: pointer to lpfc hba data structure. 7390 * @intr_mode: active interrupt mode adopted. 7391 * 7392 * This routine it invoked to log the currently used active interrupt mode 7393 * to the device. 7394 **/ 7395 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 7396 { 7397 switch (intr_mode) { 7398 case 0: 7399 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7400 "0470 Enable INTx interrupt mode.\n"); 7401 break; 7402 case 1: 7403 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7404 "0481 Enabled MSI interrupt mode.\n"); 7405 break; 7406 case 2: 7407 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7408 "0480 Enabled MSI-X interrupt mode.\n"); 7409 break; 7410 default: 7411 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7412 "0482 Illegal interrupt mode.\n"); 7413 break; 7414 } 7415 return; 7416 } 7417 7418 /** 7419 * lpfc_enable_pci_dev - Enable a generic PCI device. 7420 * @phba: pointer to lpfc hba data structure. 7421 * 7422 * This routine is invoked to enable the PCI device that is common to all 7423 * PCI devices. 
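 *
 * A hedged usage sketch (the exact probe flow lives elsewhere in this
 * file): a successful enable here is paired with lpfc_disable_pci_dev()
 * below, which releases the memory regions and disables the device again:
 *
 *	if (lpfc_enable_pci_dev(phba))
 *		return -ENODEV;
 *	...
 *	lpfc_disable_pci_dev(phba);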
7424 * 7425 * Return codes 7426 * 0 - successful 7427 * other values - error 7428 **/ 7429 static int 7430 lpfc_enable_pci_dev(struct lpfc_hba *phba) 7431 { 7432 struct pci_dev *pdev; 7433 7434 /* Obtain PCI device reference */ 7435 if (!phba->pcidev) 7436 goto out_error; 7437 else 7438 pdev = phba->pcidev; 7439 /* Enable PCI device */ 7440 if (pci_enable_device_mem(pdev)) 7441 goto out_error; 7442 /* Request PCI resource for the device */ 7443 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 7444 goto out_disable_device; 7445 /* Set up device as PCI master and save state for EEH */ 7446 pci_set_master(pdev); 7447 pci_try_set_mwi(pdev); 7448 pci_save_state(pdev); 7449 7450 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 7451 if (pci_is_pcie(pdev)) 7452 pdev->needs_freset = 1; 7453 7454 return 0; 7455 7456 out_disable_device: 7457 pci_disable_device(pdev); 7458 out_error: 7459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7460 "1401 Failed to enable pci device\n"); 7461 return -ENODEV; 7462 } 7463 7464 /** 7465 * lpfc_disable_pci_dev - Disable a generic PCI device. 7466 * @phba: pointer to lpfc hba data structure. 7467 * 7468 * This routine is invoked to disable the PCI device that is common to all 7469 * PCI devices. 7470 **/ 7471 static void 7472 lpfc_disable_pci_dev(struct lpfc_hba *phba) 7473 { 7474 struct pci_dev *pdev; 7475 7476 /* Obtain PCI device reference */ 7477 if (!phba->pcidev) 7478 return; 7479 else 7480 pdev = phba->pcidev; 7481 /* Release PCI resource and disable PCI device */ 7482 pci_release_mem_regions(pdev); 7483 pci_disable_device(pdev); 7484 7485 return; 7486 } 7487 7488 /** 7489 * lpfc_reset_hba - Reset a hba 7490 * @phba: pointer to lpfc hba data structure. 7491 * 7492 * This routine is invoked to reset a hba device. It brings the HBA 7493 * offline, performs a board restart, and then brings the board back 7494 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 7495 * on outstanding mailbox commands. 7496 **/ 7497 void 7498 lpfc_reset_hba(struct lpfc_hba *phba) 7499 { 7500 /* If resets are disabled then set error state and return. */ 7501 if (!phba->cfg_enable_hba_reset) { 7502 phba->link_state = LPFC_HBA_ERROR; 7503 return; 7504 } 7505 7506 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */ 7507 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) { 7508 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 7509 } else { 7510 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 7511 lpfc_sli_flush_io_rings(phba); 7512 } 7513 lpfc_offline(phba); 7514 lpfc_sli_brdrestart(phba); 7515 lpfc_online(phba); 7516 lpfc_unblock_mgmt_io(phba); 7517 } 7518 7519 /** 7520 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 7521 * @phba: pointer to lpfc hba data structure. 7522 * 7523 * This function enables the PCI SR-IOV virtual functions to a physical 7524 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 7525 * enable the number of virtual functions to the physical function. As 7526 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 7527 * API call does not considered as an error condition for most of the device. 
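 *
 * In practice this helper only reads the TotalVFs value from the device's
 * SR-IOV extended capability; it does not enable any virtual functions
 * itself. lpfc_sli_probe_sriov_nr_virtfn() below uses the value as an
 * upper bound, along these lines (sketch of the code that follows, not
 * additional logic):
 *
 *	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
 *	if (nr_vfn > max_nr_vfn)
 *		return -EINVAL;
 *	rc = pci_enable_sriov(pdev, nr_vfn);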
7528 **/ 7529 uint16_t 7530 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 7531 { 7532 struct pci_dev *pdev = phba->pcidev; 7533 uint16_t nr_virtfn; 7534 int pos; 7535 7536 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 7537 if (pos == 0) 7538 return 0; 7539 7540 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 7541 return nr_virtfn; 7542 } 7543 7544 /** 7545 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 7546 * @phba: pointer to lpfc hba data structure. 7547 * @nr_vfn: number of virtual functions to be enabled. 7548 * 7549 * This function enables the PCI SR-IOV virtual functions to a physical 7550 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 7551 * enable the number of virtual functions to the physical function. As 7552 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 7553 * API call does not considered as an error condition for most of the device. 7554 **/ 7555 int 7556 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 7557 { 7558 struct pci_dev *pdev = phba->pcidev; 7559 uint16_t max_nr_vfn; 7560 int rc; 7561 7562 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 7563 if (nr_vfn > max_nr_vfn) { 7564 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7565 "3057 Requested vfs (%d) greater than " 7566 "supported vfs (%d)", nr_vfn, max_nr_vfn); 7567 return -EINVAL; 7568 } 7569 7570 rc = pci_enable_sriov(pdev, nr_vfn); 7571 if (rc) { 7572 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7573 "2806 Failed to enable sriov on this device " 7574 "with vfn number nr_vf:%d, rc:%d\n", 7575 nr_vfn, rc); 7576 } else 7577 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7578 "2807 Successful enable sriov on this device " 7579 "with vfn number nr_vf:%d\n", nr_vfn); 7580 return rc; 7581 } 7582 7583 static void 7584 lpfc_unblock_requests_work(struct work_struct *work) 7585 { 7586 struct lpfc_hba *phba = container_of(work, struct lpfc_hba, 7587 unblock_request_work); 7588 7589 lpfc_unblock_requests(phba); 7590 } 7591 7592 /** 7593 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 7594 * @phba: pointer to lpfc hba data structure. 7595 * 7596 * This routine is invoked to set up the driver internal resources before the 7597 * device specific resource setup to support the HBA device it attached to. 7598 * 7599 * Return codes 7600 * 0 - successful 7601 * other values - error 7602 **/ 7603 static int 7604 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 7605 { 7606 struct lpfc_sli *psli = &phba->sli; 7607 7608 /* 7609 * Driver resources common to all SLI revisions 7610 */ 7611 atomic_set(&phba->fast_event_count, 0); 7612 atomic_set(&phba->dbg_log_idx, 0); 7613 atomic_set(&phba->dbg_log_cnt, 0); 7614 atomic_set(&phba->dbg_log_dmping, 0); 7615 spin_lock_init(&phba->hbalock); 7616 7617 /* Initialize port_list spinlock */ 7618 spin_lock_init(&phba->port_list_lock); 7619 INIT_LIST_HEAD(&phba->port_list); 7620 7621 INIT_LIST_HEAD(&phba->work_list); 7622 init_waitqueue_head(&phba->wait_4_mlo_m_q); 7623 7624 /* Initialize the wait queue head for the kernel thread */ 7625 init_waitqueue_head(&phba->work_waitq); 7626 7627 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7628 "1403 Protocols supported %s %s %s\n", 7629 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 7630 "SCSI" : " "), 7631 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 7632 "NVME" : " "), 7633 (phba->nvmet_support ? 
"NVMET" : " ")); 7634 7635 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 7636 spin_lock_init(&phba->scsi_buf_list_get_lock); 7637 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 7638 spin_lock_init(&phba->scsi_buf_list_put_lock); 7639 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 7640 7641 /* Initialize the fabric iocb list */ 7642 INIT_LIST_HEAD(&phba->fabric_iocb_list); 7643 7644 /* Initialize list to save ELS buffers */ 7645 INIT_LIST_HEAD(&phba->elsbuf); 7646 7647 /* Initialize FCF connection rec list */ 7648 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 7649 7650 /* Initialize OAS configuration list */ 7651 spin_lock_init(&phba->devicelock); 7652 INIT_LIST_HEAD(&phba->luns); 7653 7654 /* MBOX heartbeat timer */ 7655 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 7656 /* Fabric block timer */ 7657 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 7658 /* EA polling mode timer */ 7659 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 7660 /* Heartbeat timer */ 7661 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 7662 7663 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 7664 7665 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, 7666 lpfc_idle_stat_delay_work); 7667 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); 7668 return 0; 7669 } 7670 7671 /** 7672 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 7673 * @phba: pointer to lpfc hba data structure. 7674 * 7675 * This routine is invoked to set up the driver internal resources specific to 7676 * support the SLI-3 HBA device it attached to. 7677 * 7678 * Return codes 7679 * 0 - successful 7680 * other values - error 7681 **/ 7682 static int 7683 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 7684 { 7685 int rc, entry_sz; 7686 7687 /* 7688 * Initialize timers used by driver 7689 */ 7690 7691 /* FCP polling mode timer */ 7692 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 7693 7694 /* Host attention work mask setup */ 7695 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 7696 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 7697 7698 /* Get all the module params for configuring this host */ 7699 lpfc_get_cfgparam(phba); 7700 /* Set up phase-1 common device driver resources */ 7701 7702 rc = lpfc_setup_driver_resource_phase1(phba); 7703 if (rc) 7704 return -ENODEV; 7705 7706 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 7707 phba->menlo_flag |= HBA_MENLO_SUPPORT; 7708 /* check for menlo minimum sg count */ 7709 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 7710 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 7711 } 7712 7713 if (!phba->sli.sli3_ring) 7714 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 7715 sizeof(struct lpfc_sli_ring), 7716 GFP_KERNEL); 7717 if (!phba->sli.sli3_ring) 7718 return -ENOMEM; 7719 7720 /* 7721 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 7722 * used to create the sg_dma_buf_pool must be dynamically calculated. 7723 */ 7724 7725 if (phba->sli_rev == LPFC_SLI_REV4) 7726 entry_sz = sizeof(struct sli4_sge); 7727 else 7728 entry_sz = sizeof(struct ulp_bde64); 7729 7730 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 7731 if (phba->cfg_enable_bg) { 7732 /* 7733 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 7734 * the FCP rsp, and a BDE for each. 
Sice we have no control 7735 * over how many protection data segments the SCSI Layer 7736 * will hand us (ie: there could be one for every block 7737 * in the IO), we just allocate enough BDEs to accomidate 7738 * our max amount and we need to limit lpfc_sg_seg_cnt to 7739 * minimize the risk of running out. 7740 */ 7741 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7742 sizeof(struct fcp_rsp) + 7743 (LPFC_MAX_SG_SEG_CNT * entry_sz); 7744 7745 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 7746 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 7747 7748 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 7749 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 7750 } else { 7751 /* 7752 * The scsi_buf for a regular I/O will hold the FCP cmnd, 7753 * the FCP rsp, a BDE for each, and a BDE for up to 7754 * cfg_sg_seg_cnt data segments. 7755 */ 7756 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7757 sizeof(struct fcp_rsp) + 7758 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 7759 7760 /* Total BDEs in BPL for scsi_sg_list */ 7761 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 7762 } 7763 7764 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 7765 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 7766 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 7767 phba->cfg_total_seg_cnt); 7768 7769 phba->max_vpi = LPFC_MAX_VPI; 7770 /* This will be set to correct value after config_port mbox */ 7771 phba->max_vports = 0; 7772 7773 /* 7774 * Initialize the SLI Layer to run with lpfc HBAs. 7775 */ 7776 lpfc_sli_setup(phba); 7777 lpfc_sli_queue_init(phba); 7778 7779 /* Allocate device driver memory */ 7780 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 7781 return -ENOMEM; 7782 7783 phba->lpfc_sg_dma_buf_pool = 7784 dma_pool_create("lpfc_sg_dma_buf_pool", 7785 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, 7786 BPL_ALIGN_SZ, 0); 7787 7788 if (!phba->lpfc_sg_dma_buf_pool) 7789 goto fail_free_mem; 7790 7791 phba->lpfc_cmd_rsp_buf_pool = 7792 dma_pool_create("lpfc_cmd_rsp_buf_pool", 7793 &phba->pcidev->dev, 7794 sizeof(struct fcp_cmnd) + 7795 sizeof(struct fcp_rsp), 7796 BPL_ALIGN_SZ, 0); 7797 7798 if (!phba->lpfc_cmd_rsp_buf_pool) 7799 goto fail_free_dma_buf_pool; 7800 7801 /* 7802 * Enable sr-iov virtual functions if supported and configured 7803 * through the module parameter. 7804 */ 7805 if (phba->cfg_sriov_nr_virtfn > 0) { 7806 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 7807 phba->cfg_sriov_nr_virtfn); 7808 if (rc) { 7809 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7810 "2808 Requested number of SR-IOV " 7811 "virtual functions (%d) is not " 7812 "supported\n", 7813 phba->cfg_sriov_nr_virtfn); 7814 phba->cfg_sriov_nr_virtfn = 0; 7815 } 7816 } 7817 7818 return 0; 7819 7820 fail_free_dma_buf_pool: 7821 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 7822 phba->lpfc_sg_dma_buf_pool = NULL; 7823 fail_free_mem: 7824 lpfc_mem_free(phba); 7825 return -ENOMEM; 7826 } 7827 7828 /** 7829 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 7830 * @phba: pointer to lpfc hba data structure. 7831 * 7832 * This routine is invoked to unset the driver internal resources set up 7833 * specific for supporting the SLI-3 HBA device it attached to. 
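 *
 * For reference, each entry of the lpfc_sg_dma_buf_pool created by
 * lpfc_sli_driver_resource_setup() above is laid out roughly as follows
 * (illustration only; the actual sizes come from the structures):
 *
 *	offset 0                                  : struct fcp_cmnd
 *	offset sizeof(fcp_cmnd)                   : struct fcp_rsp
 *	offset sizeof(fcp_cmnd) + sizeof(fcp_rsp) : BDE list with
 *	        cfg_sg_seg_cnt + 2 entries (cmnd, rsp, then data segments)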
7834 **/ 7835 static void 7836 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 7837 { 7838 /* Free device driver memory allocated */ 7839 lpfc_mem_free_all(phba); 7840 7841 return; 7842 } 7843 7844 /** 7845 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 7846 * @phba: pointer to lpfc hba data structure. 7847 * 7848 * This routine is invoked to set up the driver internal resources specific to 7849 * support the SLI-4 HBA device it attached to. 7850 * 7851 * Return codes 7852 * 0 - successful 7853 * other values - error 7854 **/ 7855 static int 7856 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 7857 { 7858 LPFC_MBOXQ_t *mboxq; 7859 MAILBOX_t *mb; 7860 int rc, i, max_buf_size; 7861 int longs; 7862 int extra; 7863 uint64_t wwn; 7864 u32 if_type; 7865 u32 if_fam; 7866 7867 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 7868 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 7869 phba->sli4_hba.curr_disp_cpu = 0; 7870 7871 /* Get all the module params for configuring this host */ 7872 lpfc_get_cfgparam(phba); 7873 7874 /* Set up phase-1 common device driver resources */ 7875 rc = lpfc_setup_driver_resource_phase1(phba); 7876 if (rc) 7877 return -ENODEV; 7878 7879 /* Before proceed, wait for POST done and device ready */ 7880 rc = lpfc_sli4_post_status_check(phba); 7881 if (rc) 7882 return -ENODEV; 7883 7884 /* Allocate all driver workqueues here */ 7885 7886 /* The lpfc_wq workqueue for deferred irq use */ 7887 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 7888 7889 /* 7890 * Initialize timers used by driver 7891 */ 7892 7893 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 7894 7895 /* FCF rediscover timer */ 7896 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 7897 7898 /* CMF congestion timer */ 7899 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7900 phba->cmf_timer.function = lpfc_cmf_timer; 7901 7902 /* 7903 * Control structure for handling external multi-buffer mailbox 7904 * command pass-through. 7905 */ 7906 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 7907 sizeof(struct lpfc_mbox_ext_buf_ctx)); 7908 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 7909 7910 phba->max_vpi = LPFC_MAX_VPI; 7911 7912 /* This will be set to correct value after the read_config mbox */ 7913 phba->max_vports = 0; 7914 7915 /* Program the default value of vlan_id and fc_map */ 7916 phba->valid_vlan = 0; 7917 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 7918 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 7919 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 7920 7921 /* 7922 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 7923 * we will associate a new ring, for each EQ/CQ/WQ tuple. 7924 * The WQ create will allocate the ring. 7925 */ 7926 7927 /* Initialize buffer queue management fields */ 7928 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 7929 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 7930 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 7931 7932 /* for VMID idle timeout if VMID is enabled */ 7933 if (lpfc_is_vmid_enabled(phba)) 7934 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); 7935 7936 /* 7937 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
7938 */ 7939 /* Initialize the Abort buffer list used by driver */ 7940 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 7941 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 7942 7943 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 7944 /* Initialize the Abort nvme buffer list used by driver */ 7945 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 7946 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7947 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 7948 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 7949 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 7950 } 7951 7952 /* This abort list used by worker thread */ 7953 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 7954 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 7955 spin_lock_init(&phba->sli4_hba.asynce_list_lock); 7956 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); 7957 7958 /* 7959 * Initialize driver internal slow-path work queues 7960 */ 7961 7962 /* Driver internel slow-path CQ Event pool */ 7963 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 7964 /* Response IOCB work queue list */ 7965 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 7966 /* Asynchronous event CQ Event work queue list */ 7967 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 7968 /* Slow-path XRI aborted CQ Event work queue list */ 7969 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 7970 /* Receive queue CQ Event work queue list */ 7971 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 7972 7973 /* Initialize extent block lists. */ 7974 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 7975 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 7976 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 7977 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 7978 7979 /* Initialize mboxq lists. If the early init routines fail 7980 * these lists need to be correctly initialized. 7981 */ 7982 INIT_LIST_HEAD(&phba->sli.mboxq); 7983 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 7984 7985 /* initialize optic_state to 0xFF */ 7986 phba->sli4_hba.lnk_info.optic_state = 0xff; 7987 7988 /* Allocate device driver memory */ 7989 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 7990 if (rc) 7991 return -ENOMEM; 7992 7993 /* IF Type 2 ports get initialized now. */ 7994 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 7995 LPFC_SLI_INTF_IF_TYPE_2) { 7996 rc = lpfc_pci_function_reset(phba); 7997 if (unlikely(rc)) { 7998 rc = -ENODEV; 7999 goto out_free_mem; 8000 } 8001 phba->temp_sensor_support = 1; 8002 } 8003 8004 /* Create the bootstrap mailbox command */ 8005 rc = lpfc_create_bootstrap_mbox(phba); 8006 if (unlikely(rc)) 8007 goto out_free_mem; 8008 8009 /* Set up the host's endian order with the device. */ 8010 rc = lpfc_setup_endian_order(phba); 8011 if (unlikely(rc)) 8012 goto out_free_bsmbx; 8013 8014 /* Set up the hba's configuration parameters. */ 8015 rc = lpfc_sli4_read_config(phba); 8016 if (unlikely(rc)) 8017 goto out_free_bsmbx; 8018 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 8019 if (unlikely(rc)) 8020 goto out_free_bsmbx; 8021 8022 /* IF Type 0 ports get initialized now. 
*/ 8023 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 8024 LPFC_SLI_INTF_IF_TYPE_0) { 8025 rc = lpfc_pci_function_reset(phba); 8026 if (unlikely(rc)) 8027 goto out_free_bsmbx; 8028 } 8029 8030 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8031 GFP_KERNEL); 8032 if (!mboxq) { 8033 rc = -ENOMEM; 8034 goto out_free_bsmbx; 8035 } 8036 8037 /* Check for NVMET being configured */ 8038 phba->nvmet_support = 0; 8039 if (lpfc_enable_nvmet_cnt) { 8040 8041 /* First get WWN of HBA instance */ 8042 lpfc_read_nv(phba, mboxq); 8043 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8044 if (rc != MBX_SUCCESS) { 8045 lpfc_printf_log(phba, KERN_ERR, 8046 LOG_TRACE_EVENT, 8047 "6016 Mailbox failed , mbxCmd x%x " 8048 "READ_NV, mbxStatus x%x\n", 8049 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8050 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 8051 mempool_free(mboxq, phba->mbox_mem_pool); 8052 rc = -EIO; 8053 goto out_free_bsmbx; 8054 } 8055 mb = &mboxq->u.mb; 8056 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 8057 sizeof(uint64_t)); 8058 wwn = cpu_to_be64(wwn); 8059 phba->sli4_hba.wwnn.u.name = wwn; 8060 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 8061 sizeof(uint64_t)); 8062 /* wwn is WWPN of HBA instance */ 8063 wwn = cpu_to_be64(wwn); 8064 phba->sli4_hba.wwpn.u.name = wwn; 8065 8066 /* Check to see if it matches any module parameter */ 8067 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 8068 if (wwn == lpfc_enable_nvmet[i]) { 8069 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 8070 if (lpfc_nvmet_mem_alloc(phba)) 8071 break; 8072 8073 phba->nvmet_support = 1; /* a match */ 8074 8075 lpfc_printf_log(phba, KERN_ERR, 8076 LOG_TRACE_EVENT, 8077 "6017 NVME Target %016llx\n", 8078 wwn); 8079 #else 8080 lpfc_printf_log(phba, KERN_ERR, 8081 LOG_TRACE_EVENT, 8082 "6021 Can't enable NVME Target." 8083 " NVME_TARGET_FC infrastructure" 8084 " is not in kernel\n"); 8085 #endif 8086 /* Not supported for NVMET */ 8087 phba->cfg_xri_rebalancing = 0; 8088 if (phba->irq_chann_mode == NHT_MODE) { 8089 phba->cfg_irq_chann = 8090 phba->sli4_hba.num_present_cpu; 8091 phba->cfg_hdw_queue = 8092 phba->sli4_hba.num_present_cpu; 8093 phba->irq_chann_mode = NORMAL_MODE; 8094 } 8095 break; 8096 } 8097 } 8098 } 8099 8100 lpfc_nvme_mod_param_dep(phba); 8101 8102 /* 8103 * Get sli4 parameters that override parameters from Port capabilities. 8104 * If this call fails, it isn't critical unless the SLI4 parameters come 8105 * back in conflict. 8106 */ 8107 rc = lpfc_get_sli4_parameters(phba, mboxq); 8108 if (rc) { 8109 if_type = bf_get(lpfc_sli_intf_if_type, 8110 &phba->sli4_hba.sli_intf); 8111 if_fam = bf_get(lpfc_sli_intf_sli_family, 8112 &phba->sli4_hba.sli_intf); 8113 if (phba->sli4_hba.extents_in_use && 8114 phba->sli4_hba.rpi_hdrs_in_use) { 8115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8116 "2999 Unsupported SLI4 Parameters " 8117 "Extents and RPI headers enabled.\n"); 8118 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8119 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 8120 mempool_free(mboxq, phba->mbox_mem_pool); 8121 rc = -EIO; 8122 goto out_free_bsmbx; 8123 } 8124 } 8125 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8126 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 8127 mempool_free(mboxq, phba->mbox_mem_pool); 8128 rc = -EIO; 8129 goto out_free_bsmbx; 8130 } 8131 } 8132 8133 /* 8134 * 1 for cmd, 1 for rsp, NVME adds an extra one 8135 * for boundary conditions in its max_sgl_segment template. 
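	 *
	 * Worked example (illustration only): with both FCP and NVME enabled,
	 * "extra" below becomes 3, so a regular I/O reserves the command
	 * entry, the response entry and one NVME boundary entry on top of the
	 * data segments, giving cfg_total_seg_cnt = cfg_sg_seg_cnt + 3 in the
	 * non-BlockGuard case handled below.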
8136 */ 8137 extra = 2; 8138 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 8139 extra++; 8140 8141 /* 8142 * Regardless of what family our adapter is in, we are 8143 * limited to 2 pages, 512 SGEs, for our SGL. 8144 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 8145 */ 8146 max_buf_size = (2 * SLI4_PAGE_SIZE); 8147 8148 /* 8149 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size 8150 * used to create the sg_dma_buf_pool must be calculated. 8151 */ 8152 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 8153 /* Both cfg_enable_bg and cfg_external_dif code paths */ 8154 8155 /* 8156 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 8157 * the FCP rsp, and an SGE. Since we have no control 8158 * over how many protection segments the SCSI Layer 8159 * will hand us (i.e., there could be one for every block 8160 * in the IO), just allocate enough SGEs to accommodate 8161 * our max amount and limit lpfc_sg_seg_cnt 8162 * to minimize the risk of running out. 8163 */ 8164 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 8165 sizeof(struct fcp_rsp) + max_buf_size; 8166 8167 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 8168 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 8169 8170 /* 8171 * If supporting DIF, reduce the seg count for scsi to 8172 * allow room for the DIF sges. 8173 */ 8174 if (phba->cfg_enable_bg && 8175 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 8176 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 8177 else 8178 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 8179 8180 } else { 8181 /* 8182 * The scsi_buf for a regular I/O holds the FCP cmnd, 8183 * the FCP rsp, an SGE for each, and an SGE for up to 8184 * cfg_sg_seg_cnt data segments. 8185 */ 8186 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 8187 sizeof(struct fcp_rsp) + 8188 ((phba->cfg_sg_seg_cnt + extra) * 8189 sizeof(struct sli4_sge)); 8190 8191 /* Total SGEs for scsi_sg_list */ 8192 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 8193 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 8194 8195 /* 8196 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 8197 * need to post 1 page for the SGL. 8198 */ 8199 } 8200 8201 if (phba->cfg_xpsgl && !phba->nvmet_support) 8202 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 8203 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 8204 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 8205 else 8206 phba->cfg_sg_dma_buf_size = 8207 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 8208 8209 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 8210 sizeof(struct sli4_sge); 8211 8212 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME.
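	 *
	 * (Worked example for the sizing just above, illustration only:
	 * assuming cfg_xpsgl is not in use and the computed size already
	 * exceeds LPFC_MIN_SG_SLI4_BUF_SZ, a value of e.g. 4600 bytes is
	 * rounded up by SLI4_PAGE_ALIGN() to 8192 with the 4 KB SLI4 page
	 * size, and border_sge_num then follows as
	 * 8192 / sizeof(struct sli4_sge).)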
*/ 8213 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8214 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 8215 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 8216 "6300 Reducing NVME sg segment " 8217 "cnt to %d\n", 8218 LPFC_MAX_NVME_SEG_CNT); 8219 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 8220 } else 8221 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 8222 } 8223 8224 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 8225 "9087 sg_seg_cnt:%d dmabuf_size:%d " 8226 "total:%d scsi:%d nvme:%d\n", 8227 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 8228 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 8229 phba->cfg_nvme_seg_cnt); 8230 8231 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 8232 i = phba->cfg_sg_dma_buf_size; 8233 else 8234 i = SLI4_PAGE_SIZE; 8235 8236 phba->lpfc_sg_dma_buf_pool = 8237 dma_pool_create("lpfc_sg_dma_buf_pool", 8238 &phba->pcidev->dev, 8239 phba->cfg_sg_dma_buf_size, 8240 i, 0); 8241 if (!phba->lpfc_sg_dma_buf_pool) 8242 goto out_free_bsmbx; 8243 8244 phba->lpfc_cmd_rsp_buf_pool = 8245 dma_pool_create("lpfc_cmd_rsp_buf_pool", 8246 &phba->pcidev->dev, 8247 sizeof(struct fcp_cmnd) + 8248 sizeof(struct fcp_rsp), 8249 i, 0); 8250 if (!phba->lpfc_cmd_rsp_buf_pool) 8251 goto out_free_sg_dma_buf; 8252 8253 mempool_free(mboxq, phba->mbox_mem_pool); 8254 8255 /* Verify OAS is supported */ 8256 lpfc_sli4_oas_verify(phba); 8257 8258 /* Verify RAS support on adapter */ 8259 lpfc_sli4_ras_init(phba); 8260 8261 /* Verify all the SLI4 queues */ 8262 rc = lpfc_sli4_queue_verify(phba); 8263 if (rc) 8264 goto out_free_cmd_rsp_buf; 8265 8266 /* Create driver internal CQE event pool */ 8267 rc = lpfc_sli4_cq_event_pool_create(phba); 8268 if (rc) 8269 goto out_free_cmd_rsp_buf; 8270 8271 /* Initialize sgl lists per host */ 8272 lpfc_init_sgl_list(phba); 8273 8274 /* Allocate and initialize active sgl array */ 8275 rc = lpfc_init_active_sgl_array(phba); 8276 if (rc) { 8277 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8278 "1430 Failed to initialize sgl list.\n"); 8279 goto out_destroy_cq_event_pool; 8280 } 8281 rc = lpfc_sli4_init_rpi_hdrs(phba); 8282 if (rc) { 8283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8284 "1432 Failed to initialize rpi headers.\n"); 8285 goto out_free_active_sgl; 8286 } 8287 8288 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 8289 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 8290 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 8291 GFP_KERNEL); 8292 if (!phba->fcf.fcf_rr_bmask) { 8293 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8294 "2759 Failed allocate memory for FCF round " 8295 "robin failover bmask\n"); 8296 rc = -ENOMEM; 8297 goto out_remove_rpi_hdrs; 8298 } 8299 8300 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 8301 sizeof(struct lpfc_hba_eq_hdl), 8302 GFP_KERNEL); 8303 if (!phba->sli4_hba.hba_eq_hdl) { 8304 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8305 "2572 Failed allocate memory for " 8306 "fast-path per-EQ handle array\n"); 8307 rc = -ENOMEM; 8308 goto out_free_fcf_rr_bmask; 8309 } 8310 8311 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 8312 sizeof(struct lpfc_vector_map_info), 8313 GFP_KERNEL); 8314 if (!phba->sli4_hba.cpu_map) { 8315 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8316 "3327 Failed allocate memory for msi-x " 8317 "interrupt vector mapping\n"); 8318 rc = -ENOMEM; 8319 goto out_free_hba_eq_hdl; 8320 } 8321 8322 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 8323 if 
(!phba->sli4_hba.eq_info) { 8324 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8325 "3321 Failed allocation for per_cpu stats\n"); 8326 rc = -ENOMEM; 8327 goto out_free_hba_cpu_map; 8328 } 8329 8330 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, 8331 sizeof(*phba->sli4_hba.idle_stat), 8332 GFP_KERNEL); 8333 if (!phba->sli4_hba.idle_stat) { 8334 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8335 "3390 Failed allocation for idle_stat\n"); 8336 rc = -ENOMEM; 8337 goto out_free_hba_eq_info; 8338 } 8339 8340 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8341 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 8342 if (!phba->sli4_hba.c_stat) { 8343 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8344 "3332 Failed allocating per cpu hdwq stats\n"); 8345 rc = -ENOMEM; 8346 goto out_free_hba_idle_stat; 8347 } 8348 #endif 8349 8350 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); 8351 if (!phba->cmf_stat) { 8352 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8353 "3331 Failed allocating per cpu cgn stats\n"); 8354 rc = -ENOMEM; 8355 goto out_free_hba_hdwq_info; 8356 } 8357 8358 /* 8359 * Enable sr-iov virtual functions if supported and configured 8360 * through the module parameter. 8361 */ 8362 if (phba->cfg_sriov_nr_virtfn > 0) { 8363 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 8364 phba->cfg_sriov_nr_virtfn); 8365 if (rc) { 8366 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8367 "3020 Requested number of SR-IOV " 8368 "virtual functions (%d) is not " 8369 "supported\n", 8370 phba->cfg_sriov_nr_virtfn); 8371 phba->cfg_sriov_nr_virtfn = 0; 8372 } 8373 } 8374 8375 return 0; 8376 8377 out_free_hba_hdwq_info: 8378 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8379 free_percpu(phba->sli4_hba.c_stat); 8380 out_free_hba_idle_stat: 8381 #endif 8382 kfree(phba->sli4_hba.idle_stat); 8383 out_free_hba_eq_info: 8384 free_percpu(phba->sli4_hba.eq_info); 8385 out_free_hba_cpu_map: 8386 kfree(phba->sli4_hba.cpu_map); 8387 out_free_hba_eq_hdl: 8388 kfree(phba->sli4_hba.hba_eq_hdl); 8389 out_free_fcf_rr_bmask: 8390 kfree(phba->fcf.fcf_rr_bmask); 8391 out_remove_rpi_hdrs: 8392 lpfc_sli4_remove_rpi_hdrs(phba); 8393 out_free_active_sgl: 8394 lpfc_free_active_sgl(phba); 8395 out_destroy_cq_event_pool: 8396 lpfc_sli4_cq_event_pool_destroy(phba); 8397 out_free_cmd_rsp_buf: 8398 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 8399 phba->lpfc_cmd_rsp_buf_pool = NULL; 8400 out_free_sg_dma_buf: 8401 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 8402 phba->lpfc_sg_dma_buf_pool = NULL; 8403 out_free_bsmbx: 8404 lpfc_destroy_bootstrap_mbox(phba); 8405 out_free_mem: 8406 lpfc_mem_free(phba); 8407 return rc; 8408 } 8409 8410 /** 8411 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 8412 * @phba: pointer to lpfc hba data structure. 8413 * 8414 * This routine is invoked to unset the driver internal resources set up 8415 * specific for supporting the SLI-4 HBA device it attached to. 
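 *
 * Teardown in this routine broadly mirrors lpfc_sli4_driver_resource_setup()
 * in reverse. For instance, the per-CPU statistics follow the usual
 * alloc_percpu()/free_percpu() pairing (shown only as a sketch of calls
 * that already exist above and below):
 *
 *	phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);	// setup
 *	...
 *	free_percpu(phba->cmf_stat);				// unset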
8416 **/ 8417 static void 8418 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 8419 { 8420 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 8421 8422 free_percpu(phba->sli4_hba.eq_info); 8423 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8424 free_percpu(phba->sli4_hba.c_stat); 8425 #endif 8426 free_percpu(phba->cmf_stat); 8427 kfree(phba->sli4_hba.idle_stat); 8428 8429 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 8430 kfree(phba->sli4_hba.cpu_map); 8431 phba->sli4_hba.num_possible_cpu = 0; 8432 phba->sli4_hba.num_present_cpu = 0; 8433 phba->sli4_hba.curr_disp_cpu = 0; 8434 cpumask_clear(&phba->sli4_hba.irq_aff_mask); 8435 8436 /* Free memory allocated for fast-path work queue handles */ 8437 kfree(phba->sli4_hba.hba_eq_hdl); 8438 8439 /* Free the allocated rpi headers. */ 8440 lpfc_sli4_remove_rpi_hdrs(phba); 8441 lpfc_sli4_remove_rpis(phba); 8442 8443 /* Free eligible FCF index bmask */ 8444 kfree(phba->fcf.fcf_rr_bmask); 8445 8446 /* Free the ELS sgl list */ 8447 lpfc_free_active_sgl(phba); 8448 lpfc_free_els_sgl_list(phba); 8449 lpfc_free_nvmet_sgl_list(phba); 8450 8451 /* Free the completion queue EQ event pool */ 8452 lpfc_sli4_cq_event_release_all(phba); 8453 lpfc_sli4_cq_event_pool_destroy(phba); 8454 8455 /* Release resource identifiers. */ 8456 lpfc_sli4_dealloc_resource_identifiers(phba); 8457 8458 /* Free the bsmbx region. */ 8459 lpfc_destroy_bootstrap_mbox(phba); 8460 8461 /* Free the SLI Layer memory with SLI4 HBAs */ 8462 lpfc_mem_free_all(phba); 8463 8464 /* Free the current connect table */ 8465 list_for_each_entry_safe(conn_entry, next_conn_entry, 8466 &phba->fcf_conn_rec_list, list) { 8467 list_del_init(&conn_entry->list); 8468 kfree(conn_entry); 8469 } 8470 8471 return; 8472 } 8473 8474 /** 8475 * lpfc_init_api_table_setup - Set up init api function jump table 8476 * @phba: The hba struct for which this call is being executed. 8477 * @dev_grp: The HBA PCI-Device group number. 8478 * 8479 * This routine sets up the device INIT interface API function jump table 8480 * in @phba struct. 8481 * 8482 * Returns: 0 - success, -ENODEV - failure. 8483 **/ 8484 int 8485 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8486 { 8487 phba->lpfc_hba_init_link = lpfc_hba_init_link; 8488 phba->lpfc_hba_down_link = lpfc_hba_down_link; 8489 phba->lpfc_selective_reset = lpfc_selective_reset; 8490 switch (dev_grp) { 8491 case LPFC_PCI_DEV_LP: 8492 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 8493 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 8494 phba->lpfc_stop_port = lpfc_stop_port_s3; 8495 break; 8496 case LPFC_PCI_DEV_OC: 8497 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 8498 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 8499 phba->lpfc_stop_port = lpfc_stop_port_s4; 8500 break; 8501 default: 8502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8503 "1431 Invalid HBA PCI-device group: 0x%x\n", 8504 dev_grp); 8505 return -ENODEV; 8506 } 8507 return 0; 8508 } 8509 8510 /** 8511 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 8512 * @phba: pointer to lpfc hba data structure. 8513 * 8514 * This routine is invoked to set up the driver internal resources after the 8515 * device specific resource setup to support the HBA device it attached to. 8516 * 8517 * Return codes 8518 * 0 - successful 8519 * other values - error 8520 **/ 8521 static int 8522 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 8523 { 8524 int error; 8525 8526 /* Startup the kernel thread for this host adapter. 
*/ 8527 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8528 "lpfc_worker_%d", phba->brd_no); 8529 if (IS_ERR(phba->worker_thread)) { 8530 error = PTR_ERR(phba->worker_thread); 8531 return error; 8532 } 8533 8534 return 0; 8535 } 8536 8537 /** 8538 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 8539 * @phba: pointer to lpfc hba data structure. 8540 * 8541 * This routine is invoked to unset the driver internal resources set up after 8542 * the device specific resource setup for supporting the HBA device it 8543 * attached to. 8544 **/ 8545 static void 8546 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 8547 { 8548 if (phba->wq) { 8549 flush_workqueue(phba->wq); 8550 destroy_workqueue(phba->wq); 8551 phba->wq = NULL; 8552 } 8553 8554 /* Stop kernel worker thread */ 8555 if (phba->worker_thread) 8556 kthread_stop(phba->worker_thread); 8557 } 8558 8559 /** 8560 * lpfc_free_iocb_list - Free iocb list. 8561 * @phba: pointer to lpfc hba data structure. 8562 * 8563 * This routine is invoked to free the driver's IOCB list and memory. 8564 **/ 8565 void 8566 lpfc_free_iocb_list(struct lpfc_hba *phba) 8567 { 8568 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 8569 8570 spin_lock_irq(&phba->hbalock); 8571 list_for_each_entry_safe(iocbq_entry, iocbq_next, 8572 &phba->lpfc_iocb_list, list) { 8573 list_del(&iocbq_entry->list); 8574 kfree(iocbq_entry); 8575 phba->total_iocbq_bufs--; 8576 } 8577 spin_unlock_irq(&phba->hbalock); 8578 8579 return; 8580 } 8581 8582 /** 8583 * lpfc_init_iocb_list - Allocate and initialize iocb list. 8584 * @phba: pointer to lpfc hba data structure. 8585 * @iocb_count: number of requested iocbs 8586 * 8587 * This routine is invoked to allocate and initizlize the driver's IOCB 8588 * list and set up the IOCB tag array accordingly. 8589 * 8590 * Return codes 8591 * 0 - successful 8592 * other values - error 8593 **/ 8594 int 8595 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 8596 { 8597 struct lpfc_iocbq *iocbq_entry = NULL; 8598 uint16_t iotag; 8599 int i; 8600 8601 /* Initialize and populate the iocb list per host. */ 8602 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 8603 for (i = 0; i < iocb_count; i++) { 8604 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 8605 if (iocbq_entry == NULL) { 8606 printk(KERN_ERR "%s: only allocated %d iocbs of " 8607 "expected %d count. Unloading driver.\n", 8608 __func__, i, iocb_count); 8609 goto out_free_iocbq; 8610 } 8611 8612 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 8613 if (iotag == 0) { 8614 kfree(iocbq_entry); 8615 printk(KERN_ERR "%s: failed to allocate IOTAG. " 8616 "Unloading driver.\n", __func__); 8617 goto out_free_iocbq; 8618 } 8619 iocbq_entry->sli4_lxritag = NO_XRI; 8620 iocbq_entry->sli4_xritag = NO_XRI; 8621 8622 spin_lock_irq(&phba->hbalock); 8623 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 8624 phba->total_iocbq_bufs++; 8625 spin_unlock_irq(&phba->hbalock); 8626 } 8627 8628 return 0; 8629 8630 out_free_iocbq: 8631 lpfc_free_iocb_list(phba); 8632 8633 return -ENOMEM; 8634 } 8635 8636 /** 8637 * lpfc_free_sgl_list - Free a given sgl list. 8638 * @phba: pointer to lpfc hba data structure. 8639 * @sglq_list: pointer to the head of sgl list. 8640 * 8641 * This routine is invoked to free a give sgl list and memory. 
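 *
 * Callers typically splice a driver-owned sgl list onto a local list head
 * under the appropriate lock and then pass that local list here, as
 * lpfc_free_els_sgl_list() below does (usage sketch mirroring that code):
 *
 *	LIST_HEAD(sglq_list);
 *
 *	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 *	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
 *	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 *	lpfc_free_sgl_list(phba, &sglq_list);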
8642 **/ 8643 void 8644 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 8645 { 8646 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8647 8648 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 8649 list_del(&sglq_entry->list); 8650 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 8651 kfree(sglq_entry); 8652 } 8653 } 8654 8655 /** 8656 * lpfc_free_els_sgl_list - Free els sgl list. 8657 * @phba: pointer to lpfc hba data structure. 8658 * 8659 * This routine is invoked to free the driver's els sgl list and memory. 8660 **/ 8661 static void 8662 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 8663 { 8664 LIST_HEAD(sglq_list); 8665 8666 /* Retrieve all els sgls from driver list */ 8667 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 8668 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 8669 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 8670 8671 /* Now free the sgl list */ 8672 lpfc_free_sgl_list(phba, &sglq_list); 8673 } 8674 8675 /** 8676 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 8677 * @phba: pointer to lpfc hba data structure. 8678 * 8679 * This routine is invoked to free the driver's nvmet sgl list and memory. 8680 **/ 8681 static void 8682 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 8683 { 8684 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8685 LIST_HEAD(sglq_list); 8686 8687 /* Retrieve all nvmet sgls from driver list */ 8688 spin_lock_irq(&phba->hbalock); 8689 spin_lock(&phba->sli4_hba.sgl_list_lock); 8690 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 8691 spin_unlock(&phba->sli4_hba.sgl_list_lock); 8692 spin_unlock_irq(&phba->hbalock); 8693 8694 /* Now free the sgl list */ 8695 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 8696 list_del(&sglq_entry->list); 8697 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 8698 kfree(sglq_entry); 8699 } 8700 8701 /* Update the nvmet_xri_cnt to reflect no current sgls. 8702 * The next initialization cycle sets the count and allocates 8703 * the sgls over again. 8704 */ 8705 phba->sli4_hba.nvmet_xri_cnt = 0; 8706 } 8707 8708 /** 8709 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 8710 * @phba: pointer to lpfc hba data structure. 8711 * 8712 * This routine is invoked to allocate the driver's active sgl memory. 8713 * This array will hold the sglq_entry's for active IOs. 8714 **/ 8715 static int 8716 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 8717 { 8718 int size; 8719 size = sizeof(struct lpfc_sglq *); 8720 size *= phba->sli4_hba.max_cfg_param.max_xri; 8721 8722 phba->sli4_hba.lpfc_sglq_active_list = 8723 kzalloc(size, GFP_KERNEL); 8724 if (!phba->sli4_hba.lpfc_sglq_active_list) 8725 return -ENOMEM; 8726 return 0; 8727 } 8728 8729 /** 8730 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 8731 * @phba: pointer to lpfc hba data structure. 8732 * 8733 * This routine is invoked to walk through the array of active sglq entries 8734 * and free all of the resources. 8735 * This is just a place holder for now. 8736 **/ 8737 static void 8738 lpfc_free_active_sgl(struct lpfc_hba *phba) 8739 { 8740 kfree(phba->sli4_hba.lpfc_sglq_active_list); 8741 } 8742 8743 /** 8744 * lpfc_init_sgl_list - Allocate and initialize sgl list. 8745 * @phba: pointer to lpfc hba data structure. 8746 * 8747 * This routine is invoked to allocate and initizlize the driver's sgl 8748 * list and set up the sgl xritag tag array accordingly. 
8749 * 8750 **/ 8751 static void 8752 lpfc_init_sgl_list(struct lpfc_hba *phba) 8753 { 8754 /* Initialize and populate the sglq list per host/VF. */ 8755 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 8756 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 8757 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 8758 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 8759 8760 /* els xri-sgl book keeping */ 8761 phba->sli4_hba.els_xri_cnt = 0; 8762 8763 /* nvme xri-buffer book keeping */ 8764 phba->sli4_hba.io_xri_cnt = 0; 8765 } 8766 8767 /** 8768 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 8769 * @phba: pointer to lpfc hba data structure. 8770 * 8771 * This routine is invoked to post rpi header templates to the 8772 * port for those SLI4 ports that do not support extents. This routine 8773 * posts a PAGE_SIZE memory region to the port to hold up to 8774 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 8775 * and should be called only when interrupts are disabled. 8776 * 8777 * Return codes 8778 * 0 - successful 8779 * -ERROR - otherwise. 8780 **/ 8781 int 8782 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 8783 { 8784 int rc = 0; 8785 struct lpfc_rpi_hdr *rpi_hdr; 8786 8787 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 8788 if (!phba->sli4_hba.rpi_hdrs_in_use) 8789 return rc; 8790 if (phba->sli4_hba.extents_in_use) 8791 return -EIO; 8792 8793 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 8794 if (!rpi_hdr) { 8795 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8796 "0391 Error during rpi post operation\n"); 8797 lpfc_sli4_remove_rpis(phba); 8798 rc = -ENODEV; 8799 } 8800 8801 return rc; 8802 } 8803 8804 /** 8805 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 8806 * @phba: pointer to lpfc hba data structure. 8807 * 8808 * This routine is invoked to allocate a single 4KB memory region to 8809 * support rpis and stores them in the phba. This single region 8810 * provides support for up to 64 rpis. The region is used globally 8811 * by the device. 8812 * 8813 * Returns: 8814 * A valid rpi hdr on success. 8815 * A NULL pointer on any failure. 8816 **/ 8817 struct lpfc_rpi_hdr * 8818 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 8819 { 8820 uint16_t rpi_limit, curr_rpi_range; 8821 struct lpfc_dmabuf *dmabuf; 8822 struct lpfc_rpi_hdr *rpi_hdr; 8823 8824 /* 8825 * If the SLI4 port supports extents, posting the rpi header isn't 8826 * required. Set the expected maximum count and let the actual value 8827 * get set when extents are fully allocated. 8828 */ 8829 if (!phba->sli4_hba.rpi_hdrs_in_use) 8830 return NULL; 8831 if (phba->sli4_hba.extents_in_use) 8832 return NULL; 8833 8834 /* The limit on the logical index is just the max_rpi count. */ 8835 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 8836 8837 spin_lock_irq(&phba->hbalock); 8838 /* 8839 * Establish the starting RPI in this header block. The starting 8840 * rpi is normalized to a zero base because the physical rpi is 8841 * port based. 8842 */ 8843 curr_rpi_range = phba->sli4_hba.next_rpi; 8844 spin_unlock_irq(&phba->hbalock); 8845 8846 /* Reached full RPI range */ 8847 if (curr_rpi_range == rpi_limit) 8848 return NULL; 8849 8850 /* 8851 * First allocate the protocol header region for the port. The 8852 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
8853 */ 8854 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8855 if (!dmabuf) 8856 return NULL; 8857 8858 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 8859 LPFC_HDR_TEMPLATE_SIZE, 8860 &dmabuf->phys, GFP_KERNEL); 8861 if (!dmabuf->virt) { 8862 rpi_hdr = NULL; 8863 goto err_free_dmabuf; 8864 } 8865 8866 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 8867 rpi_hdr = NULL; 8868 goto err_free_coherent; 8869 } 8870 8871 /* Save the rpi header data for cleanup later. */ 8872 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 8873 if (!rpi_hdr) 8874 goto err_free_coherent; 8875 8876 rpi_hdr->dmabuf = dmabuf; 8877 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 8878 rpi_hdr->page_count = 1; 8879 spin_lock_irq(&phba->hbalock); 8880 8881 /* The rpi_hdr stores the logical index only. */ 8882 rpi_hdr->start_rpi = curr_rpi_range; 8883 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 8884 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 8885 8886 spin_unlock_irq(&phba->hbalock); 8887 return rpi_hdr; 8888 8889 err_free_coherent: 8890 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 8891 dmabuf->virt, dmabuf->phys); 8892 err_free_dmabuf: 8893 kfree(dmabuf); 8894 return NULL; 8895 } 8896 8897 /** 8898 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 8899 * @phba: pointer to lpfc hba data structure. 8900 * 8901 * This routine is invoked to remove all memory resources allocated 8902 * to support rpis for SLI4 ports not supporting extents. This routine 8903 * presumes the caller has released all rpis consumed by fabric or port 8904 * logins and is prepared to have the header pages removed. 8905 **/ 8906 void 8907 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 8908 { 8909 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 8910 8911 if (!phba->sli4_hba.rpi_hdrs_in_use) 8912 goto exit; 8913 8914 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 8915 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 8916 list_del(&rpi_hdr->list); 8917 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 8918 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 8919 kfree(rpi_hdr->dmabuf); 8920 kfree(rpi_hdr); 8921 } 8922 exit: 8923 /* There are no rpis available to the port now. */ 8924 phba->sli4_hba.next_rpi = 0; 8925 } 8926 8927 /** 8928 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 8929 * @pdev: pointer to pci device data structure. 8930 * 8931 * This routine is invoked to allocate the driver hba data structure for an 8932 * HBA device. If the allocation is successful, the phba reference to the 8933 * PCI device data structure is set. 
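 *
 * A hedged sketch of how the PCI probe path is expected to pair this with
 * lpfc_hba_free() (the exact probe flow lives elsewhere in this file):
 *
 *	phba = lpfc_hba_alloc(pdev);
 *	if (!phba)
 *		return -ENOMEM;
 *	...
 *	lpfc_hba_free(phba);	// on a later failure or on removal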
8934 * 8935 * Return codes 8936 * pointer to @phba - successful 8937 * NULL - error 8938 **/ 8939 static struct lpfc_hba * 8940 lpfc_hba_alloc(struct pci_dev *pdev) 8941 { 8942 struct lpfc_hba *phba; 8943 8944 /* Allocate memory for HBA structure */ 8945 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 8946 if (!phba) { 8947 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 8948 return NULL; 8949 } 8950 8951 /* Set reference to PCI device in HBA structure */ 8952 phba->pcidev = pdev; 8953 8954 /* Assign an unused board number */ 8955 phba->brd_no = lpfc_get_instance(); 8956 if (phba->brd_no < 0) { 8957 kfree(phba); 8958 return NULL; 8959 } 8960 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 8961 8962 spin_lock_init(&phba->ct_ev_lock); 8963 INIT_LIST_HEAD(&phba->ct_ev_waiters); 8964 8965 return phba; 8966 } 8967 8968 /** 8969 * lpfc_hba_free - Free driver hba data structure with a device. 8970 * @phba: pointer to lpfc hba data structure. 8971 * 8972 * This routine is invoked to free the driver hba data structure with an 8973 * HBA device. 8974 **/ 8975 static void 8976 lpfc_hba_free(struct lpfc_hba *phba) 8977 { 8978 if (phba->sli_rev == LPFC_SLI_REV4) 8979 kfree(phba->sli4_hba.hdwq); 8980 8981 /* Release the driver assigned board number */ 8982 idr_remove(&lpfc_hba_index, phba->brd_no); 8983 8984 /* Free memory allocated with sli3 rings */ 8985 kfree(phba->sli.sli3_ring); 8986 phba->sli.sli3_ring = NULL; 8987 8988 kfree(phba); 8989 return; 8990 } 8991 8992 /** 8993 * lpfc_create_shost - Create hba physical port with associated scsi host. 8994 * @phba: pointer to lpfc hba data structure. 8995 * 8996 * This routine is invoked to create HBA physical port and associate a SCSI 8997 * host with it. 8998 * 8999 * Return codes 9000 * 0 - successful 9001 * other values - error 9002 **/ 9003 static int 9004 lpfc_create_shost(struct lpfc_hba *phba) 9005 { 9006 struct lpfc_vport *vport; 9007 struct Scsi_Host *shost; 9008 9009 /* Initialize HBA FC structure */ 9010 phba->fc_edtov = FF_DEF_EDTOV; 9011 phba->fc_ratov = FF_DEF_RATOV; 9012 phba->fc_altov = FF_DEF_ALTOV; 9013 phba->fc_arbtov = FF_DEF_ARBTOV; 9014 9015 atomic_set(&phba->sdev_cnt, 0); 9016 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 9017 if (!vport) 9018 return -ENODEV; 9019 9020 shost = lpfc_shost_from_vport(vport); 9021 phba->pport = vport; 9022 9023 if (phba->nvmet_support) { 9024 /* Only 1 vport (pport) will support NVME target */ 9025 phba->targetport = NULL; 9026 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 9027 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 9028 "6076 NVME Target Found\n"); 9029 } 9030 9031 lpfc_debugfs_initialize(vport); 9032 /* Put reference to SCSI host to driver's device private data */ 9033 pci_set_drvdata(phba->pcidev, shost); 9034 9035 /* 9036 * At this point we are fully registered with PSA. In addition, 9037 * any initial discovery should be completed. 9038 */ 9039 vport->load_flag |= FC_ALLOW_FDMI; 9040 if (phba->cfg_enable_SmartSAN || 9041 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 9042 9043 /* Setup appropriate attribute masks */ 9044 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 9045 if (phba->cfg_enable_SmartSAN) 9046 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 9047 else 9048 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 9049 } 9050 return 0; 9051 } 9052 9053 /** 9054 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 9055 * @phba: pointer to lpfc hba data structure. 
9056 * 9057 * This routine is invoked to destroy HBA physical port and the associated 9058 * SCSI host. 9059 **/ 9060 static void 9061 lpfc_destroy_shost(struct lpfc_hba *phba) 9062 { 9063 struct lpfc_vport *vport = phba->pport; 9064 9065 /* Destroy physical port that associated with the SCSI host */ 9066 destroy_port(vport); 9067 9068 return; 9069 } 9070 9071 /** 9072 * lpfc_setup_bg - Setup Block guard structures and debug areas. 9073 * @phba: pointer to lpfc hba data structure. 9074 * @shost: the shost to be used to detect Block guard settings. 9075 * 9076 * This routine sets up the local Block guard protocol settings for @shost. 9077 * This routine also allocates memory for debugging bg buffers. 9078 **/ 9079 static void 9080 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 9081 { 9082 uint32_t old_mask; 9083 uint32_t old_guard; 9084 9085 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9087 "1478 Registering BlockGuard with the " 9088 "SCSI layer\n"); 9089 9090 old_mask = phba->cfg_prot_mask; 9091 old_guard = phba->cfg_prot_guard; 9092 9093 /* Only allow supported values */ 9094 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 9095 SHOST_DIX_TYPE0_PROTECTION | 9096 SHOST_DIX_TYPE1_PROTECTION); 9097 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 9098 SHOST_DIX_GUARD_CRC); 9099 9100 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 9101 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 9102 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 9103 9104 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9105 if ((old_mask != phba->cfg_prot_mask) || 9106 (old_guard != phba->cfg_prot_guard)) 9107 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9108 "1475 Registering BlockGuard with the " 9109 "SCSI layer: mask %d guard %d\n", 9110 phba->cfg_prot_mask, 9111 phba->cfg_prot_guard); 9112 9113 scsi_host_set_prot(shost, phba->cfg_prot_mask); 9114 scsi_host_set_guard(shost, phba->cfg_prot_guard); 9115 } else 9116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9117 "1479 Not Registering BlockGuard with the SCSI " 9118 "layer, Bad protection parameters: %d %d\n", 9119 old_mask, old_guard); 9120 } 9121 } 9122 9123 /** 9124 * lpfc_post_init_setup - Perform necessary device post initialization setup. 9125 * @phba: pointer to lpfc hba data structure. 9126 * 9127 * This routine is invoked to perform all the necessary post initialization 9128 * setup for the device. 9129 **/ 9130 static void 9131 lpfc_post_init_setup(struct lpfc_hba *phba) 9132 { 9133 struct Scsi_Host *shost; 9134 struct lpfc_adapter_event_header adapter_event; 9135 9136 /* Get the default values for Model Name and Description */ 9137 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9138 9139 /* 9140 * hba setup may have changed the hba_queue_depth so we need to 9141 * adjust the value of can_queue. 
9142 */ 9143 shost = pci_get_drvdata(phba->pcidev); 9144 shost->can_queue = phba->cfg_hba_queue_depth - 10; 9145 9146 lpfc_host_attrib_init(shost); 9147 9148 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9149 spin_lock_irq(shost->host_lock); 9150 lpfc_poll_start_timer(phba); 9151 spin_unlock_irq(shost->host_lock); 9152 } 9153 9154 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9155 "0428 Perform SCSI scan\n"); 9156 /* Send board arrival event to upper layer */ 9157 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 9158 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 9159 fc_host_post_vendor_event(shost, fc_get_event_number(), 9160 sizeof(adapter_event), 9161 (char *) &adapter_event, 9162 LPFC_NL_VENDOR_ID); 9163 return; 9164 } 9165 9166 /** 9167 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 9168 * @phba: pointer to lpfc hba data structure. 9169 * 9170 * This routine is invoked to set up the PCI device memory space for device 9171 * with SLI-3 interface spec. 9172 * 9173 * Return codes 9174 * 0 - successful 9175 * other values - error 9176 **/ 9177 static int 9178 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 9179 { 9180 struct pci_dev *pdev = phba->pcidev; 9181 unsigned long bar0map_len, bar2map_len; 9182 int i, hbq_count; 9183 void *ptr; 9184 int error; 9185 9186 if (!pdev) 9187 return -ENODEV; 9188 9189 /* Set the device DMA mask size */ 9190 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9191 if (error) 9192 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 9193 if (error) 9194 return error; 9195 error = -ENODEV; 9196 9197 /* Get the bus address of Bar0 and Bar2 and the number of bytes 9198 * required by each mapping. 9199 */ 9200 phba->pci_bar0_map = pci_resource_start(pdev, 0); 9201 bar0map_len = pci_resource_len(pdev, 0); 9202 9203 phba->pci_bar2_map = pci_resource_start(pdev, 2); 9204 bar2map_len = pci_resource_len(pdev, 2); 9205 9206 /* Map HBA SLIM to a kernel virtual address. */ 9207 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 9208 if (!phba->slim_memmap_p) { 9209 dev_printk(KERN_ERR, &pdev->dev, 9210 "ioremap failed for SLIM memory.\n"); 9211 goto out; 9212 } 9213 9214 /* Map HBA Control Registers to a kernel virtual address. 
*/ 9215 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 9216 if (!phba->ctrl_regs_memmap_p) { 9217 dev_printk(KERN_ERR, &pdev->dev, 9218 "ioremap failed for HBA control registers.\n"); 9219 goto out_iounmap_slim; 9220 } 9221 9222 /* Allocate memory for SLI-2 structures */ 9223 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9224 &phba->slim2p.phys, GFP_KERNEL); 9225 if (!phba->slim2p.virt) 9226 goto out_iounmap; 9227 9228 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 9229 phba->mbox_ext = (phba->slim2p.virt + 9230 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 9231 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 9232 phba->IOCBs = (phba->slim2p.virt + 9233 offsetof(struct lpfc_sli2_slim, IOCBs)); 9234 9235 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 9236 lpfc_sli_hbq_size(), 9237 &phba->hbqslimp.phys, 9238 GFP_KERNEL); 9239 if (!phba->hbqslimp.virt) 9240 goto out_free_slim; 9241 9242 hbq_count = lpfc_sli_hbq_count(); 9243 ptr = phba->hbqslimp.virt; 9244 for (i = 0; i < hbq_count; ++i) { 9245 phba->hbqs[i].hbq_virt = ptr; 9246 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 9247 ptr += (lpfc_hbq_defs[i]->entry_count * 9248 sizeof(struct lpfc_hbq_entry)); 9249 } 9250 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 9251 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 9252 9253 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 9254 9255 phba->MBslimaddr = phba->slim_memmap_p; 9256 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 9257 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 9258 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 9259 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 9260 9261 return 0; 9262 9263 out_free_slim: 9264 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9265 phba->slim2p.virt, phba->slim2p.phys); 9266 out_iounmap: 9267 iounmap(phba->ctrl_regs_memmap_p); 9268 out_iounmap_slim: 9269 iounmap(phba->slim_memmap_p); 9270 out: 9271 return error; 9272 } 9273 9274 /** 9275 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 9276 * @phba: pointer to lpfc hba data structure. 9277 * 9278 * This routine is invoked to unset the PCI device memory space for device 9279 * with SLI-3 interface spec. 9280 **/ 9281 static void 9282 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 9283 { 9284 struct pci_dev *pdev; 9285 9286 /* Obtain PCI device reference */ 9287 if (!phba->pcidev) 9288 return; 9289 else 9290 pdev = phba->pcidev; 9291 9292 /* Free coherent DMA memory allocated */ 9293 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 9294 phba->hbqslimp.virt, phba->hbqslimp.phys); 9295 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9296 phba->slim2p.virt, phba->slim2p.phys); 9297 9298 /* I/O memory unmap */ 9299 iounmap(phba->ctrl_regs_memmap_p); 9300 iounmap(phba->slim_memmap_p); 9301 9302 return; 9303 } 9304 9305 /** 9306 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 9307 * @phba: pointer to lpfc hba data structure. 9308 * 9309 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 9310 * done and check status. 9311 * 9312 * Return 0 if successful, otherwise -ENODEV. 
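 * As implemented below, "up to 30 seconds" comes from polling the port
 * semaphore register at most 3000 times with msleep(10) between reads; the
 * loop also exits early on a fatal POST error (perr) or once the port
 * reports LPFC_POST_STAGE_PORT_READY.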
9313 **/ 9314 int 9315 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 9316 { 9317 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 9318 struct lpfc_register reg_data; 9319 int i, port_error = 0; 9320 uint32_t if_type; 9321 9322 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 9323 memset(&reg_data, 0, sizeof(reg_data)); 9324 if (!phba->sli4_hba.PSMPHRregaddr) 9325 return -ENODEV; 9326 9327 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 9328 for (i = 0; i < 3000; i++) { 9329 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 9330 &portsmphr_reg.word0) || 9331 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 9332 /* Port has a fatal POST error, break out */ 9333 port_error = -ENODEV; 9334 break; 9335 } 9336 if (LPFC_POST_STAGE_PORT_READY == 9337 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 9338 break; 9339 msleep(10); 9340 } 9341 9342 /* 9343 * If there was a port error during POST, then don't proceed with 9344 * other register reads as the data may not be valid. Just exit. 9345 */ 9346 if (port_error) { 9347 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9348 "1408 Port Failed POST - portsmphr=0x%x, " 9349 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 9350 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 9351 portsmphr_reg.word0, 9352 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 9353 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 9354 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 9355 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 9356 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 9357 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 9358 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 9359 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 9360 } else { 9361 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9362 "2534 Device Info: SLIFamily=0x%x, " 9363 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 9364 "SLIHint_2=0x%x, FT=0x%x\n", 9365 bf_get(lpfc_sli_intf_sli_family, 9366 &phba->sli4_hba.sli_intf), 9367 bf_get(lpfc_sli_intf_slirev, 9368 &phba->sli4_hba.sli_intf), 9369 bf_get(lpfc_sli_intf_if_type, 9370 &phba->sli4_hba.sli_intf), 9371 bf_get(lpfc_sli_intf_sli_hint1, 9372 &phba->sli4_hba.sli_intf), 9373 bf_get(lpfc_sli_intf_sli_hint2, 9374 &phba->sli4_hba.sli_intf), 9375 bf_get(lpfc_sli_intf_func_type, 9376 &phba->sli4_hba.sli_intf)); 9377 /* 9378 * Check for other Port errors during the initialization 9379 * process. Fail the load if the port did not come up 9380 * correctly. 9381 */ 9382 if_type = bf_get(lpfc_sli_intf_if_type, 9383 &phba->sli4_hba.sli_intf); 9384 switch (if_type) { 9385 case LPFC_SLI_INTF_IF_TYPE_0: 9386 phba->sli4_hba.ue_mask_lo = 9387 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 9388 phba->sli4_hba.ue_mask_hi = 9389 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 9390 uerrlo_reg.word0 = 9391 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 9392 uerrhi_reg.word0 = 9393 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 9394 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 9395 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 9396 lpfc_printf_log(phba, KERN_ERR, 9397 LOG_TRACE_EVENT, 9398 "1422 Unrecoverable Error " 9399 "Detected during POST " 9400 "uerr_lo_reg=0x%x, " 9401 "uerr_hi_reg=0x%x, " 9402 "ue_mask_lo_reg=0x%x, " 9403 "ue_mask_hi_reg=0x%x\n", 9404 uerrlo_reg.word0, 9405 uerrhi_reg.word0, 9406 phba->sli4_hba.ue_mask_lo, 9407 phba->sli4_hba.ue_mask_hi); 9408 port_error = -ENODEV; 9409 } 9410 break; 9411 case LPFC_SLI_INTF_IF_TYPE_2: 9412 case LPFC_SLI_INTF_IF_TYPE_6: 9413 /* Final checks. The port status should be clean.
*/ 9414 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 9415 &reg_data.word0) || 9416 (bf_get(lpfc_sliport_status_err, &reg_data) && 9417 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 9418 phba->work_status[0] = 9419 readl(phba->sli4_hba.u.if_type2. 9420 ERR1regaddr); 9421 phba->work_status[1] = 9422 readl(phba->sli4_hba.u.if_type2. 9423 ERR2regaddr); 9424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9425 "2888 Unrecoverable port error " 9426 "following POST: port status reg " 9427 "0x%x, port_smphr reg 0x%x, " 9428 "error 1=0x%x, error 2=0x%x\n", 9429 reg_data.word0, 9430 portsmphr_reg.word0, 9431 phba->work_status[0], 9432 phba->work_status[1]); 9433 port_error = -ENODEV; 9434 break; 9435 } 9436 9437 if (lpfc_pldv_detect && 9438 bf_get(lpfc_sli_intf_sli_family, 9439 &phba->sli4_hba.sli_intf) == 9440 LPFC_SLI_INTF_FAMILY_G6) 9441 pci_write_config_byte(phba->pcidev, 9442 LPFC_SLI_INTF, CFG_PLD); 9443 break; 9444 case LPFC_SLI_INTF_IF_TYPE_1: 9445 default: 9446 break; 9447 } 9448 } 9449 return port_error; 9450 } 9451 9452 /** 9453 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 9454 * @phba: pointer to lpfc hba data structure. 9455 * @if_type: The SLI4 interface type getting configured. 9456 * 9457 * This routine is invoked to set up SLI4 BAR0 PCI config space register 9458 * memory map. 9459 **/ 9460 static void 9461 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 9462 { 9463 switch (if_type) { 9464 case LPFC_SLI_INTF_IF_TYPE_0: 9465 phba->sli4_hba.u.if_type0.UERRLOregaddr = 9466 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 9467 phba->sli4_hba.u.if_type0.UERRHIregaddr = 9468 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 9469 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 9470 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 9471 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 9472 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 9473 phba->sli4_hba.SLIINTFregaddr = 9474 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 9475 break; 9476 case LPFC_SLI_INTF_IF_TYPE_2: 9477 phba->sli4_hba.u.if_type2.EQDregaddr = 9478 phba->sli4_hba.conf_regs_memmap_p + 9479 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9480 phba->sli4_hba.u.if_type2.ERR1regaddr = 9481 phba->sli4_hba.conf_regs_memmap_p + 9482 LPFC_CTL_PORT_ER1_OFFSET; 9483 phba->sli4_hba.u.if_type2.ERR2regaddr = 9484 phba->sli4_hba.conf_regs_memmap_p + 9485 LPFC_CTL_PORT_ER2_OFFSET; 9486 phba->sli4_hba.u.if_type2.CTRLregaddr = 9487 phba->sli4_hba.conf_regs_memmap_p + 9488 LPFC_CTL_PORT_CTL_OFFSET; 9489 phba->sli4_hba.u.if_type2.STATUSregaddr = 9490 phba->sli4_hba.conf_regs_memmap_p + 9491 LPFC_CTL_PORT_STA_OFFSET; 9492 phba->sli4_hba.SLIINTFregaddr = 9493 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 9494 phba->sli4_hba.PSMPHRregaddr = 9495 phba->sli4_hba.conf_regs_memmap_p + 9496 LPFC_CTL_PORT_SEM_OFFSET; 9497 phba->sli4_hba.RQDBregaddr = 9498 phba->sli4_hba.conf_regs_memmap_p + 9499 LPFC_ULP0_RQ_DOORBELL; 9500 phba->sli4_hba.WQDBregaddr = 9501 phba->sli4_hba.conf_regs_memmap_p + 9502 LPFC_ULP0_WQ_DOORBELL; 9503 phba->sli4_hba.CQDBregaddr = 9504 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 9505 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 9506 phba->sli4_hba.MQDBregaddr = 9507 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 9508 phba->sli4_hba.BMBXregaddr = 9509 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9510 break; 9511 case LPFC_SLI_INTF_IF_TYPE_6: 9512 phba->sli4_hba.u.if_type2.EQDregaddr = 9513 phba->sli4_hba.conf_regs_memmap_p + 9514
LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9515 phba->sli4_hba.u.if_type2.ERR1regaddr = 9516 phba->sli4_hba.conf_regs_memmap_p + 9517 LPFC_CTL_PORT_ER1_OFFSET; 9518 phba->sli4_hba.u.if_type2.ERR2regaddr = 9519 phba->sli4_hba.conf_regs_memmap_p + 9520 LPFC_CTL_PORT_ER2_OFFSET; 9521 phba->sli4_hba.u.if_type2.CTRLregaddr = 9522 phba->sli4_hba.conf_regs_memmap_p + 9523 LPFC_CTL_PORT_CTL_OFFSET; 9524 phba->sli4_hba.u.if_type2.STATUSregaddr = 9525 phba->sli4_hba.conf_regs_memmap_p + 9526 LPFC_CTL_PORT_STA_OFFSET; 9527 phba->sli4_hba.PSMPHRregaddr = 9528 phba->sli4_hba.conf_regs_memmap_p + 9529 LPFC_CTL_PORT_SEM_OFFSET; 9530 phba->sli4_hba.BMBXregaddr = 9531 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9532 break; 9533 case LPFC_SLI_INTF_IF_TYPE_1: 9534 default: 9535 dev_printk(KERN_ERR, &phba->pcidev->dev, 9536 "FATAL - unsupported SLI4 interface type - %d\n", 9537 if_type); 9538 break; 9539 } 9540 } 9541 9542 /** 9543 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 9544 * @phba: pointer to lpfc hba data structure. 9545 * @if_type: sli if type to operate on. 9546 * 9547 * This routine is invoked to set up SLI4 BAR1 register memory map. 9548 **/ 9549 static void 9550 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 9551 { 9552 switch (if_type) { 9553 case LPFC_SLI_INTF_IF_TYPE_0: 9554 phba->sli4_hba.PSMPHRregaddr = 9555 phba->sli4_hba.ctrl_regs_memmap_p + 9556 LPFC_SLIPORT_IF0_SMPHR; 9557 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9558 LPFC_HST_ISR0; 9559 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9560 LPFC_HST_IMR0; 9561 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9562 LPFC_HST_ISCR0; 9563 break; 9564 case LPFC_SLI_INTF_IF_TYPE_6: 9565 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9566 LPFC_IF6_RQ_DOORBELL; 9567 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9568 LPFC_IF6_WQ_DOORBELL; 9569 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9570 LPFC_IF6_CQ_DOORBELL; 9571 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9572 LPFC_IF6_EQ_DOORBELL; 9573 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9574 LPFC_IF6_MQ_DOORBELL; 9575 break; 9576 case LPFC_SLI_INTF_IF_TYPE_2: 9577 case LPFC_SLI_INTF_IF_TYPE_1: 9578 default: 9579 dev_err(&phba->pcidev->dev, 9580 "FATAL - unsupported SLI4 interface type - %d\n", 9581 if_type); 9582 break; 9583 } 9584 } 9585 9586 /** 9587 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 9588 * @phba: pointer to lpfc hba data structure. 9589 * @vf: virtual function number 9590 * 9591 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 9592 * based on the given viftual function number, @vf. 9593 * 9594 * Return 0 if successful, otherwise -ENODEV. 
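 * For illustration only: with @vf == 1, the RQ doorbell computed below is
 *	drbl_regs_memmap_p + 1 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL
 * and the WQ/CQ/EQ/MQ/BMBX doorbells sit at their usual offsets within
 * that same per-VF page.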
9595 **/ 9596 static int 9597 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 9598 { 9599 if (vf > LPFC_VIR_FUNC_MAX) 9600 return -ENODEV; 9601 9602 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9603 vf * LPFC_VFR_PAGE_SIZE + 9604 LPFC_ULP0_RQ_DOORBELL); 9605 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9606 vf * LPFC_VFR_PAGE_SIZE + 9607 LPFC_ULP0_WQ_DOORBELL); 9608 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9609 vf * LPFC_VFR_PAGE_SIZE + 9610 LPFC_EQCQ_DOORBELL); 9611 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 9612 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9613 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 9614 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9615 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 9616 return 0; 9617 } 9618 9619 /** 9620 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 9621 * @phba: pointer to lpfc hba data structure. 9622 * 9623 * This routine is invoked to create the bootstrap mailbox 9624 * region consistent with the SLI-4 interface spec. This 9625 * routine allocates all memory necessary to communicate 9626 * mailbox commands to the port and sets up all alignment 9627 * needs. No locks are expected to be held when calling 9628 * this routine. 9629 * 9630 * Return codes 9631 * 0 - successful 9632 * -ENOMEM - could not allocated memory. 9633 **/ 9634 static int 9635 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 9636 { 9637 uint32_t bmbx_size; 9638 struct lpfc_dmabuf *dmabuf; 9639 struct dma_address *dma_address; 9640 uint32_t pa_addr; 9641 uint64_t phys_addr; 9642 9643 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 9644 if (!dmabuf) 9645 return -ENOMEM; 9646 9647 /* 9648 * The bootstrap mailbox region is comprised of 2 parts 9649 * plus an alignment restriction of 16 bytes. 9650 */ 9651 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 9652 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 9653 &dmabuf->phys, GFP_KERNEL); 9654 if (!dmabuf->virt) { 9655 kfree(dmabuf); 9656 return -ENOMEM; 9657 } 9658 9659 /* 9660 * Initialize the bootstrap mailbox pointers now so that the register 9661 * operations are simple later. The mailbox dma address is required 9662 * to be 16-byte aligned. Also align the virtual memory as each 9663 * maibox is copied into the bmbx mailbox region before issuing the 9664 * command to the port. 9665 */ 9666 phba->sli4_hba.bmbx.dmabuf = dmabuf; 9667 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 9668 9669 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 9670 LPFC_ALIGN_16_BYTE); 9671 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 9672 LPFC_ALIGN_16_BYTE); 9673 9674 /* 9675 * Set the high and low physical addresses now. The SLI4 alignment 9676 * requirement is 16 bytes and the mailbox is posted to the port 9677 * as two 30-bit addresses. The other data is a bit marking whether 9678 * the 30-bit address is the high or low address. 9679 * Upcast bmbx aphys to 64bits so shift instruction compiles 9680 * clean on 32 bit machines. 
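	 * Worked illustration of the split done below: addr_lo carries
	 * physical address bits 33:4 (the 16-byte-aligned low word) and
	 * addr_hi carries bits 63:34; each 30-bit value is shifted left by 2
	 * and tagged with its LPFC_BMBX_BIT1_ADDR_LO/HI marker before being
	 * written to the port.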
9681 */ 9682 dma_address = &phba->sli4_hba.bmbx.dma_address; 9683 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 9684 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 9685 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 9686 LPFC_BMBX_BIT1_ADDR_HI); 9687 9688 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 9689 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 9690 LPFC_BMBX_BIT1_ADDR_LO); 9691 return 0; 9692 } 9693 9694 /** 9695 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 9696 * @phba: pointer to lpfc hba data structure. 9697 * 9698 * This routine is invoked to teardown the bootstrap mailbox 9699 * region and release all host resources. This routine requires 9700 * the caller to ensure all mailbox commands recovered, no 9701 * additional mailbox comands are sent, and interrupts are disabled 9702 * before calling this routine. 9703 * 9704 **/ 9705 static void 9706 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 9707 { 9708 dma_free_coherent(&phba->pcidev->dev, 9709 phba->sli4_hba.bmbx.bmbx_size, 9710 phba->sli4_hba.bmbx.dmabuf->virt, 9711 phba->sli4_hba.bmbx.dmabuf->phys); 9712 9713 kfree(phba->sli4_hba.bmbx.dmabuf); 9714 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 9715 } 9716 9717 static const char * const lpfc_topo_to_str[] = { 9718 "Loop then P2P", 9719 "Loopback", 9720 "P2P Only", 9721 "Unsupported", 9722 "Loop Only", 9723 "Unsupported", 9724 "P2P then Loop", 9725 }; 9726 9727 #define LINK_FLAGS_DEF 0x0 9728 #define LINK_FLAGS_P2P 0x1 9729 #define LINK_FLAGS_LOOP 0x2 9730 /** 9731 * lpfc_map_topology - Map the topology read from READ_CONFIG 9732 * @phba: pointer to lpfc hba data structure. 9733 * @rd_config: pointer to read config data 9734 * 9735 * This routine is invoked to map the topology values as read 9736 * from the read config mailbox command. If the persistent 9737 * topology feature is supported, the firmware will provide the 9738 * saved topology information to be used in INIT_LINK 9739 **/ 9740 static void 9741 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 9742 { 9743 u8 ptv, tf, pt; 9744 9745 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 9746 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 9747 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 9748 9749 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9750 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 9751 ptv, tf, pt); 9752 if (!ptv) { 9753 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9754 "2019 FW does not support persistent topology " 9755 "Using driver parameter defined value [%s]", 9756 lpfc_topo_to_str[phba->cfg_topology]); 9757 return; 9758 } 9759 /* FW supports persistent topology - override module parameter value */ 9760 phba->hba_flag |= HBA_PERSISTENT_TOPO; 9761 9762 /* if ASIC_GEN_NUM >= 0xC) */ 9763 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9764 LPFC_SLI_INTF_IF_TYPE_6) || 9765 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 9766 LPFC_SLI_INTF_FAMILY_G6)) { 9767 if (!tf) { 9768 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 9769 ? FLAGS_TOPOLOGY_MODE_LOOP 9770 : FLAGS_TOPOLOGY_MODE_PT_PT); 9771 } else { 9772 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 9773 } 9774 } else { /* G5 */ 9775 if (tf) { 9776 /* If topology failover set - pt is '0' or '1' */ 9777 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 9778 FLAGS_TOPOLOGY_MODE_LOOP_PT); 9779 } else { 9780 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 9781 ? 
FLAGS_TOPOLOGY_MODE_PT_PT 9782 : FLAGS_TOPOLOGY_MODE_LOOP); 9783 } 9784 } 9785 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 9786 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9787 "2020 Using persistent topology value [%s]", 9788 lpfc_topo_to_str[phba->cfg_topology]); 9789 } else { 9790 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9791 "2021 Invalid topology values from FW " 9792 "Using driver parameter defined value [%s]", 9793 lpfc_topo_to_str[phba->cfg_topology]); 9794 } 9795 } 9796 9797 /** 9798 * lpfc_sli4_read_config - Get the config parameters. 9799 * @phba: pointer to lpfc hba data structure. 9800 * 9801 * This routine is invoked to read the configuration parameters from the HBA. 9802 * The configuration parameters are used to set the base and maximum values 9803 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 9804 * allocation for the port. 9805 * 9806 * Return codes 9807 * 0 - successful 9808 * -ENOMEM - No available memory 9809 * -EIO - The mailbox failed to complete successfully. 9810 **/ 9811 int 9812 lpfc_sli4_read_config(struct lpfc_hba *phba) 9813 { 9814 LPFC_MBOXQ_t *pmb; 9815 struct lpfc_mbx_read_config *rd_config; 9816 union lpfc_sli4_cfg_shdr *shdr; 9817 uint32_t shdr_status, shdr_add_status; 9818 struct lpfc_mbx_get_func_cfg *get_func_cfg; 9819 struct lpfc_rsrc_desc_fcfcoe *desc; 9820 char *pdesc_0; 9821 uint16_t forced_link_speed; 9822 uint32_t if_type, qmin; 9823 int length, i, rc = 0, rc2; 9824 9825 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9826 if (!pmb) { 9827 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9828 "2011 Unable to allocate memory for issuing " 9829 "SLI_CONFIG_SPECIAL mailbox command\n"); 9830 return -ENOMEM; 9831 } 9832 9833 lpfc_read_config(phba, pmb); 9834 9835 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 9836 if (rc != MBX_SUCCESS) { 9837 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9838 "2012 Mailbox failed , mbxCmd x%x " 9839 "READ_CONFIG, mbxStatus x%x\n", 9840 bf_get(lpfc_mqe_command, &pmb->u.mqe), 9841 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 9842 rc = -EIO; 9843 } else { 9844 rd_config = &pmb->u.mqe.un.rd_config; 9845 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 9846 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 9847 phba->sli4_hba.lnk_info.lnk_tp = 9848 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 9849 phba->sli4_hba.lnk_info.lnk_no = 9850 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 9851 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9852 "3081 lnk_type:%d, lnk_numb:%d\n", 9853 phba->sli4_hba.lnk_info.lnk_tp, 9854 phba->sli4_hba.lnk_info.lnk_no); 9855 } else 9856 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9857 "3082 Mailbox (x%x) returned ldv:x0\n", 9858 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 9859 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 9860 phba->bbcredit_support = 1; 9861 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 9862 } 9863 9864 phba->sli4_hba.conf_trunk = 9865 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 9866 phba->sli4_hba.extents_in_use = 9867 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 9868 phba->sli4_hba.max_cfg_param.max_xri = 9869 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 9870 /* Reduce resource usage in kdump environment */ 9871 if (is_kdump_kernel() && 9872 phba->sli4_hba.max_cfg_param.max_xri > 512) 9873 phba->sli4_hba.max_cfg_param.max_xri = 512; 9874 phba->sli4_hba.max_cfg_param.xri_base = 9875 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 9876 phba->sli4_hba.max_cfg_param.max_vpi = 9877 
bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 9878 /* Limit the max we support */ 9879 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 9880 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 9881 phba->sli4_hba.max_cfg_param.vpi_base = 9882 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 9883 phba->sli4_hba.max_cfg_param.max_rpi = 9884 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 9885 phba->sli4_hba.max_cfg_param.rpi_base = 9886 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 9887 phba->sli4_hba.max_cfg_param.max_vfi = 9888 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 9889 phba->sli4_hba.max_cfg_param.vfi_base = 9890 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 9891 phba->sli4_hba.max_cfg_param.max_fcfi = 9892 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 9893 phba->sli4_hba.max_cfg_param.max_eq = 9894 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 9895 phba->sli4_hba.max_cfg_param.max_rq = 9896 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 9897 phba->sli4_hba.max_cfg_param.max_wq = 9898 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 9899 phba->sli4_hba.max_cfg_param.max_cq = 9900 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 9901 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 9902 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 9903 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 9904 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 9905 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 9906 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 9907 phba->max_vports = phba->max_vpi; 9908 9909 /* Next decide on FPIN or Signal E2E CGN support 9910 * For congestion alarms and warnings valid combination are: 9911 * 1. FPIN alarms / FPIN warnings 9912 * 2. Signal alarms / Signal warnings 9913 * 3. FPIN alarms / Signal warnings 9914 * 4. Signal alarms / FPIN warnings 9915 * 9916 * Initialize the adapter frequency to 100 mSecs 9917 */ 9918 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 9919 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9920 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9921 9922 if (lpfc_use_cgn_signal) { 9923 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { 9924 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 9925 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 9926 } 9927 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { 9928 /* MUST support both alarm and warning 9929 * because EDC does not support alarm alone. 9930 */ 9931 if (phba->cgn_reg_signal != 9932 EDC_CG_SIG_WARN_ONLY) { 9933 /* Must support both or none */ 9934 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 9935 phba->cgn_reg_signal = 9936 EDC_CG_SIG_NOTSUPPORTED; 9937 } else { 9938 phba->cgn_reg_signal = 9939 EDC_CG_SIG_WARN_ALARM; 9940 phba->cgn_reg_fpin = 9941 LPFC_CGN_FPIN_NONE; 9942 } 9943 } 9944 } 9945 9946 /* Set the congestion initial signal and fpin values. */ 9947 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; 9948 phba->cgn_init_reg_signal = phba->cgn_reg_signal; 9949 9950 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 9951 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", 9952 phba->cgn_reg_signal, phba->cgn_reg_fpin); 9953 9954 lpfc_map_topology(phba, rd_config); 9955 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9956 "2003 cfg params Extents? 
%d " 9957 "XRI(B:%d M:%d), " 9958 "VPI(B:%d M:%d) " 9959 "VFI(B:%d M:%d) " 9960 "RPI(B:%d M:%d) " 9961 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", 9962 phba->sli4_hba.extents_in_use, 9963 phba->sli4_hba.max_cfg_param.xri_base, 9964 phba->sli4_hba.max_cfg_param.max_xri, 9965 phba->sli4_hba.max_cfg_param.vpi_base, 9966 phba->sli4_hba.max_cfg_param.max_vpi, 9967 phba->sli4_hba.max_cfg_param.vfi_base, 9968 phba->sli4_hba.max_cfg_param.max_vfi, 9969 phba->sli4_hba.max_cfg_param.rpi_base, 9970 phba->sli4_hba.max_cfg_param.max_rpi, 9971 phba->sli4_hba.max_cfg_param.max_fcfi, 9972 phba->sli4_hba.max_cfg_param.max_eq, 9973 phba->sli4_hba.max_cfg_param.max_cq, 9974 phba->sli4_hba.max_cfg_param.max_wq, 9975 phba->sli4_hba.max_cfg_param.max_rq, 9976 phba->lmt); 9977 9978 /* 9979 * Calculate queue resources based on how 9980 * many WQ/CQ/EQs are available. 9981 */ 9982 qmin = phba->sli4_hba.max_cfg_param.max_wq; 9983 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 9984 qmin = phba->sli4_hba.max_cfg_param.max_cq; 9985 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 9986 qmin = phba->sli4_hba.max_cfg_param.max_eq; 9987 /* 9988 * Whats left after this can go toward NVME / FCP. 9989 * The minus 4 accounts for ELS, NVME LS, MBOX 9990 * plus one extra. When configured for 9991 * NVMET, FCP io channel WQs are not created. 9992 */ 9993 qmin -= 4; 9994 9995 /* Check to see if there is enough for NVME */ 9996 if ((phba->cfg_irq_chann > qmin) || 9997 (phba->cfg_hdw_queue > qmin)) { 9998 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9999 "2005 Reducing Queues - " 10000 "FW resource limitation: " 10001 "WQ %d CQ %d EQ %d: min %d: " 10002 "IRQ %d HDWQ %d\n", 10003 phba->sli4_hba.max_cfg_param.max_wq, 10004 phba->sli4_hba.max_cfg_param.max_cq, 10005 phba->sli4_hba.max_cfg_param.max_eq, 10006 qmin, phba->cfg_irq_chann, 10007 phba->cfg_hdw_queue); 10008 10009 if (phba->cfg_irq_chann > qmin) 10010 phba->cfg_irq_chann = qmin; 10011 if (phba->cfg_hdw_queue > qmin) 10012 phba->cfg_hdw_queue = qmin; 10013 } 10014 } 10015 10016 if (rc) 10017 goto read_cfg_out; 10018 10019 /* Update link speed if forced link speed is supported */ 10020 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10021 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10022 forced_link_speed = 10023 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 10024 if (forced_link_speed) { 10025 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 10026 10027 switch (forced_link_speed) { 10028 case LINK_SPEED_1G: 10029 phba->cfg_link_speed = 10030 LPFC_USER_LINK_SPEED_1G; 10031 break; 10032 case LINK_SPEED_2G: 10033 phba->cfg_link_speed = 10034 LPFC_USER_LINK_SPEED_2G; 10035 break; 10036 case LINK_SPEED_4G: 10037 phba->cfg_link_speed = 10038 LPFC_USER_LINK_SPEED_4G; 10039 break; 10040 case LINK_SPEED_8G: 10041 phba->cfg_link_speed = 10042 LPFC_USER_LINK_SPEED_8G; 10043 break; 10044 case LINK_SPEED_10G: 10045 phba->cfg_link_speed = 10046 LPFC_USER_LINK_SPEED_10G; 10047 break; 10048 case LINK_SPEED_16G: 10049 phba->cfg_link_speed = 10050 LPFC_USER_LINK_SPEED_16G; 10051 break; 10052 case LINK_SPEED_32G: 10053 phba->cfg_link_speed = 10054 LPFC_USER_LINK_SPEED_32G; 10055 break; 10056 case LINK_SPEED_64G: 10057 phba->cfg_link_speed = 10058 LPFC_USER_LINK_SPEED_64G; 10059 break; 10060 case 0xffff: 10061 phba->cfg_link_speed = 10062 LPFC_USER_LINK_SPEED_AUTO; 10063 break; 10064 default: 10065 lpfc_printf_log(phba, KERN_ERR, 10066 LOG_TRACE_EVENT, 10067 "0047 Unrecognized link " 10068 "speed : %d\n", 10069 forced_link_speed); 10070 phba->cfg_link_speed = 10071 
LPFC_USER_LINK_SPEED_AUTO; 10072 } 10073 } 10074 } 10075 10076 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 10077 length = phba->sli4_hba.max_cfg_param.max_xri - 10078 lpfc_sli4_get_els_iocb_cnt(phba); 10079 if (phba->cfg_hba_queue_depth > length) { 10080 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10081 "3361 HBA queue depth changed from %d to %d\n", 10082 phba->cfg_hba_queue_depth, length); 10083 phba->cfg_hba_queue_depth = length; 10084 } 10085 10086 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 10087 LPFC_SLI_INTF_IF_TYPE_2) 10088 goto read_cfg_out; 10089 10090 /* get the pf# and vf# for SLI4 if_type 2 port */ 10091 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 10092 sizeof(struct lpfc_sli4_cfg_mhdr)); 10093 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 10094 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 10095 length, LPFC_SLI4_MBX_EMBED); 10096 10097 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10098 shdr = (union lpfc_sli4_cfg_shdr *) 10099 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 10100 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10101 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10102 if (rc2 || shdr_status || shdr_add_status) { 10103 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10104 "3026 Mailbox failed , mbxCmd x%x " 10105 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 10106 bf_get(lpfc_mqe_command, &pmb->u.mqe), 10107 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 10108 goto read_cfg_out; 10109 } 10110 10111 /* search for fc_fcoe resrouce descriptor */ 10112 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 10113 10114 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 10115 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 10116 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 10117 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 10118 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 10119 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 10120 goto read_cfg_out; 10121 10122 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 10123 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 10124 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 10125 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 10126 phba->sli4_hba.iov.pf_number = 10127 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 10128 phba->sli4_hba.iov.vf_number = 10129 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 10130 break; 10131 } 10132 } 10133 10134 if (i < LPFC_RSRC_DESC_MAX_NUM) 10135 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10136 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 10137 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 10138 phba->sli4_hba.iov.vf_number); 10139 else 10140 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10141 "3028 GET_FUNCTION_CONFIG: failed to find " 10142 "Resource Descriptor:x%x\n", 10143 LPFC_RSRC_DESC_TYPE_FCFCOE); 10144 10145 read_cfg_out: 10146 mempool_free(pmb, phba->mbox_mem_pool); 10147 return rc; 10148 } 10149 10150 /** 10151 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 10152 * @phba: pointer to lpfc hba data structure. 10153 * 10154 * This routine is invoked to setup the port-side endian order when 10155 * the port if_type is 0. This routine has no function for other 10156 * if_types. 10157 * 10158 * Return codes 10159 * 0 - successful 10160 * -ENOMEM - No available memory 10161 * -EIO - The mailbox failed to complete successfully. 
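 * In outline (as coded below): for if_type 0 the first two words of a
 * scratch mailbox are loaded with the HOST_ENDIAN_LOW_WORD0 /
 * HOST_ENDIAN_HIGH_WORD1 signature values and issued via MBX_POLL so the
 * port can deduce the host byte order; every other if_type simply
 * returns 0.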
10162 **/ 10163 static int 10164 lpfc_setup_endian_order(struct lpfc_hba *phba) 10165 { 10166 LPFC_MBOXQ_t *mboxq; 10167 uint32_t if_type, rc = 0; 10168 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 10169 HOST_ENDIAN_HIGH_WORD1}; 10170 10171 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10172 switch (if_type) { 10173 case LPFC_SLI_INTF_IF_TYPE_0: 10174 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10175 GFP_KERNEL); 10176 if (!mboxq) { 10177 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10178 "0492 Unable to allocate memory for " 10179 "issuing SLI_CONFIG_SPECIAL mailbox " 10180 "command\n"); 10181 return -ENOMEM; 10182 } 10183 10184 /* 10185 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 10186 * two words to contain special data values and no other data. 10187 */ 10188 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 10189 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 10190 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10191 if (rc != MBX_SUCCESS) { 10192 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10193 "0493 SLI_CONFIG_SPECIAL mailbox " 10194 "failed with status x%x\n", 10195 rc); 10196 rc = -EIO; 10197 } 10198 mempool_free(mboxq, phba->mbox_mem_pool); 10199 break; 10200 case LPFC_SLI_INTF_IF_TYPE_6: 10201 case LPFC_SLI_INTF_IF_TYPE_2: 10202 case LPFC_SLI_INTF_IF_TYPE_1: 10203 default: 10204 break; 10205 } 10206 return rc; 10207 } 10208 10209 /** 10210 * lpfc_sli4_queue_verify - Verify and update EQ counts 10211 * @phba: pointer to lpfc hba data structure. 10212 * 10213 * This routine is invoked to check the user settable queue counts for EQs. 10214 * After this routine is called the counts will be set to valid values that 10215 * adhere to the constraints of the system's interrupt vectors and the port's 10216 * queue resources. 
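 * For example (illustrative values): with nvmet support enabled,
 * cfg_hdw_queue == 16 and cfg_nvmet_mrq configured as 32, the MRQ count is
 * first clamped to 16 and then further capped at LPFC_NVMET_MRQ_MAX if
 * that limit is smaller.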
10217 * 10218 * Return codes 10219 * 0 - successful 10220 * -ENOMEM - No available memory 10221 **/ 10222 static int 10223 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 10224 { 10225 /* 10226 * Sanity check for configured queue parameters against the run-time 10227 * device parameters 10228 */ 10229 10230 if (phba->nvmet_support) { 10231 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 10232 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 10233 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 10234 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 10235 } 10236 10237 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10238 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 10239 phba->cfg_hdw_queue, phba->cfg_irq_chann, 10240 phba->cfg_nvmet_mrq); 10241 10242 /* Get EQ depth from module parameter, fake the default for now */ 10243 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10244 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10245 10246 /* Get CQ depth from module parameter, fake the default for now */ 10247 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10248 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10249 return 0; 10250 } 10251 10252 static int 10253 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 10254 { 10255 struct lpfc_queue *qdesc; 10256 u32 wqesize; 10257 int cpu; 10258 10259 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 10260 /* Create Fast Path IO CQs */ 10261 if (phba->enab_exp_wqcq_pages) 10262 /* Increase the CQ size when WQEs contain an embedded cdb */ 10263 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10264 phba->sli4_hba.cq_esize, 10265 LPFC_CQE_EXP_COUNT, cpu); 10266 10267 else 10268 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10269 phba->sli4_hba.cq_esize, 10270 phba->sli4_hba.cq_ecount, cpu); 10271 if (!qdesc) { 10272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10273 "0499 Failed allocate fast-path IO CQ (%d)\n", 10274 idx); 10275 return 1; 10276 } 10277 qdesc->qe_valid = 1; 10278 qdesc->hdwq = idx; 10279 qdesc->chann = cpu; 10280 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 10281 10282 /* Create Fast Path IO WQs */ 10283 if (phba->enab_exp_wqcq_pages) { 10284 /* Increase the WQ size when WQEs contain an embedded cdb */ 10285 wqesize = (phba->fcp_embed_io) ? 10286 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 10287 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10288 wqesize, 10289 LPFC_WQE_EXP_COUNT, cpu); 10290 } else 10291 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10292 phba->sli4_hba.wq_esize, 10293 phba->sli4_hba.wq_ecount, cpu); 10294 10295 if (!qdesc) { 10296 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10297 "0503 Failed allocate fast-path IO WQ (%d)\n", 10298 idx); 10299 return 1; 10300 } 10301 qdesc->hdwq = idx; 10302 qdesc->chann = cpu; 10303 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 10304 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10305 return 0; 10306 } 10307 10308 /** 10309 * lpfc_sli4_queue_create - Create all the SLI4 queues 10310 * @phba: pointer to lpfc hba data structure. 10311 * 10312 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 10313 * operation. For each SLI4 queue type, the parameters such as queue entry 10314 * count (queue depth) shall be taken from the module parameter. For now, 10315 * we just use some constant number as place holder. 10316 * 10317 * Return codes 10318 * 0 - successful 10319 * -ENOMEM - No availble memory 10320 * -EIO - The mailbox failed to complete successfully. 
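 * In outline, the allocations below are: one EQ per IRQ vector (created on
 * the LPFC_CPU_FIRST_IRQ cpu and shared by the other cpus on that vector),
 * one fast-path IO CQ/WQ pair per hardware queue, the slow-path mailbox
 * and ELS CQ/WQ pairs, the NVME LS CQ/WQ and NVMET CQ set / MRQ pairs when
 * those FC4 types are enabled, and the unsolicited receive header/data RQ
 * pair.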
10321 **/ 10322 int 10323 lpfc_sli4_queue_create(struct lpfc_hba *phba) 10324 { 10325 struct lpfc_queue *qdesc; 10326 int idx, cpu, eqcpu; 10327 struct lpfc_sli4_hdw_queue *qp; 10328 struct lpfc_vector_map_info *cpup; 10329 struct lpfc_vector_map_info *eqcpup; 10330 struct lpfc_eq_intr_info *eqi; 10331 10332 /* 10333 * Create HBA Record arrays. 10334 * Both NVME and FCP will share that same vectors / EQs 10335 */ 10336 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 10337 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 10338 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 10339 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 10340 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 10341 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 10342 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10343 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10344 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10345 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10346 10347 if (!phba->sli4_hba.hdwq) { 10348 phba->sli4_hba.hdwq = kcalloc( 10349 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 10350 GFP_KERNEL); 10351 if (!phba->sli4_hba.hdwq) { 10352 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10353 "6427 Failed allocate memory for " 10354 "fast-path Hardware Queue array\n"); 10355 goto out_error; 10356 } 10357 /* Prepare hardware queues to take IO buffers */ 10358 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10359 qp = &phba->sli4_hba.hdwq[idx]; 10360 spin_lock_init(&qp->io_buf_list_get_lock); 10361 spin_lock_init(&qp->io_buf_list_put_lock); 10362 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 10363 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 10364 qp->get_io_bufs = 0; 10365 qp->put_io_bufs = 0; 10366 qp->total_io_bufs = 0; 10367 spin_lock_init(&qp->abts_io_buf_list_lock); 10368 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 10369 qp->abts_scsi_io_bufs = 0; 10370 qp->abts_nvme_io_bufs = 0; 10371 INIT_LIST_HEAD(&qp->sgl_list); 10372 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 10373 spin_lock_init(&qp->hdwq_lock); 10374 } 10375 } 10376 10377 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10378 if (phba->nvmet_support) { 10379 phba->sli4_hba.nvmet_cqset = kcalloc( 10380 phba->cfg_nvmet_mrq, 10381 sizeof(struct lpfc_queue *), 10382 GFP_KERNEL); 10383 if (!phba->sli4_hba.nvmet_cqset) { 10384 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10385 "3121 Fail allocate memory for " 10386 "fast-path CQ set array\n"); 10387 goto out_error; 10388 } 10389 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 10390 phba->cfg_nvmet_mrq, 10391 sizeof(struct lpfc_queue *), 10392 GFP_KERNEL); 10393 if (!phba->sli4_hba.nvmet_mrq_hdr) { 10394 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10395 "3122 Fail allocate memory for " 10396 "fast-path RQ set hdr array\n"); 10397 goto out_error; 10398 } 10399 phba->sli4_hba.nvmet_mrq_data = kcalloc( 10400 phba->cfg_nvmet_mrq, 10401 sizeof(struct lpfc_queue *), 10402 GFP_KERNEL); 10403 if (!phba->sli4_hba.nvmet_mrq_data) { 10404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10405 "3124 Fail allocate memory for " 10406 "fast-path RQ set data array\n"); 10407 goto out_error; 10408 } 10409 } 10410 } 10411 10412 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10413 10414 /* Create HBA Event Queues (EQs) */ 10415 for_each_present_cpu(cpu) { 10416 /* We only want to create 1 EQ per vector, even though 10417 * multiple CPUs might be using that vector. so only 10418 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 
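	 * For example, if cpu 0 and cpu 16 share a vector, only the cpu
	 * flagged LPFC_CPU_FIRST_IRQ allocates the EQ in this loop; the
	 * follow-up loop further down points the remaining cpus' hardware
	 * queues at that same shared EQ.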
10419 */ 10420 cpup = &phba->sli4_hba.cpu_map[cpu]; 10421 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10422 continue; 10423 10424 /* Get a ptr to the Hardware Queue associated with this CPU */ 10425 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10426 10427 /* Allocate an EQ */ 10428 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10429 phba->sli4_hba.eq_esize, 10430 phba->sli4_hba.eq_ecount, cpu); 10431 if (!qdesc) { 10432 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10433 "0497 Failed allocate EQ (%d)\n", 10434 cpup->hdwq); 10435 goto out_error; 10436 } 10437 qdesc->qe_valid = 1; 10438 qdesc->hdwq = cpup->hdwq; 10439 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 10440 qdesc->last_cpu = qdesc->chann; 10441 10442 /* Save the allocated EQ in the Hardware Queue */ 10443 qp->hba_eq = qdesc; 10444 10445 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 10446 list_add(&qdesc->cpu_list, &eqi->list); 10447 } 10448 10449 /* Now we need to populate the other Hardware Queues, that share 10450 * an IRQ vector, with the associated EQ ptr. 10451 */ 10452 for_each_present_cpu(cpu) { 10453 cpup = &phba->sli4_hba.cpu_map[cpu]; 10454 10455 /* Check for EQ already allocated in previous loop */ 10456 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 10457 continue; 10458 10459 /* Check for multiple CPUs per hdwq */ 10460 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10461 if (qp->hba_eq) 10462 continue; 10463 10464 /* We need to share an EQ for this hdwq */ 10465 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 10466 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 10467 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 10468 } 10469 10470 /* Allocate IO Path SLI4 CQ/WQs */ 10471 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10472 if (lpfc_alloc_io_wq_cq(phba, idx)) 10473 goto out_error; 10474 } 10475 10476 if (phba->nvmet_support) { 10477 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10478 cpu = lpfc_find_cpu_handle(phba, idx, 10479 LPFC_FIND_BY_HDWQ); 10480 qdesc = lpfc_sli4_queue_alloc(phba, 10481 LPFC_DEFAULT_PAGE_SIZE, 10482 phba->sli4_hba.cq_esize, 10483 phba->sli4_hba.cq_ecount, 10484 cpu); 10485 if (!qdesc) { 10486 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10487 "3142 Failed allocate NVME " 10488 "CQ Set (%d)\n", idx); 10489 goto out_error; 10490 } 10491 qdesc->qe_valid = 1; 10492 qdesc->hdwq = idx; 10493 qdesc->chann = cpu; 10494 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 10495 } 10496 } 10497 10498 /* 10499 * Create Slow Path Completion Queues (CQs) 10500 */ 10501 10502 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 10503 /* Create slow-path Mailbox Command Complete Queue */ 10504 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10505 phba->sli4_hba.cq_esize, 10506 phba->sli4_hba.cq_ecount, cpu); 10507 if (!qdesc) { 10508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10509 "0500 Failed allocate slow-path mailbox CQ\n"); 10510 goto out_error; 10511 } 10512 qdesc->qe_valid = 1; 10513 phba->sli4_hba.mbx_cq = qdesc; 10514 10515 /* Create slow-path ELS Complete Queue */ 10516 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10517 phba->sli4_hba.cq_esize, 10518 phba->sli4_hba.cq_ecount, cpu); 10519 if (!qdesc) { 10520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10521 "0501 Failed allocate slow-path ELS CQ\n"); 10522 goto out_error; 10523 } 10524 qdesc->qe_valid = 1; 10525 qdesc->chann = cpu; 10526 phba->sli4_hba.els_cq = qdesc; 10527 10528 10529 /* 10530 * Create Slow Path Work Queues (WQs) 10531 */ 10532 10533 /* Create Mailbox 
Command Queue */ 10534 10535 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10536 phba->sli4_hba.mq_esize, 10537 phba->sli4_hba.mq_ecount, cpu); 10538 if (!qdesc) { 10539 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10540 "0505 Failed allocate slow-path MQ\n"); 10541 goto out_error; 10542 } 10543 qdesc->chann = cpu; 10544 phba->sli4_hba.mbx_wq = qdesc; 10545 10546 /* 10547 * Create ELS Work Queues 10548 */ 10549 10550 /* Create slow-path ELS Work Queue */ 10551 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10552 phba->sli4_hba.wq_esize, 10553 phba->sli4_hba.wq_ecount, cpu); 10554 if (!qdesc) { 10555 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10556 "0504 Failed allocate slow-path ELS WQ\n"); 10557 goto out_error; 10558 } 10559 qdesc->chann = cpu; 10560 phba->sli4_hba.els_wq = qdesc; 10561 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10562 10563 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10564 /* Create NVME LS Complete Queue */ 10565 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10566 phba->sli4_hba.cq_esize, 10567 phba->sli4_hba.cq_ecount, cpu); 10568 if (!qdesc) { 10569 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10570 "6079 Failed allocate NVME LS CQ\n"); 10571 goto out_error; 10572 } 10573 qdesc->chann = cpu; 10574 qdesc->qe_valid = 1; 10575 phba->sli4_hba.nvmels_cq = qdesc; 10576 10577 /* Create NVME LS Work Queue */ 10578 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10579 phba->sli4_hba.wq_esize, 10580 phba->sli4_hba.wq_ecount, cpu); 10581 if (!qdesc) { 10582 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10583 "6080 Failed allocate NVME LS WQ\n"); 10584 goto out_error; 10585 } 10586 qdesc->chann = cpu; 10587 phba->sli4_hba.nvmels_wq = qdesc; 10588 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10589 } 10590 10591 /* 10592 * Create Receive Queue (RQ) 10593 */ 10594 10595 /* Create Receive Queue for header */ 10596 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10597 phba->sli4_hba.rq_esize, 10598 phba->sli4_hba.rq_ecount, cpu); 10599 if (!qdesc) { 10600 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10601 "0506 Failed allocate receive HRQ\n"); 10602 goto out_error; 10603 } 10604 phba->sli4_hba.hdr_rq = qdesc; 10605 10606 /* Create Receive Queue for data */ 10607 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10608 phba->sli4_hba.rq_esize, 10609 phba->sli4_hba.rq_ecount, cpu); 10610 if (!qdesc) { 10611 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10612 "0507 Failed allocate receive DRQ\n"); 10613 goto out_error; 10614 } 10615 phba->sli4_hba.dat_rq = qdesc; 10616 10617 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 10618 phba->nvmet_support) { 10619 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10620 cpu = lpfc_find_cpu_handle(phba, idx, 10621 LPFC_FIND_BY_HDWQ); 10622 /* Create NVMET Receive Queue for header */ 10623 qdesc = lpfc_sli4_queue_alloc(phba, 10624 LPFC_DEFAULT_PAGE_SIZE, 10625 phba->sli4_hba.rq_esize, 10626 LPFC_NVMET_RQE_DEF_COUNT, 10627 cpu); 10628 if (!qdesc) { 10629 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10630 "3146 Failed allocate " 10631 "receive HRQ\n"); 10632 goto out_error; 10633 } 10634 qdesc->hdwq = idx; 10635 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 10636 10637 /* Only needed for header of RQ pair */ 10638 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 10639 GFP_KERNEL, 10640 cpu_to_node(cpu)); 10641 if (qdesc->rqbp == NULL) { 10642 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10643 "6131 
Failed allocate " 10644 "Header RQBP\n"); 10645 goto out_error; 10646 } 10647 10648 /* Put list in known state in case driver load fails. */ 10649 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 10650 10651 /* Create NVMET Receive Queue for data */ 10652 qdesc = lpfc_sli4_queue_alloc(phba, 10653 LPFC_DEFAULT_PAGE_SIZE, 10654 phba->sli4_hba.rq_esize, 10655 LPFC_NVMET_RQE_DEF_COUNT, 10656 cpu); 10657 if (!qdesc) { 10658 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10659 "3156 Failed allocate " 10660 "receive DRQ\n"); 10661 goto out_error; 10662 } 10663 qdesc->hdwq = idx; 10664 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 10665 } 10666 } 10667 10668 /* Clear NVME stats */ 10669 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10670 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10671 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 10672 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 10673 } 10674 } 10675 10676 /* Clear SCSI stats */ 10677 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 10678 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10679 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 10680 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 10681 } 10682 } 10683 10684 return 0; 10685 10686 out_error: 10687 lpfc_sli4_queue_destroy(phba); 10688 return -ENOMEM; 10689 } 10690 10691 static inline void 10692 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 10693 { 10694 if (*qp != NULL) { 10695 lpfc_sli4_queue_free(*qp); 10696 *qp = NULL; 10697 } 10698 } 10699 10700 static inline void 10701 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 10702 { 10703 int idx; 10704 10705 if (*qs == NULL) 10706 return; 10707 10708 for (idx = 0; idx < max; idx++) 10709 __lpfc_sli4_release_queue(&(*qs)[idx]); 10710 10711 kfree(*qs); 10712 *qs = NULL; 10713 } 10714 10715 static inline void 10716 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 10717 { 10718 struct lpfc_sli4_hdw_queue *hdwq; 10719 struct lpfc_queue *eq; 10720 uint32_t idx; 10721 10722 hdwq = phba->sli4_hba.hdwq; 10723 10724 /* Loop thru all Hardware Queues */ 10725 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10726 /* Free the CQ/WQ corresponding to the Hardware Queue */ 10727 lpfc_sli4_queue_free(hdwq[idx].io_cq); 10728 lpfc_sli4_queue_free(hdwq[idx].io_wq); 10729 hdwq[idx].hba_eq = NULL; 10730 hdwq[idx].io_cq = NULL; 10731 hdwq[idx].io_wq = NULL; 10732 if (phba->cfg_xpsgl && !phba->nvmet_support) 10733 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 10734 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 10735 } 10736 /* Loop thru all IRQ vectors */ 10737 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10738 /* Free the EQ corresponding to the IRQ vector */ 10739 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 10740 lpfc_sli4_queue_free(eq); 10741 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 10742 } 10743 } 10744 10745 /** 10746 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 10747 * @phba: pointer to lpfc hba data structure. 10748 * 10749 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 10750 * operation. 10751 * 10752 * Return codes 10753 * 0 - successful 10754 * -ENOMEM - No available memory 10755 * -EIO - The mailbox failed to complete successfully. 10756 **/ 10757 void 10758 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 10759 { 10760 /* 10761 * Set FREE_INIT before beginning to free the queues. 10762 * Wait until the users of queues to acknowledge to 10763 * release queues by clearing FREE_WAIT. 
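 *
 * As an illustrative sketch only (not a helper that exists in this
 * driver), a queue user participating in that handshake would do
 * roughly the following before and after touching the queues:
 *
 *   spin_lock_irq(&phba->hbalock);
 *   if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT) {
 *           spin_unlock_irq(&phba->hbalock);
 *           return;                 // teardown already in progress
 *   }
 *   phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
 *   spin_unlock_irq(&phba->hbalock);
 *   ... use the queues ...
 *   spin_lock_irq(&phba->hbalock);
 *   phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
 *   spin_unlock_irq(&phba->hbalock);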
10764 */ 10765 spin_lock_irq(&phba->hbalock); 10766 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 10767 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 10768 spin_unlock_irq(&phba->hbalock); 10769 msleep(20); 10770 spin_lock_irq(&phba->hbalock); 10771 } 10772 spin_unlock_irq(&phba->hbalock); 10773 10774 lpfc_sli4_cleanup_poll_list(phba); 10775 10776 /* Release HBA eqs */ 10777 if (phba->sli4_hba.hdwq) 10778 lpfc_sli4_release_hdwq(phba); 10779 10780 if (phba->nvmet_support) { 10781 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 10782 phba->cfg_nvmet_mrq); 10783 10784 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 10785 phba->cfg_nvmet_mrq); 10786 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 10787 phba->cfg_nvmet_mrq); 10788 } 10789 10790 /* Release mailbox command work queue */ 10791 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 10792 10793 /* Release ELS work queue */ 10794 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 10795 10796 /* Release ELS work queue */ 10797 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 10798 10799 /* Release unsolicited receive queue */ 10800 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 10801 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 10802 10803 /* Release ELS complete queue */ 10804 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 10805 10806 /* Release NVME LS complete queue */ 10807 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 10808 10809 /* Release mailbox command complete queue */ 10810 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 10811 10812 /* Everything on this list has been freed */ 10813 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10814 10815 /* Done with freeing the queues */ 10816 spin_lock_irq(&phba->hbalock); 10817 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 10818 spin_unlock_irq(&phba->hbalock); 10819 } 10820 10821 int 10822 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 10823 { 10824 struct lpfc_rqb *rqbp; 10825 struct lpfc_dmabuf *h_buf; 10826 struct rqb_dmabuf *rqb_buffer; 10827 10828 rqbp = rq->rqbp; 10829 while (!list_empty(&rqbp->rqb_buffer_list)) { 10830 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 10831 struct lpfc_dmabuf, list); 10832 10833 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 10834 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 10835 rqbp->buffer_count--; 10836 } 10837 return 1; 10838 } 10839 10840 static int 10841 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 10842 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 10843 int qidx, uint32_t qtype) 10844 { 10845 struct lpfc_sli_ring *pring; 10846 int rc; 10847 10848 if (!eq || !cq || !wq) { 10849 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10850 "6085 Fast-path %s (%d) not allocated\n", 10851 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 10852 return -ENOMEM; 10853 } 10854 10855 /* create the Cq first */ 10856 rc = lpfc_cq_create(phba, cq, eq, 10857 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 10858 if (rc) { 10859 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10860 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 10861 qidx, (uint32_t)rc); 10862 return rc; 10863 } 10864 10865 if (qtype != LPFC_MBOX) { 10866 /* Setup cq_map for fast lookup */ 10867 if (cq_map) 10868 *cq_map = cq->queue_id; 10869 10870 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10871 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 10872 qidx, cq->queue_id, qidx, eq->queue_id); 10873 10874 /* create the wq */ 10875 rc = lpfc_wq_create(phba, wq, cq, qtype); 10876 if (rc) { 10877 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10878 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 10879 qidx, (uint32_t)rc); 10880 /* no need to tear down cq - caller will do so */ 10881 return rc; 10882 } 10883 10884 /* Bind this CQ/WQ to the NVME ring */ 10885 pring = wq->pring; 10886 pring->sli.sli4.wqp = (void *)wq; 10887 cq->pring = pring; 10888 10889 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10890 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 10891 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 10892 } else { 10893 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 10894 if (rc) { 10895 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10896 "0539 Failed setup of slow-path MQ: " 10897 "rc = 0x%x\n", rc); 10898 /* no need to tear down cq - caller will do so */ 10899 return rc; 10900 } 10901 10902 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10903 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 10904 phba->sli4_hba.mbx_wq->queue_id, 10905 phba->sli4_hba.mbx_cq->queue_id); 10906 } 10907 10908 return 0; 10909 } 10910 10911 /** 10912 * lpfc_setup_cq_lookup - Setup the CQ lookup table 10913 * @phba: pointer to lpfc hba data structure. 10914 * 10915 * This routine will populate the cq_lookup table by all 10916 * available CQ queue_id's. 10917 **/ 10918 static void 10919 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 10920 { 10921 struct lpfc_queue *eq, *childq; 10922 int qidx; 10923 10924 memset(phba->sli4_hba.cq_lookup, 0, 10925 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 10926 /* Loop thru all IRQ vectors */ 10927 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 10928 /* Get the EQ corresponding to the IRQ vector */ 10929 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 10930 if (!eq) 10931 continue; 10932 /* Loop through all CQs associated with that EQ */ 10933 list_for_each_entry(childq, &eq->child_list, list) { 10934 if (childq->queue_id > phba->sli4_hba.cq_max) 10935 continue; 10936 if (childq->subtype == LPFC_IO) 10937 phba->sli4_hba.cq_lookup[childq->queue_id] = 10938 childq; 10939 } 10940 } 10941 } 10942 10943 /** 10944 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 10945 * @phba: pointer to lpfc hba data structure. 10946 * 10947 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 10948 * operation. 10949 * 10950 * Return codes 10951 * 0 - successful 10952 * -ENOMEM - No available memory 10953 * -EIO - The mailbox failed to complete successfully. 
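 *
 * Note: lpfc_sli4_queue_create() must have run first so that every queue
 * structure referenced here already exists in host memory; this routine
 * only posts those queues to the port with the appropriate CREATE mailbox
 * commands and wires up the EQ/CQ/WQ/RQ parent-child relationships.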
10954 **/ 10955 int 10956 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 10957 { 10958 uint32_t shdr_status, shdr_add_status; 10959 union lpfc_sli4_cfg_shdr *shdr; 10960 struct lpfc_vector_map_info *cpup; 10961 struct lpfc_sli4_hdw_queue *qp; 10962 LPFC_MBOXQ_t *mboxq; 10963 int qidx, cpu; 10964 uint32_t length, usdelay; 10965 int rc = -ENOMEM; 10966 10967 /* Check for dual-ULP support */ 10968 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10969 if (!mboxq) { 10970 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10971 "3249 Unable to allocate memory for " 10972 "QUERY_FW_CFG mailbox command\n"); 10973 return -ENOMEM; 10974 } 10975 length = (sizeof(struct lpfc_mbx_query_fw_config) - 10976 sizeof(struct lpfc_sli4_cfg_mhdr)); 10977 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10978 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 10979 length, LPFC_SLI4_MBX_EMBED); 10980 10981 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10982 10983 shdr = (union lpfc_sli4_cfg_shdr *) 10984 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 10985 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10986 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10987 if (shdr_status || shdr_add_status || rc) { 10988 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10989 "3250 QUERY_FW_CFG mailbox failed with status " 10990 "x%x add_status x%x, mbx status x%x\n", 10991 shdr_status, shdr_add_status, rc); 10992 mempool_free(mboxq, phba->mbox_mem_pool); 10993 rc = -ENXIO; 10994 goto out_error; 10995 } 10996 10997 phba->sli4_hba.fw_func_mode = 10998 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 10999 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 11000 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 11001 phba->sli4_hba.physical_port = 11002 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 11003 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11004 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 11005 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 11006 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 11007 11008 mempool_free(mboxq, phba->mbox_mem_pool); 11009 11010 /* 11011 * Set up HBA Event Queues (EQs) 11012 */ 11013 qp = phba->sli4_hba.hdwq; 11014 11015 /* Set up HBA event queue */ 11016 if (!qp) { 11017 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11018 "3147 Fast-path EQs not allocated\n"); 11019 rc = -ENOMEM; 11020 goto out_error; 11021 } 11022 11023 /* Loop thru all IRQ vectors */ 11024 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11025 /* Create HBA Event Queues (EQs) in order */ 11026 for_each_present_cpu(cpu) { 11027 cpup = &phba->sli4_hba.cpu_map[cpu]; 11028 11029 /* Look for the CPU thats using that vector with 11030 * LPFC_CPU_FIRST_IRQ set. 
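 *
 * Only one CPU per IRQ vector carries LPFC_CPU_FIRST_IRQ, so exactly one
 * EQ is set up per vector here even though several CPUs may share that
 * vector. For example (illustrative numbers only), with cfg_irq_chann = 4
 * on an 8-CPU system, CPUs 0-3 might each be the FIRST_IRQ CPU for
 * vectors 0-3 while CPUs 4-7 simply reuse those EQs via the sharing pass
 * in lpfc_sli4_queue_create().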
11031 */ 11032 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11033 continue; 11034 if (qidx != cpup->eq) 11035 continue; 11036 11037 /* Create an EQ for that vector */ 11038 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 11039 phba->cfg_fcp_imax); 11040 if (rc) { 11041 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11042 "0523 Failed setup of fast-path" 11043 " EQ (%d), rc = 0x%x\n", 11044 cpup->eq, (uint32_t)rc); 11045 goto out_destroy; 11046 } 11047 11048 /* Save the EQ for that vector in the hba_eq_hdl */ 11049 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 11050 qp[cpup->hdwq].hba_eq; 11051 11052 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11053 "2584 HBA EQ setup: queue[%d]-id=%d\n", 11054 cpup->eq, 11055 qp[cpup->hdwq].hba_eq->queue_id); 11056 } 11057 } 11058 11059 /* Loop thru all Hardware Queues */ 11060 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11061 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 11062 cpup = &phba->sli4_hba.cpu_map[cpu]; 11063 11064 /* Create the CQ/WQ corresponding to the Hardware Queue */ 11065 rc = lpfc_create_wq_cq(phba, 11066 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 11067 qp[qidx].io_cq, 11068 qp[qidx].io_wq, 11069 &phba->sli4_hba.hdwq[qidx].io_cq_map, 11070 qidx, 11071 LPFC_IO); 11072 if (rc) { 11073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11074 "0535 Failed to setup fastpath " 11075 "IO WQ/CQ (%d), rc = 0x%x\n", 11076 qidx, (uint32_t)rc); 11077 goto out_destroy; 11078 } 11079 } 11080 11081 /* 11082 * Set up Slow Path Complete Queues (CQs) 11083 */ 11084 11085 /* Set up slow-path MBOX CQ/MQ */ 11086 11087 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 11088 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11089 "0528 %s not allocated\n", 11090 phba->sli4_hba.mbx_cq ? 11091 "Mailbox WQ" : "Mailbox CQ"); 11092 rc = -ENOMEM; 11093 goto out_destroy; 11094 } 11095 11096 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11097 phba->sli4_hba.mbx_cq, 11098 phba->sli4_hba.mbx_wq, 11099 NULL, 0, LPFC_MBOX); 11100 if (rc) { 11101 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11102 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 11103 (uint32_t)rc); 11104 goto out_destroy; 11105 } 11106 if (phba->nvmet_support) { 11107 if (!phba->sli4_hba.nvmet_cqset) { 11108 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11109 "3165 Fast-path NVME CQ Set " 11110 "array not allocated\n"); 11111 rc = -ENOMEM; 11112 goto out_destroy; 11113 } 11114 if (phba->cfg_nvmet_mrq > 1) { 11115 rc = lpfc_cq_create_set(phba, 11116 phba->sli4_hba.nvmet_cqset, 11117 qp, 11118 LPFC_WCQ, LPFC_NVMET); 11119 if (rc) { 11120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11121 "3164 Failed setup of NVME CQ " 11122 "Set, rc = 0x%x\n", 11123 (uint32_t)rc); 11124 goto out_destroy; 11125 } 11126 } else { 11127 /* Set up NVMET Receive Complete Queue */ 11128 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 11129 qp[0].hba_eq, 11130 LPFC_WCQ, LPFC_NVMET); 11131 if (rc) { 11132 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11133 "6089 Failed setup NVMET CQ: " 11134 "rc = 0x%x\n", (uint32_t)rc); 11135 goto out_destroy; 11136 } 11137 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 11138 11139 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11140 "6090 NVMET CQ setup: cq-id=%d, " 11141 "parent eq-id=%d\n", 11142 phba->sli4_hba.nvmet_cqset[0]->queue_id, 11143 qp[0].hba_eq->queue_id); 11144 } 11145 } 11146 11147 /* Set up slow-path ELS WQ/CQ */ 11148 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 11149 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11150 "0530 ELS %s not 
allocated\n", 11151 phba->sli4_hba.els_cq ? "WQ" : "CQ"); 11152 rc = -ENOMEM; 11153 goto out_destroy; 11154 } 11155 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11156 phba->sli4_hba.els_cq, 11157 phba->sli4_hba.els_wq, 11158 NULL, 0, LPFC_ELS); 11159 if (rc) { 11160 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11161 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 11162 (uint32_t)rc); 11163 goto out_destroy; 11164 } 11165 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11166 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 11167 phba->sli4_hba.els_wq->queue_id, 11168 phba->sli4_hba.els_cq->queue_id); 11169 11170 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11171 /* Set up NVME LS Complete Queue */ 11172 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 11173 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11174 "6091 LS %s not allocated\n", 11175 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 11176 rc = -ENOMEM; 11177 goto out_destroy; 11178 } 11179 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11180 phba->sli4_hba.nvmels_cq, 11181 phba->sli4_hba.nvmels_wq, 11182 NULL, 0, LPFC_NVME_LS); 11183 if (rc) { 11184 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11185 "0526 Failed setup of NVVME LS WQ/CQ: " 11186 "rc = 0x%x\n", (uint32_t)rc); 11187 goto out_destroy; 11188 } 11189 11190 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11191 "6096 ELS WQ setup: wq-id=%d, " 11192 "parent cq-id=%d\n", 11193 phba->sli4_hba.nvmels_wq->queue_id, 11194 phba->sli4_hba.nvmels_cq->queue_id); 11195 } 11196 11197 /* 11198 * Create NVMET Receive Queue (RQ) 11199 */ 11200 if (phba->nvmet_support) { 11201 if ((!phba->sli4_hba.nvmet_cqset) || 11202 (!phba->sli4_hba.nvmet_mrq_hdr) || 11203 (!phba->sli4_hba.nvmet_mrq_data)) { 11204 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11205 "6130 MRQ CQ Queues not " 11206 "allocated\n"); 11207 rc = -ENOMEM; 11208 goto out_destroy; 11209 } 11210 if (phba->cfg_nvmet_mrq > 1) { 11211 rc = lpfc_mrq_create(phba, 11212 phba->sli4_hba.nvmet_mrq_hdr, 11213 phba->sli4_hba.nvmet_mrq_data, 11214 phba->sli4_hba.nvmet_cqset, 11215 LPFC_NVMET); 11216 if (rc) { 11217 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11218 "6098 Failed setup of NVMET " 11219 "MRQ: rc = 0x%x\n", 11220 (uint32_t)rc); 11221 goto out_destroy; 11222 } 11223 11224 } else { 11225 rc = lpfc_rq_create(phba, 11226 phba->sli4_hba.nvmet_mrq_hdr[0], 11227 phba->sli4_hba.nvmet_mrq_data[0], 11228 phba->sli4_hba.nvmet_cqset[0], 11229 LPFC_NVMET); 11230 if (rc) { 11231 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11232 "6057 Failed setup of NVMET " 11233 "Receive Queue: rc = 0x%x\n", 11234 (uint32_t)rc); 11235 goto out_destroy; 11236 } 11237 11238 lpfc_printf_log( 11239 phba, KERN_INFO, LOG_INIT, 11240 "6099 NVMET RQ setup: hdr-rq-id=%d, " 11241 "dat-rq-id=%d parent cq-id=%d\n", 11242 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 11243 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 11244 phba->sli4_hba.nvmet_cqset[0]->queue_id); 11245 11246 } 11247 } 11248 11249 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 11250 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11251 "0540 Receive Queue not allocated\n"); 11252 rc = -ENOMEM; 11253 goto out_destroy; 11254 } 11255 11256 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 11257 phba->sli4_hba.els_cq, LPFC_USOL); 11258 if (rc) { 11259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11260 "0541 Failed setup of Receive Queue: " 11261 "rc = 0x%x\n", (uint32_t)rc); 11262 goto out_destroy; 11263 } 11264 11265 lpfc_printf_log(phba, KERN_INFO, 
LOG_INIT, 11266 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 11267 "parent cq-id=%d\n", 11268 phba->sli4_hba.hdr_rq->queue_id, 11269 phba->sli4_hba.dat_rq->queue_id, 11270 phba->sli4_hba.els_cq->queue_id); 11271 11272 if (phba->cfg_fcp_imax) 11273 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 11274 else 11275 usdelay = 0; 11276 11277 for (qidx = 0; qidx < phba->cfg_irq_chann; 11278 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 11279 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 11280 usdelay); 11281 11282 if (phba->sli4_hba.cq_max) { 11283 kfree(phba->sli4_hba.cq_lookup); 11284 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 11285 sizeof(struct lpfc_queue *), GFP_KERNEL); 11286 if (!phba->sli4_hba.cq_lookup) { 11287 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11288 "0549 Failed setup of CQ Lookup table: " 11289 "size 0x%x\n", phba->sli4_hba.cq_max); 11290 rc = -ENOMEM; 11291 goto out_destroy; 11292 } 11293 lpfc_setup_cq_lookup(phba); 11294 } 11295 return 0; 11296 11297 out_destroy: 11298 lpfc_sli4_queue_unset(phba); 11299 out_error: 11300 return rc; 11301 } 11302 11303 /** 11304 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 11305 * @phba: pointer to lpfc hba data structure. 11306 * 11307 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 11308 * operation. 11309 * 11310 * Return codes 11311 * 0 - successful 11312 * -ENOMEM - No available memory 11313 * -EIO - The mailbox failed to complete successfully. 11314 **/ 11315 void 11316 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 11317 { 11318 struct lpfc_sli4_hdw_queue *qp; 11319 struct lpfc_queue *eq; 11320 int qidx; 11321 11322 /* Unset mailbox command work queue */ 11323 if (phba->sli4_hba.mbx_wq) 11324 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 11325 11326 /* Unset NVME LS work queue */ 11327 if (phba->sli4_hba.nvmels_wq) 11328 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 11329 11330 /* Unset ELS work queue */ 11331 if (phba->sli4_hba.els_wq) 11332 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 11333 11334 /* Unset unsolicited receive queue */ 11335 if (phba->sli4_hba.hdr_rq) 11336 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 11337 phba->sli4_hba.dat_rq); 11338 11339 /* Unset mailbox command complete queue */ 11340 if (phba->sli4_hba.mbx_cq) 11341 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 11342 11343 /* Unset ELS complete queue */ 11344 if (phba->sli4_hba.els_cq) 11345 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 11346 11347 /* Unset NVME LS complete queue */ 11348 if (phba->sli4_hba.nvmels_cq) 11349 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 11350 11351 if (phba->nvmet_support) { 11352 /* Unset NVMET MRQ queue */ 11353 if (phba->sli4_hba.nvmet_mrq_hdr) { 11354 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11355 lpfc_rq_destroy( 11356 phba, 11357 phba->sli4_hba.nvmet_mrq_hdr[qidx], 11358 phba->sli4_hba.nvmet_mrq_data[qidx]); 11359 } 11360 11361 /* Unset NVMET CQ Set complete queue */ 11362 if (phba->sli4_hba.nvmet_cqset) { 11363 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11364 lpfc_cq_destroy( 11365 phba, phba->sli4_hba.nvmet_cqset[qidx]); 11366 } 11367 } 11368 11369 /* Unset fast-path SLI4 queues */ 11370 if (phba->sli4_hba.hdwq) { 11371 /* Loop thru all Hardware Queues */ 11372 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11373 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 11374 qp = &phba->sli4_hba.hdwq[qidx]; 11375 lpfc_wq_destroy(phba, qp->io_wq); 11376 lpfc_cq_destroy(phba, qp->io_cq); 11377 } 11378 /* Loop thru all IRQ 
vectors */ 11379 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11380 /* Destroy the EQ corresponding to the IRQ vector */ 11381 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 11382 lpfc_eq_destroy(phba, eq); 11383 } 11384 } 11385 11386 kfree(phba->sli4_hba.cq_lookup); 11387 phba->sli4_hba.cq_lookup = NULL; 11388 phba->sli4_hba.cq_max = 0; 11389 } 11390 11391 /** 11392 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 11393 * @phba: pointer to lpfc hba data structure. 11394 * 11395 * This routine is invoked to allocate and set up a pool of completion queue 11396 * events. The body of the completion queue event is a completion queue entry 11397 * CQE. For now, this pool is used for the interrupt service routine to queue 11398 * the following HBA completion queue events for the worker thread to process: 11399 * - Mailbox asynchronous events 11400 * - Receive queue completion unsolicited events 11401 * Later, this can be used for all the slow-path events. 11402 * 11403 * Return codes 11404 * 0 - successful 11405 * -ENOMEM - No available memory 11406 **/ 11407 static int 11408 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 11409 { 11410 struct lpfc_cq_event *cq_event; 11411 int i; 11412 11413 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 11414 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 11415 if (!cq_event) 11416 goto out_pool_create_fail; 11417 list_add_tail(&cq_event->list, 11418 &phba->sli4_hba.sp_cqe_event_pool); 11419 } 11420 return 0; 11421 11422 out_pool_create_fail: 11423 lpfc_sli4_cq_event_pool_destroy(phba); 11424 return -ENOMEM; 11425 } 11426 11427 /** 11428 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 11429 * @phba: pointer to lpfc hba data structure. 11430 * 11431 * This routine is invoked to free the pool of completion queue events at 11432 * driver unload time. Note that, it is the responsibility of the driver 11433 * cleanup routine to free all the outstanding completion-queue events 11434 * allocated from this pool back into the pool before invoking this routine 11435 * to destroy the pool. 11436 **/ 11437 static void 11438 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 11439 { 11440 struct lpfc_cq_event *cq_event, *next_cq_event; 11441 11442 list_for_each_entry_safe(cq_event, next_cq_event, 11443 &phba->sli4_hba.sp_cqe_event_pool, list) { 11444 list_del(&cq_event->list); 11445 kfree(cq_event); 11446 } 11447 } 11448 11449 /** 11450 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11451 * @phba: pointer to lpfc hba data structure. 11452 * 11453 * This routine is the lock free version of the API invoked to allocate a 11454 * completion-queue event from the free pool. 11455 * 11456 * Return: Pointer to the newly allocated completion-queue event if successful 11457 * NULL otherwise. 11458 **/ 11459 struct lpfc_cq_event * 11460 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11461 { 11462 struct lpfc_cq_event *cq_event = NULL; 11463 11464 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 11465 struct lpfc_cq_event, list); 11466 return cq_event; 11467 } 11468 11469 /** 11470 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11471 * @phba: pointer to lpfc hba data structure. 11472 * 11473 * This routine is the lock version of the API invoked to allocate a 11474 * completion-queue event from the free pool. 
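 * It takes phba->hbalock internally, so it must not be called with that
 * lock already held. A typical usage sketch (illustrative only, not a
 * verbatim caller from this driver):
 *
 *   struct lpfc_cq_event *evt = lpfc_sli4_cq_event_alloc(phba);
 *   if (!evt)
 *           return;                 // pool exhausted, drop the event
 *   ... copy the CQE into evt and queue it for the worker thread ...
 *   ... later the worker returns it with lpfc_sli4_cq_event_release() ...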
11475 * 11476 * Return: Pointer to the newly allocated completion-queue event if successful 11477 * NULL otherwise. 11478 **/ 11479 struct lpfc_cq_event * 11480 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11481 { 11482 struct lpfc_cq_event *cq_event; 11483 unsigned long iflags; 11484 11485 spin_lock_irqsave(&phba->hbalock, iflags); 11486 cq_event = __lpfc_sli4_cq_event_alloc(phba); 11487 spin_unlock_irqrestore(&phba->hbalock, iflags); 11488 return cq_event; 11489 } 11490 11491 /** 11492 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 11493 * @phba: pointer to lpfc hba data structure. 11494 * @cq_event: pointer to the completion queue event to be freed. 11495 * 11496 * This routine is the lock free version of the API invoked to release a 11497 * completion-queue event back into the free pool. 11498 **/ 11499 void 11500 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 11501 struct lpfc_cq_event *cq_event) 11502 { 11503 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 11504 } 11505 11506 /** 11507 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 11508 * @phba: pointer to lpfc hba data structure. 11509 * @cq_event: pointer to the completion queue event to be freed. 11510 * 11511 * This routine is the lock version of the API invoked to release a 11512 * completion-queue event back into the free pool. 11513 **/ 11514 void 11515 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 11516 struct lpfc_cq_event *cq_event) 11517 { 11518 unsigned long iflags; 11519 spin_lock_irqsave(&phba->hbalock, iflags); 11520 __lpfc_sli4_cq_event_release(phba, cq_event); 11521 spin_unlock_irqrestore(&phba->hbalock, iflags); 11522 } 11523 11524 /** 11525 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 11526 * @phba: pointer to lpfc hba data structure. 11527 * 11528 * This routine is invoked to release all the pending completion-queue events 11529 * back into the free pool for device reset. 11530 **/ 11531 static void 11532 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 11533 { 11534 LIST_HEAD(cq_event_list); 11535 struct lpfc_cq_event *cq_event; 11536 unsigned long iflags; 11537 11538 /* Retrieve all the pending WCQEs from pending WCQE lists */ 11539 11540 /* Pending ELS XRI abort events */ 11541 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 11542 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 11543 &cq_event_list); 11544 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 11545 11546 /* Pending async events */ 11547 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 11548 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 11549 &cq_event_list); 11550 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 11551 11552 while (!list_empty(&cq_event_list)) { 11553 list_remove_head(&cq_event_list, cq_event, 11554 struct lpfc_cq_event, list); 11555 lpfc_sli4_cq_event_release(phba, cq_event); 11556 } 11557 } 11558 11559 /** 11560 * lpfc_pci_function_reset - Reset pci function. 11561 * @phba: pointer to lpfc hba data structure. 11562 * 11563 * This routine is invoked to request a PCI function reset. It will destroy 11564 * all resources assigned to the PCI function that originates this request. 11565 * 11566 * Return codes 11567 * 0 - successful 11568 * -ENOMEM - No available memory 11569 * -EIO - The mailbox failed to complete successfully. 
11570 **/ 11571 int 11572 lpfc_pci_function_reset(struct lpfc_hba *phba) 11573 { 11574 LPFC_MBOXQ_t *mboxq; 11575 uint32_t rc = 0, if_type; 11576 uint32_t shdr_status, shdr_add_status; 11577 uint32_t rdy_chk; 11578 uint32_t port_reset = 0; 11579 union lpfc_sli4_cfg_shdr *shdr; 11580 struct lpfc_register reg_data; 11581 uint16_t devid; 11582 11583 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11584 switch (if_type) { 11585 case LPFC_SLI_INTF_IF_TYPE_0: 11586 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 11587 GFP_KERNEL); 11588 if (!mboxq) { 11589 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11590 "0494 Unable to allocate memory for " 11591 "issuing SLI_FUNCTION_RESET mailbox " 11592 "command\n"); 11593 return -ENOMEM; 11594 } 11595 11596 /* Setup PCI function reset mailbox-ioctl command */ 11597 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11598 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 11599 LPFC_SLI4_MBX_EMBED); 11600 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11601 shdr = (union lpfc_sli4_cfg_shdr *) 11602 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 11603 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11604 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 11605 &shdr->response); 11606 mempool_free(mboxq, phba->mbox_mem_pool); 11607 if (shdr_status || shdr_add_status || rc) { 11608 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11609 "0495 SLI_FUNCTION_RESET mailbox " 11610 "failed with status x%x add_status x%x," 11611 " mbx status x%x\n", 11612 shdr_status, shdr_add_status, rc); 11613 rc = -ENXIO; 11614 } 11615 break; 11616 case LPFC_SLI_INTF_IF_TYPE_2: 11617 case LPFC_SLI_INTF_IF_TYPE_6: 11618 wait: 11619 /* 11620 * Poll the Port Status Register and wait for RDY for 11621 * up to 30 seconds. If the port doesn't respond, treat 11622 * it as an error. 11623 */ 11624 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 11625 if (lpfc_readl(phba->sli4_hba.u.if_type2. 11626 STATUSregaddr, ®_data.word0)) { 11627 rc = -ENODEV; 11628 goto out; 11629 } 11630 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 11631 break; 11632 msleep(20); 11633 } 11634 11635 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 11636 phba->work_status[0] = readl( 11637 phba->sli4_hba.u.if_type2.ERR1regaddr); 11638 phba->work_status[1] = readl( 11639 phba->sli4_hba.u.if_type2.ERR2regaddr); 11640 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11641 "2890 Port not ready, port status reg " 11642 "0x%x error 1=0x%x, error 2=0x%x\n", 11643 reg_data.word0, 11644 phba->work_status[0], 11645 phba->work_status[1]); 11646 rc = -ENODEV; 11647 goto out; 11648 } 11649 11650 if (bf_get(lpfc_sliport_status_pldv, ®_data)) 11651 lpfc_pldv_detect = true; 11652 11653 if (!port_reset) { 11654 /* 11655 * Reset the port now 11656 */ 11657 reg_data.word0 = 0; 11658 bf_set(lpfc_sliport_ctrl_end, ®_data, 11659 LPFC_SLIPORT_LITTLE_ENDIAN); 11660 bf_set(lpfc_sliport_ctrl_ip, ®_data, 11661 LPFC_SLIPORT_INIT_PORT); 11662 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 11663 CTRLregaddr); 11664 /* flush */ 11665 pci_read_config_word(phba->pcidev, 11666 PCI_DEVICE_ID, &devid); 11667 11668 port_reset = 1; 11669 msleep(20); 11670 goto wait; 11671 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 11672 rc = -ENODEV; 11673 goto out; 11674 } 11675 break; 11676 11677 case LPFC_SLI_INTF_IF_TYPE_1: 11678 default: 11679 break; 11680 } 11681 11682 out: 11683 /* Catch the not-ready port failure after a port reset. 
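 * Getting here with rc set (a failed SLI_FUNCTION_RESET mailbox, a
 * ready-poll timeout, or a port that still flags that a reset is needed)
 * means the function could not be reset; the message below points the
 * operator at the board_mode fw_reset recovery path.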
*/ 11684 if (rc) { 11685 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11686 "3317 HBA not functional: IP Reset Failed " 11687 "try: echo fw_reset > board_mode\n"); 11688 rc = -ENODEV; 11689 } 11690 11691 return rc; 11692 } 11693 11694 /** 11695 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 11696 * @phba: pointer to lpfc hba data structure. 11697 * 11698 * This routine is invoked to set up the PCI device memory space for device 11699 * with SLI-4 interface spec. 11700 * 11701 * Return codes 11702 * 0 - successful 11703 * other values - error 11704 **/ 11705 static int 11706 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 11707 { 11708 struct pci_dev *pdev = phba->pcidev; 11709 unsigned long bar0map_len, bar1map_len, bar2map_len; 11710 int error; 11711 uint32_t if_type; 11712 11713 if (!pdev) 11714 return -ENODEV; 11715 11716 /* Set the device DMA mask size */ 11717 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11718 if (error) 11719 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11720 if (error) 11721 return error; 11722 11723 /* 11724 * The BARs and register set definitions and offset locations are 11725 * dependent on the if_type. 11726 */ 11727 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 11728 &phba->sli4_hba.sli_intf.word0)) { 11729 return -ENODEV; 11730 } 11731 11732 /* There is no SLI3 failback for SLI4 devices. */ 11733 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 11734 LPFC_SLI_INTF_VALID) { 11735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11736 "2894 SLI_INTF reg contents invalid " 11737 "sli_intf reg 0x%x\n", 11738 phba->sli4_hba.sli_intf.word0); 11739 return -ENODEV; 11740 } 11741 11742 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11743 /* 11744 * Get the bus address of SLI4 device Bar regions and the 11745 * number of bytes required by each mapping. The mapping of the 11746 * particular PCI BARs regions is dependent on the type of 11747 * SLI4 device. 
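 *
 * Roughly, as handled below (using the PCI_64BIT_BAR* indices):
 *   if_type 0:  BAR0 -> SLI4 config registers,
 *               BAR2 -> HBA control registers,
 *               BAR4 -> doorbell registers
 *   if_type 2:  BAR0 -> SLI4 config registers only
 *   if_type 6:  BAR0 -> SLI4 config registers,
 *               BAR2 -> doorbell registers,
 *               BAR4 -> DPP registers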
11748 */ 11749 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 11750 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 11751 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 11752 11753 /* 11754 * Map SLI4 PCI Config Space Register base to a kernel virtual 11755 * addr 11756 */ 11757 phba->sli4_hba.conf_regs_memmap_p = 11758 ioremap(phba->pci_bar0_map, bar0map_len); 11759 if (!phba->sli4_hba.conf_regs_memmap_p) { 11760 dev_printk(KERN_ERR, &pdev->dev, 11761 "ioremap failed for SLI4 PCI config " 11762 "registers.\n"); 11763 return -ENODEV; 11764 } 11765 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 11766 /* Set up BAR0 PCI config space register memory map */ 11767 lpfc_sli4_bar0_register_memmap(phba, if_type); 11768 } else { 11769 phba->pci_bar0_map = pci_resource_start(pdev, 1); 11770 bar0map_len = pci_resource_len(pdev, 1); 11771 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 11772 dev_printk(KERN_ERR, &pdev->dev, 11773 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 11774 return -ENODEV; 11775 } 11776 phba->sli4_hba.conf_regs_memmap_p = 11777 ioremap(phba->pci_bar0_map, bar0map_len); 11778 if (!phba->sli4_hba.conf_regs_memmap_p) { 11779 dev_printk(KERN_ERR, &pdev->dev, 11780 "ioremap failed for SLI4 PCI config " 11781 "registers.\n"); 11782 return -ENODEV; 11783 } 11784 lpfc_sli4_bar0_register_memmap(phba, if_type); 11785 } 11786 11787 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11788 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 11789 /* 11790 * Map SLI4 if type 0 HBA Control Register base to a 11791 * kernel virtual address and setup the registers. 11792 */ 11793 phba->pci_bar1_map = pci_resource_start(pdev, 11794 PCI_64BIT_BAR2); 11795 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11796 phba->sli4_hba.ctrl_regs_memmap_p = 11797 ioremap(phba->pci_bar1_map, 11798 bar1map_len); 11799 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 11800 dev_err(&pdev->dev, 11801 "ioremap failed for SLI4 HBA " 11802 "control registers.\n"); 11803 error = -ENOMEM; 11804 goto out_iounmap_conf; 11805 } 11806 phba->pci_bar2_memmap_p = 11807 phba->sli4_hba.ctrl_regs_memmap_p; 11808 lpfc_sli4_bar1_register_memmap(phba, if_type); 11809 } else { 11810 error = -ENOMEM; 11811 goto out_iounmap_conf; 11812 } 11813 } 11814 11815 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 11816 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 11817 /* 11818 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 11819 * virtual address and setup the registers. 11820 */ 11821 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 11822 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11823 phba->sli4_hba.drbl_regs_memmap_p = 11824 ioremap(phba->pci_bar1_map, bar1map_len); 11825 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11826 dev_err(&pdev->dev, 11827 "ioremap failed for SLI4 HBA doorbell registers.\n"); 11828 error = -ENOMEM; 11829 goto out_iounmap_conf; 11830 } 11831 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 11832 lpfc_sli4_bar1_register_memmap(phba, if_type); 11833 } 11834 11835 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11836 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11837 /* 11838 * Map SLI4 if type 0 HBA Doorbell Register base to 11839 * a kernel virtual address and setup the registers. 
11840 */ 11841 phba->pci_bar2_map = pci_resource_start(pdev, 11842 PCI_64BIT_BAR4); 11843 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11844 phba->sli4_hba.drbl_regs_memmap_p = 11845 ioremap(phba->pci_bar2_map, 11846 bar2map_len); 11847 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11848 dev_err(&pdev->dev, 11849 "ioremap failed for SLI4 HBA" 11850 " doorbell registers.\n"); 11851 error = -ENOMEM; 11852 goto out_iounmap_ctrl; 11853 } 11854 phba->pci_bar4_memmap_p = 11855 phba->sli4_hba.drbl_regs_memmap_p; 11856 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 11857 if (error) 11858 goto out_iounmap_all; 11859 } else { 11860 error = -ENOMEM; 11861 goto out_iounmap_all; 11862 } 11863 } 11864 11865 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 11866 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11867 /* 11868 * Map SLI4 if type 6 HBA DPP Register base to a kernel 11869 * virtual address and setup the registers. 11870 */ 11871 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 11872 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11873 phba->sli4_hba.dpp_regs_memmap_p = 11874 ioremap(phba->pci_bar2_map, bar2map_len); 11875 if (!phba->sli4_hba.dpp_regs_memmap_p) { 11876 dev_err(&pdev->dev, 11877 "ioremap failed for SLI4 HBA dpp registers.\n"); 11878 error = -ENOMEM; 11879 goto out_iounmap_ctrl; 11880 } 11881 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 11882 } 11883 11884 /* Set up the EQ/CQ register handeling functions now */ 11885 switch (if_type) { 11886 case LPFC_SLI_INTF_IF_TYPE_0: 11887 case LPFC_SLI_INTF_IF_TYPE_2: 11888 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 11889 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 11890 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 11891 break; 11892 case LPFC_SLI_INTF_IF_TYPE_6: 11893 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 11894 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 11895 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 11896 break; 11897 default: 11898 break; 11899 } 11900 11901 return 0; 11902 11903 out_iounmap_all: 11904 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11905 out_iounmap_ctrl: 11906 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 11907 out_iounmap_conf: 11908 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11909 11910 return error; 11911 } 11912 11913 /** 11914 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 11915 * @phba: pointer to lpfc hba data structure. 11916 * 11917 * This routine is invoked to unset the PCI device memory space for device 11918 * with SLI-4 interface spec. 
11919 **/ 11920 static void 11921 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 11922 { 11923 uint32_t if_type; 11924 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11925 11926 switch (if_type) { 11927 case LPFC_SLI_INTF_IF_TYPE_0: 11928 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11929 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 11930 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11931 break; 11932 case LPFC_SLI_INTF_IF_TYPE_2: 11933 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11934 break; 11935 case LPFC_SLI_INTF_IF_TYPE_6: 11936 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11937 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11938 if (phba->sli4_hba.dpp_regs_memmap_p) 11939 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 11940 break; 11941 case LPFC_SLI_INTF_IF_TYPE_1: 11942 default: 11943 dev_printk(KERN_ERR, &phba->pcidev->dev, 11944 "FATAL - unsupported SLI4 interface type - %d\n", 11945 if_type); 11946 break; 11947 } 11948 } 11949 11950 /** 11951 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 11952 * @phba: pointer to lpfc hba data structure. 11953 * 11954 * This routine is invoked to enable the MSI-X interrupt vectors to device 11955 * with SLI-3 interface specs. 11956 * 11957 * Return codes 11958 * 0 - successful 11959 * other values - error 11960 **/ 11961 static int 11962 lpfc_sli_enable_msix(struct lpfc_hba *phba) 11963 { 11964 int rc; 11965 LPFC_MBOXQ_t *pmb; 11966 11967 /* Set up MSI-X multi-message vectors */ 11968 rc = pci_alloc_irq_vectors(phba->pcidev, 11969 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 11970 if (rc < 0) { 11971 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11972 "0420 PCI enable MSI-X failed (%d)\n", rc); 11973 goto vec_fail_out; 11974 } 11975 11976 /* 11977 * Assign MSI-X vectors to interrupt handlers 11978 */ 11979 11980 /* vector-0 is associated to slow-path handler */ 11981 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 11982 &lpfc_sli_sp_intr_handler, 0, 11983 LPFC_SP_DRIVER_HANDLER_NAME, phba); 11984 if (rc) { 11985 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11986 "0421 MSI-X slow-path request_irq failed " 11987 "(%d)\n", rc); 11988 goto msi_fail_out; 11989 } 11990 11991 /* vector-1 is associated to fast-path handler */ 11992 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 11993 &lpfc_sli_fp_intr_handler, 0, 11994 LPFC_FP_DRIVER_HANDLER_NAME, phba); 11995 11996 if (rc) { 11997 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11998 "0429 MSI-X fast-path request_irq failed " 11999 "(%d)\n", rc); 12000 goto irq_fail_out; 12001 } 12002 12003 /* 12004 * Configure HBA MSI-X attention conditions to messages 12005 */ 12006 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12007 12008 if (!pmb) { 12009 rc = -ENOMEM; 12010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12011 "0474 Unable to allocate memory for issuing " 12012 "MBOX_CONFIG_MSI command\n"); 12013 goto mem_fail_out; 12014 } 12015 rc = lpfc_config_msi(phba, pmb); 12016 if (rc) 12017 goto mbx_fail_out; 12018 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 12019 if (rc != MBX_SUCCESS) { 12020 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 12021 "0351 Config MSI mailbox command failed, " 12022 "mbxCmd x%x, mbxStatus x%x\n", 12023 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 12024 goto mbx_fail_out; 12025 } 12026 12027 /* Free memory allocated for mailbox command */ 12028 mempool_free(pmb, phba->mbox_mem_pool); 12029 return rc; 12030 12031 mbx_fail_out: 12032 /* Free memory allocated for mailbox command */ 12033 mempool_free(pmb, 
phba->mbox_mem_pool); 12034 12035 mem_fail_out: 12036 /* free the irq already requested */ 12037 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 12038 12039 irq_fail_out: 12040 /* free the irq already requested */ 12041 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 12042 12043 msi_fail_out: 12044 /* Unconfigure MSI-X capability structure */ 12045 pci_free_irq_vectors(phba->pcidev); 12046 12047 vec_fail_out: 12048 return rc; 12049 } 12050 12051 /** 12052 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 12053 * @phba: pointer to lpfc hba data structure. 12054 * 12055 * This routine is invoked to enable the MSI interrupt mode for a device with 12056 * the SLI-3 interface spec. The kernel function pci_enable_msi() is called to 12057 * enable the MSI vector. The device driver is responsible for calling 12058 * request_irq() to register the MSI vector with an interrupt handler, which 12059 * is done in this function. 12060 * 12061 * Return codes 12062 * 0 - successful 12063 * other values - error 12064 */ 12065 static int 12066 lpfc_sli_enable_msi(struct lpfc_hba *phba) 12067 { 12068 int rc; 12069 12070 rc = pci_enable_msi(phba->pcidev); 12071 if (!rc) 12072 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12073 "0462 PCI enable MSI mode success.\n"); 12074 else { 12075 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12076 "0471 PCI enable MSI mode failed (%d)\n", rc); 12077 return rc; 12078 } 12079 12080 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12081 0, LPFC_DRIVER_NAME, phba); 12082 if (rc) { 12083 pci_disable_msi(phba->pcidev); 12084 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12085 "0478 MSI request_irq failed (%d)\n", rc); 12086 } 12087 return rc; 12088 } 12089 12090 /** 12091 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 12092 * @phba: pointer to lpfc hba data structure. 12093 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 12094 * 12095 * This routine is invoked to enable the device interrupt and associate the 12096 * driver's interrupt handler(s) with the interrupt vector(s) of a device with 12097 * the SLI-3 interface spec. Depending on the interrupt mode configured for the 12098 * driver, the driver will try to fall back from the configured interrupt mode 12099 * to an interrupt mode which is supported by the platform, kernel, and device, 12100 * in the order of: 12101 * MSI-X -> MSI -> IRQ. 
12102 * 12103 * Return codes 12104 * 0 - INTx, 1 - MSI, 2 - MSI-X mode successfully enabled 12105 * LPFC_INTR_ERROR - failed to enable any interrupt mode 12106 **/ 12107 static uint32_t 12108 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 12109 { 12110 uint32_t intr_mode = LPFC_INTR_ERROR; 12111 int retval; 12112 12113 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 12114 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 12115 if (retval) 12116 return intr_mode; 12117 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; 12118 12119 if (cfg_mode == 2) { 12120 /* Now, try to enable MSI-X interrupt mode */ 12121 retval = lpfc_sli_enable_msix(phba); 12122 if (!retval) { 12123 /* Indicate initialization to MSI-X mode */ 12124 phba->intr_type = MSIX; 12125 intr_mode = 2; 12126 } 12127 } 12128 12129 /* Fallback to MSI if MSI-X initialization failed */ 12130 if (cfg_mode >= 1 && phba->intr_type == NONE) { 12131 retval = lpfc_sli_enable_msi(phba); 12132 if (!retval) { 12133 /* Indicate initialization to MSI mode */ 12134 phba->intr_type = MSI; 12135 intr_mode = 1; 12136 } 12137 } 12138 12139 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 12140 if (phba->intr_type == NONE) { 12141 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12142 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 12143 if (!retval) { 12144 /* Indicate initialization to INTx mode */ 12145 phba->intr_type = INTx; 12146 intr_mode = 0; 12147 } 12148 } 12149 return intr_mode; 12150 } 12151 12152 /** 12153 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 12154 * @phba: pointer to lpfc hba data structure. 12155 * 12156 * This routine is invoked to disable device interrupt and disassociate the 12157 * driver's interrupt handler(s) from interrupt vector(s) to device with 12158 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 12159 * release the interrupt vector(s) for the message signaled interrupt. 12160 **/ 12161 static void 12162 lpfc_sli_disable_intr(struct lpfc_hba *phba) 12163 { 12164 int nr_irqs, i; 12165 12166 if (phba->intr_type == MSIX) 12167 nr_irqs = LPFC_MSIX_VECTORS; 12168 else 12169 nr_irqs = 1; 12170 12171 for (i = 0; i < nr_irqs; i++) 12172 free_irq(pci_irq_vector(phba->pcidev, i), phba); 12173 pci_free_irq_vectors(phba->pcidev); 12174 12175 /* Reset interrupt management states */ 12176 phba->intr_type = NONE; 12177 phba->sli.slistat.sli_intr = 0; 12178 } 12179 12180 /** 12181 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 12182 * @phba: pointer to lpfc hba data structure. 12183 * @id: EQ vector index or Hardware Queue index 12184 * @match: LPFC_FIND_BY_EQ = match by EQ 12185 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 12186 * Return the CPU that matches the selection criteria 12187 */ 12188 static uint16_t 12189 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 12190 { 12191 struct lpfc_vector_map_info *cpup; 12192 int cpu; 12193 12194 /* Loop through all CPUs */ 12195 for_each_present_cpu(cpu) { 12196 cpup = &phba->sli4_hba.cpu_map[cpu]; 12197 12198 /* If we are matching by EQ, there may be multiple CPUs 12199 * using the same vector, so select the one with 12200 * LPFC_CPU_FIRST_IRQ set. 
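	 * For example (illustrative numbering only), if CPUs 2 and 10 both
	 * post to EQ 2 but only CPU 2 has LPFC_CPU_FIRST_IRQ set, a
	 * LPFC_FIND_BY_EQ lookup for id 2 returns CPU 2.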
12201 */ 12202 if ((match == LPFC_FIND_BY_EQ) && 12203 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 12204 (cpup->eq == id)) 12205 return cpu; 12206 12207 /* If matching by HDWQ, select the first CPU that matches */ 12208 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 12209 return cpu; 12210 } 12211 return 0; 12212 } 12213 12214 #ifdef CONFIG_X86 12215 /** 12216 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 12217 * @phba: pointer to lpfc hba data structure. 12218 * @cpu: CPU map index 12219 * @phys_id: CPU package physical id 12220 * @core_id: CPU core id 12221 */ 12222 static int 12223 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 12224 uint16_t phys_id, uint16_t core_id) 12225 { 12226 struct lpfc_vector_map_info *cpup; 12227 int idx; 12228 12229 for_each_present_cpu(idx) { 12230 cpup = &phba->sli4_hba.cpu_map[idx]; 12231 /* Does the cpup match the one we are looking for */ 12232 if ((cpup->phys_id == phys_id) && 12233 (cpup->core_id == core_id) && 12234 (cpu != idx)) 12235 return 1; 12236 } 12237 return 0; 12238 } 12239 #endif 12240 12241 /* 12242 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 12243 * @phba: pointer to lpfc hba data structure. 12244 * @eqidx: index for eq and irq vector 12245 * @flag: flags to set for vector_map structure 12246 * @cpu: cpu used to index vector_map structure 12247 * 12248 * The routine assigns eq info into vector_map structure 12249 */ 12250 static inline void 12251 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 12252 unsigned int cpu) 12253 { 12254 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 12255 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 12256 12257 cpup->eq = eqidx; 12258 cpup->flag |= flag; 12259 12260 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12261 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 12262 cpu, eqhdl->irq, cpup->eq, cpup->flag); 12263 } 12264 12265 /** 12266 * lpfc_cpu_map_array_init - Initialize cpu_map structure 12267 * @phba: pointer to lpfc hba data structure. 12268 * 12269 * The routine initializes the cpu_map array structure 12270 */ 12271 static void 12272 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 12273 { 12274 struct lpfc_vector_map_info *cpup; 12275 struct lpfc_eq_intr_info *eqi; 12276 int cpu; 12277 12278 for_each_possible_cpu(cpu) { 12279 cpup = &phba->sli4_hba.cpu_map[cpu]; 12280 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 12281 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 12282 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 12283 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 12284 cpup->flag = 0; 12285 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 12286 INIT_LIST_HEAD(&eqi->list); 12287 eqi->icnt = 0; 12288 } 12289 } 12290 12291 /** 12292 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 12293 * @phba: pointer to lpfc hba data structure. 12294 * 12295 * The routine initializes the hba_eq_hdl array structure 12296 */ 12297 static void 12298 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 12299 { 12300 struct lpfc_hba_eq_hdl *eqhdl; 12301 int i; 12302 12303 for (i = 0; i < phba->cfg_irq_chann; i++) { 12304 eqhdl = lpfc_get_eq_hdl(i); 12305 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; 12306 eqhdl->phba = phba; 12307 } 12308 } 12309 12310 /** 12311 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 12312 * @phba: pointer to lpfc hba data structure. 12313 * @vectors: number of msix vectors allocated. 12314 * 12315 * The routine will figure out the CPU affinity assignment for every 12316 * MSI-X vector allocated for the HBA. 
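 * As a rough guide, the code below proceeds in these passes:
 *   1. record phys_id/core_id for every present CPU (flagging
 *      hyper-thread siblings on x86),
 *   2. give any CPU without an EQ the EQ of a peer on the same phys_id,
 *   3. failing that, borrow an EQ from any CPU that already has one,
 *   4. hand out hdwq indices 1:1 to the LPFC_CPU_FIRST_IRQ CPUs,
 *   5. map the remaining CPUs onto existing hdwqs, preferring the same
 *      phys_id/core_id, then the same phys_id, then plain round-robin,
 *   6. round-robin hdwqs across not-present CPUs for future hot-add.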
12317 * In addition, the CPU to IO channel mapping will be calculated 12318 * and the phba->sli4_hba.cpu_map array will reflect this. 12319 */ 12320 static void 12321 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 12322 { 12323 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; 12324 int max_phys_id, min_phys_id; 12325 int max_core_id, min_core_id; 12326 struct lpfc_vector_map_info *cpup; 12327 struct lpfc_vector_map_info *new_cpup; 12328 #ifdef CONFIG_X86 12329 struct cpuinfo_x86 *cpuinfo; 12330 #endif 12331 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12332 struct lpfc_hdwq_stat *c_stat; 12333 #endif 12334 12335 max_phys_id = 0; 12336 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 12337 max_core_id = 0; 12338 min_core_id = LPFC_VECTOR_MAP_EMPTY; 12339 12340 /* Update CPU map with physical id and core id of each CPU */ 12341 for_each_present_cpu(cpu) { 12342 cpup = &phba->sli4_hba.cpu_map[cpu]; 12343 #ifdef CONFIG_X86 12344 cpuinfo = &cpu_data(cpu); 12345 cpup->phys_id = cpuinfo->phys_proc_id; 12346 cpup->core_id = cpuinfo->cpu_core_id; 12347 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 12348 cpup->flag |= LPFC_CPU_MAP_HYPER; 12349 #else 12350 /* No distinction between CPUs for other platforms */ 12351 cpup->phys_id = 0; 12352 cpup->core_id = cpu; 12353 #endif 12354 12355 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12356 "3328 CPU %d physid %d coreid %d flag x%x\n", 12357 cpu, cpup->phys_id, cpup->core_id, cpup->flag); 12358 12359 if (cpup->phys_id > max_phys_id) 12360 max_phys_id = cpup->phys_id; 12361 if (cpup->phys_id < min_phys_id) 12362 min_phys_id = cpup->phys_id; 12363 12364 if (cpup->core_id > max_core_id) 12365 max_core_id = cpup->core_id; 12366 if (cpup->core_id < min_core_id) 12367 min_core_id = cpup->core_id; 12368 } 12369 12370 /* After looking at each irq vector assigned to this pcidev, its 12371 * possible to see that not ALL CPUs have been accounted for. 12372 * Next we will set any unassigned (unaffinitized) cpu map 12373 * entries to a IRQ on the same phys_id. 12374 */ 12375 first_cpu = cpumask_first(cpu_present_mask); 12376 start_cpu = first_cpu; 12377 12378 for_each_present_cpu(cpu) { 12379 cpup = &phba->sli4_hba.cpu_map[cpu]; 12380 12381 /* Is this CPU entry unassigned */ 12382 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 12383 /* Mark CPU as IRQ not assigned by the kernel */ 12384 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 12385 12386 /* If so, find a new_cpup thats on the the SAME 12387 * phys_id as cpup. start_cpu will start where we 12388 * left off so all unassigned entries don't get assgined 12389 * the IRQ of the first entry. 12390 */ 12391 new_cpu = start_cpu; 12392 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12393 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12394 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 12395 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && 12396 (new_cpup->phys_id == cpup->phys_id)) 12397 goto found_same; 12398 new_cpu = cpumask_next( 12399 new_cpu, cpu_present_mask); 12400 if (new_cpu == nr_cpumask_bits) 12401 new_cpu = first_cpu; 12402 } 12403 /* At this point, we leave the CPU as unassigned */ 12404 continue; 12405 found_same: 12406 /* We found a matching phys_id, so copy the IRQ info */ 12407 cpup->eq = new_cpup->eq; 12408 12409 /* Bump start_cpu to the next slot to minmize the 12410 * chance of having multiple unassigned CPU entries 12411 * selecting the same IRQ. 
12412 */
12413 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12414 if (start_cpu == nr_cpumask_bits)
12415 start_cpu = first_cpu;
12416
12417 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12418 "3337 Set Affinity: CPU %d "
12419 "eq %d from peer cpu %d same "
12420 "phys_id (%d)\n",
12421 cpu, cpup->eq, new_cpu,
12422 cpup->phys_id);
12423 }
12424 }
12425
12426 /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12427 start_cpu = first_cpu;
12428
12429 for_each_present_cpu(cpu) {
12430 cpup = &phba->sli4_hba.cpu_map[cpu];
12431
12432 /* Is this entry unassigned */
12433 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12434 /* Mark it as IRQ not assigned by the kernel */
12435 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12436
12437 /* If so, find a new_cpup that is on ANY phys_id,
12438 * not just cpup's. start_cpu will start where we
12439 * left off so all unassigned entries don't get
12440 * assigned the IRQ of the first entry.
12441 */
12442 new_cpu = start_cpu;
12443 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12444 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12445 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12446 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12447 goto found_any;
12448 new_cpu = cpumask_next(
12449 new_cpu, cpu_present_mask);
12450 if (new_cpu == nr_cpumask_bits)
12451 new_cpu = first_cpu;
12452 }
12453 /* We should never leave an entry unassigned */
12454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12455 "3339 Set Affinity: CPU %d "
12456 "eq %d UNASSIGNED\n",
12457 cpu, cpup->eq);
12458 continue;
12459 found_any:
12460 /* We found an available entry, copy the IRQ info */
12461 cpup->eq = new_cpup->eq;
12462
12463 /* Bump start_cpu to the next slot to minimize the
12464 * chance of having multiple unassigned CPU entries
12465 * selecting the same IRQ.
12466 */
12467 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12468 if (start_cpu == nr_cpumask_bits)
12469 start_cpu = first_cpu;
12470
12471 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12472 "3338 Set Affinity: CPU %d "
12473 "eq %d from peer cpu %d (%d/%d)\n",
12474 cpu, cpup->eq, new_cpu,
12475 new_cpup->phys_id, new_cpup->core_id);
12476 }
12477 }
12478
12479 /* Assign hdwq indices that are unique across all cpus in the map
12480 * that are also FIRST_CPUs.
12481 */
12482 idx = 0;
12483 for_each_present_cpu(cpu) {
12484 cpup = &phba->sli4_hba.cpu_map[cpu];
12485
12486 /* Only FIRST IRQs get a hdwq index assignment. */
12487 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12488 continue;
12489
12490 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12491 cpup->hdwq = idx;
12492 idx++;
12493 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12494 "3333 Set Affinity: CPU %d (phys %d core %d): "
12495 "hdwq %d eq %d flg x%x\n",
12496 cpu, cpup->phys_id, cpup->core_id,
12497 cpup->hdwq, cpup->eq, cpup->flag);
12498 }
12499 /* Associate a hdwq with each cpu_map entry.
12500 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12501 * hardware queues than CPUs. In that case we will just round-robin
12502 * the available hardware queues as they get assigned to CPUs.
12503 * The next_idx is the idx from the FIRST_CPU loop above to account
12504 * for irq_chann < hdwq. The idx is used for round-robin assignments
12505 * and needs to start at 0.
12506 */
12507 next_idx = idx;
12508 start_cpu = 0;
12509 idx = 0;
12510 for_each_present_cpu(cpu) {
12511 cpup = &phba->sli4_hba.cpu_map[cpu];
12512
12513 /* FIRST cpus are already mapped.
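 * (each FIRST_IRQ cpu received its unique hdwq index in the loop above)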
*/ 12514 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 12515 continue; 12516 12517 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 12518 * of the unassigned cpus to the next idx so that all 12519 * hdw queues are fully utilized. 12520 */ 12521 if (next_idx < phba->cfg_hdw_queue) { 12522 cpup->hdwq = next_idx; 12523 next_idx++; 12524 continue; 12525 } 12526 12527 /* Not a First CPU and all hdw_queues are used. Reuse a 12528 * Hardware Queue for another CPU, so be smart about it 12529 * and pick one that has its IRQ/EQ mapped to the same phys_id 12530 * (CPU package) and core_id. 12531 */ 12532 new_cpu = start_cpu; 12533 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12534 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12535 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12536 new_cpup->phys_id == cpup->phys_id && 12537 new_cpup->core_id == cpup->core_id) { 12538 goto found_hdwq; 12539 } 12540 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12541 if (new_cpu == nr_cpumask_bits) 12542 new_cpu = first_cpu; 12543 } 12544 12545 /* If we can't match both phys_id and core_id, 12546 * settle for just a phys_id match. 12547 */ 12548 new_cpu = start_cpu; 12549 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12550 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12551 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12552 new_cpup->phys_id == cpup->phys_id) 12553 goto found_hdwq; 12554 12555 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12556 if (new_cpu == nr_cpumask_bits) 12557 new_cpu = first_cpu; 12558 } 12559 12560 /* Otherwise just round robin on cfg_hdw_queue */ 12561 cpup->hdwq = idx % phba->cfg_hdw_queue; 12562 idx++; 12563 goto logit; 12564 found_hdwq: 12565 /* We found an available entry, copy the IRQ info */ 12566 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12567 if (start_cpu == nr_cpumask_bits) 12568 start_cpu = first_cpu; 12569 cpup->hdwq = new_cpup->hdwq; 12570 logit: 12571 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12572 "3335 Set Affinity: CPU %d (phys %d core %d): " 12573 "hdwq %d eq %d flg x%x\n", 12574 cpu, cpup->phys_id, cpup->core_id, 12575 cpup->hdwq, cpup->eq, cpup->flag); 12576 } 12577 12578 /* 12579 * Initialize the cpu_map slots for not-present cpus in case 12580 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 12581 */ 12582 idx = 0; 12583 for_each_possible_cpu(cpu) { 12584 cpup = &phba->sli4_hba.cpu_map[cpu]; 12585 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12586 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 12587 c_stat->hdwq_no = cpup->hdwq; 12588 #endif 12589 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 12590 continue; 12591 12592 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 12593 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12594 c_stat->hdwq_no = cpup->hdwq; 12595 #endif 12596 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12597 "3340 Set Affinity: not present " 12598 "CPU %d hdwq %d\n", 12599 cpu, cpup->hdwq); 12600 } 12601 12602 /* The cpu_map array will be used later during initialization 12603 * when EQ / CQ / WQs are allocated and configured. 12604 */ 12605 return; 12606 } 12607 12608 /** 12609 * lpfc_cpuhp_get_eq 12610 * 12611 * @phba: pointer to lpfc hba data structure. 
12612 * @cpu: cpu going offline 12613 * @eqlist: eq list to append to 12614 */ 12615 static int 12616 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, 12617 struct list_head *eqlist) 12618 { 12619 const struct cpumask *maskp; 12620 struct lpfc_queue *eq; 12621 struct cpumask *tmp; 12622 u16 idx; 12623 12624 tmp = kzalloc(cpumask_size(), GFP_KERNEL); 12625 if (!tmp) 12626 return -ENOMEM; 12627 12628 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12629 maskp = pci_irq_get_affinity(phba->pcidev, idx); 12630 if (!maskp) 12631 continue; 12632 /* 12633 * if irq is not affinitized to the cpu going 12634 * then we don't need to poll the eq attached 12635 * to it. 12636 */ 12637 if (!cpumask_and(tmp, maskp, cpumask_of(cpu))) 12638 continue; 12639 /* get the cpus that are online and are affini- 12640 * tized to this irq vector. If the count is 12641 * more than 1 then cpuhp is not going to shut- 12642 * down this vector. Since this cpu has not 12643 * gone offline yet, we need >1. 12644 */ 12645 cpumask_and(tmp, maskp, cpu_online_mask); 12646 if (cpumask_weight(tmp) > 1) 12647 continue; 12648 12649 /* Now that we have an irq to shutdown, get the eq 12650 * mapped to this irq. Note: multiple hdwq's in 12651 * the software can share an eq, but eventually 12652 * only eq will be mapped to this vector 12653 */ 12654 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 12655 list_add(&eq->_poll_list, eqlist); 12656 } 12657 kfree(tmp); 12658 return 0; 12659 } 12660 12661 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) 12662 { 12663 if (phba->sli_rev != LPFC_SLI_REV4) 12664 return; 12665 12666 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, 12667 &phba->cpuhp); 12668 /* 12669 * unregistering the instance doesn't stop the polling 12670 * timer. Wait for the poll timer to retire. 
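 * (presumably the poll timer callback samples phba->poll_list under
 * RCU, which is why synchronize_rcu() below precedes del_timer_sync())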
12671 */ 12672 synchronize_rcu(); 12673 del_timer_sync(&phba->cpuhp_poll_timer); 12674 } 12675 12676 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 12677 { 12678 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 12679 return; 12680 12681 __lpfc_cpuhp_remove(phba); 12682 } 12683 12684 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 12685 { 12686 if (phba->sli_rev != LPFC_SLI_REV4) 12687 return; 12688 12689 rcu_read_lock(); 12690 12691 if (!list_empty(&phba->poll_list)) 12692 mod_timer(&phba->cpuhp_poll_timer, 12693 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 12694 12695 rcu_read_unlock(); 12696 12697 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 12698 &phba->cpuhp); 12699 } 12700 12701 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 12702 { 12703 if (phba->pport->load_flag & FC_UNLOADING) { 12704 *retval = -EAGAIN; 12705 return true; 12706 } 12707 12708 if (phba->sli_rev != LPFC_SLI_REV4) { 12709 *retval = 0; 12710 return true; 12711 } 12712 12713 /* proceed with the hotplug */ 12714 return false; 12715 } 12716 12717 /** 12718 * lpfc_irq_set_aff - set IRQ affinity 12719 * @eqhdl: EQ handle 12720 * @cpu: cpu to set affinity 12721 * 12722 **/ 12723 static inline void 12724 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 12725 { 12726 cpumask_clear(&eqhdl->aff_mask); 12727 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 12728 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12729 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask); 12730 } 12731 12732 /** 12733 * lpfc_irq_clear_aff - clear IRQ affinity 12734 * @eqhdl: EQ handle 12735 * 12736 **/ 12737 static inline void 12738 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 12739 { 12740 cpumask_clear(&eqhdl->aff_mask); 12741 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12742 } 12743 12744 /** 12745 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 12746 * @phba: pointer to HBA context object. 12747 * @cpu: cpu going offline/online 12748 * @offline: true, cpu is going offline. false, cpu is coming online. 12749 * 12750 * If cpu is going offline, we'll try our best effort to find the next 12751 * online cpu on the phba's original_mask and migrate all offlining IRQ 12752 * affinities. 12753 * 12754 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. 12755 * 12756 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on 12757 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
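 * Example (illustrative only): with an original mask of CPUs 0-7 and
 * CPU 3 going offline, every eqhdl whose aff_mask contains CPU 3 is
 * re-pointed at the next online CPU found in that mask; when CPU 3
 * comes back, only its own FIRST_IRQ EQ is re-affinitized to it.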
12758 * 12759 **/ 12760 static void 12761 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 12762 { 12763 struct lpfc_vector_map_info *cpup; 12764 struct cpumask *aff_mask; 12765 unsigned int cpu_select, cpu_next, idx; 12766 const struct cpumask *orig_mask; 12767 12768 if (phba->irq_chann_mode == NORMAL_MODE) 12769 return; 12770 12771 orig_mask = &phba->sli4_hba.irq_aff_mask; 12772 12773 if (!cpumask_test_cpu(cpu, orig_mask)) 12774 return; 12775 12776 cpup = &phba->sli4_hba.cpu_map[cpu]; 12777 12778 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12779 return; 12780 12781 if (offline) { 12782 /* Find next online CPU on original mask */ 12783 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); 12784 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); 12785 12786 /* Found a valid CPU */ 12787 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 12788 /* Go through each eqhdl and ensure offlining 12789 * cpu aff_mask is migrated 12790 */ 12791 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12792 aff_mask = lpfc_get_aff_mask(idx); 12793 12794 /* Migrate affinity */ 12795 if (cpumask_test_cpu(cpu, aff_mask)) 12796 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 12797 cpu_select); 12798 } 12799 } else { 12800 /* Rely on irqbalance if no online CPUs left on NUMA */ 12801 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 12802 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 12803 } 12804 } else { 12805 /* Migrate affinity back to this CPU */ 12806 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 12807 } 12808 } 12809 12810 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 12811 { 12812 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12813 struct lpfc_queue *eq, *next; 12814 LIST_HEAD(eqlist); 12815 int retval; 12816 12817 if (!phba) { 12818 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12819 return 0; 12820 } 12821 12822 if (__lpfc_cpuhp_checks(phba, &retval)) 12823 return retval; 12824 12825 lpfc_irq_rebalance(phba, cpu, true); 12826 12827 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 12828 if (retval) 12829 return retval; 12830 12831 /* start polling on these eq's */ 12832 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 12833 list_del_init(&eq->_poll_list); 12834 lpfc_sli4_start_polling(eq); 12835 } 12836 12837 return 0; 12838 } 12839 12840 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 12841 { 12842 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12843 struct lpfc_queue *eq, *next; 12844 unsigned int n; 12845 int retval; 12846 12847 if (!phba) { 12848 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12849 return 0; 12850 } 12851 12852 if (__lpfc_cpuhp_checks(phba, &retval)) 12853 return retval; 12854 12855 lpfc_irq_rebalance(phba, cpu, false); 12856 12857 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 12858 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 12859 if (n == cpu) 12860 lpfc_sli4_stop_polling(eq); 12861 } 12862 12863 return 0; 12864 } 12865 12866 /** 12867 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 12868 * @phba: pointer to lpfc hba data structure. 12869 * 12870 * This routine is invoked to enable the MSI-X interrupt vectors to device 12871 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 12872 * to cpus on the system. 
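 * (illustrative sizing: with cfg_irq_chann = 16 but only 8 CPUs in the
 * adapter-local mask, only 8 vectors are requested, and cfg_irq_chann
 * is trimmed afterwards to whatever pci_alloc_irq_vectors() grants)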
12873 * 12874 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 12875 * the number of cpus on the same numa node as this adapter. The vectors are 12876 * allocated without requesting OS affinity mapping. A vector will be 12877 * allocated and assigned to each online and offline cpu. If the cpu is 12878 * online, then affinity will be set to that cpu. If the cpu is offline, then 12879 * affinity will be set to the nearest peer cpu within the numa node that is 12880 * online. If there are no online cpus within the numa node, affinity is not 12881 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 12882 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 12883 * configured. 12884 * 12885 * If numa mode is not enabled and there is more than 1 vector allocated, then 12886 * the driver relies on the managed irq interface where the OS assigns vector to 12887 * cpu affinity. The driver will then use that affinity mapping to setup its 12888 * cpu mapping table. 12889 * 12890 * Return codes 12891 * 0 - successful 12892 * other values - error 12893 **/ 12894 static int 12895 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 12896 { 12897 int vectors, rc, index; 12898 char *name; 12899 const struct cpumask *aff_mask = NULL; 12900 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 12901 struct lpfc_vector_map_info *cpup; 12902 struct lpfc_hba_eq_hdl *eqhdl; 12903 const struct cpumask *maskp; 12904 unsigned int flags = PCI_IRQ_MSIX; 12905 12906 /* Set up MSI-X multi-message vectors */ 12907 vectors = phba->cfg_irq_chann; 12908 12909 if (phba->irq_chann_mode != NORMAL_MODE) 12910 aff_mask = &phba->sli4_hba.irq_aff_mask; 12911 12912 if (aff_mask) { 12913 cpu_cnt = cpumask_weight(aff_mask); 12914 vectors = min(phba->cfg_irq_chann, cpu_cnt); 12915 12916 /* cpu: iterates over aff_mask including offline or online 12917 * cpu_select: iterates over online aff_mask to set affinity 12918 */ 12919 cpu = cpumask_first(aff_mask); 12920 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 12921 } else { 12922 flags |= PCI_IRQ_AFFINITY; 12923 } 12924 12925 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 12926 if (rc < 0) { 12927 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12928 "0484 PCI enable MSI-X failed (%d)\n", rc); 12929 goto vec_fail_out; 12930 } 12931 vectors = rc; 12932 12933 /* Assign MSI-X vectors to interrupt handlers */ 12934 for (index = 0; index < vectors; index++) { 12935 eqhdl = lpfc_get_eq_hdl(index); 12936 name = eqhdl->handler_name; 12937 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 12938 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 12939 LPFC_DRIVER_HANDLER_NAME"%d", index); 12940 12941 eqhdl->idx = index; 12942 rc = request_irq(pci_irq_vector(phba->pcidev, index), 12943 &lpfc_sli4_hba_intr_handler, 0, 12944 name, eqhdl); 12945 if (rc) { 12946 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12947 "0486 MSI-X fast-path (%d) " 12948 "request_irq failed (%d)\n", index, rc); 12949 goto cfg_fail_out; 12950 } 12951 12952 eqhdl->irq = pci_irq_vector(phba->pcidev, index); 12953 12954 if (aff_mask) { 12955 /* If found a neighboring online cpu, set affinity */ 12956 if (cpu_select < nr_cpu_ids) 12957 lpfc_irq_set_aff(eqhdl, cpu_select); 12958 12959 /* Assign EQ to cpu_map */ 12960 lpfc_assign_eq_map_info(phba, index, 12961 LPFC_CPU_FIRST_IRQ, 12962 cpu); 12963 12964 /* Iterate to next offline or online cpu in aff_mask */ 12965 cpu = cpumask_next(cpu, aff_mask); 12966 12967 /* Find next online cpu in aff_mask to set 
affinity */ 12968 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 12969 } else if (vectors == 1) { 12970 cpu = cpumask_first(cpu_present_mask); 12971 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, 12972 cpu); 12973 } else { 12974 maskp = pci_irq_get_affinity(phba->pcidev, index); 12975 12976 /* Loop through all CPUs associated with vector index */ 12977 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 12978 cpup = &phba->sli4_hba.cpu_map[cpu]; 12979 12980 /* If this is the first CPU thats assigned to 12981 * this vector, set LPFC_CPU_FIRST_IRQ. 12982 * 12983 * With certain platforms its possible that irq 12984 * vectors are affinitized to all the cpu's. 12985 * This can result in each cpu_map.eq to be set 12986 * to the last vector, resulting in overwrite 12987 * of all the previous cpu_map.eq. Ensure that 12988 * each vector receives a place in cpu_map. 12989 * Later call to lpfc_cpu_affinity_check will 12990 * ensure we are nicely balanced out. 12991 */ 12992 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) 12993 continue; 12994 lpfc_assign_eq_map_info(phba, index, 12995 LPFC_CPU_FIRST_IRQ, 12996 cpu); 12997 break; 12998 } 12999 } 13000 } 13001 13002 if (vectors != phba->cfg_irq_chann) { 13003 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13004 "3238 Reducing IO channels to match number of " 13005 "MSI-X vectors, requested %d got %d\n", 13006 phba->cfg_irq_chann, vectors); 13007 if (phba->cfg_irq_chann > vectors) 13008 phba->cfg_irq_chann = vectors; 13009 } 13010 13011 return rc; 13012 13013 cfg_fail_out: 13014 /* free the irq already requested */ 13015 for (--index; index >= 0; index--) { 13016 eqhdl = lpfc_get_eq_hdl(index); 13017 lpfc_irq_clear_aff(eqhdl); 13018 free_irq(eqhdl->irq, eqhdl); 13019 } 13020 13021 /* Unconfigure MSI-X capability structure */ 13022 pci_free_irq_vectors(phba->pcidev); 13023 13024 vec_fail_out: 13025 return rc; 13026 } 13027 13028 /** 13029 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 13030 * @phba: pointer to lpfc hba data structure. 13031 * 13032 * This routine is invoked to enable the MSI interrupt mode to device with 13033 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 13034 * called to enable the MSI vector. The device driver is responsible for 13035 * calling the request_irq() to register MSI vector with a interrupt the 13036 * handler, which is done in this function. 13037 * 13038 * Return codes 13039 * 0 - successful 13040 * other values - error 13041 **/ 13042 static int 13043 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 13044 { 13045 int rc, index; 13046 unsigned int cpu; 13047 struct lpfc_hba_eq_hdl *eqhdl; 13048 13049 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 13050 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 13051 if (rc > 0) 13052 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13053 "0487 PCI enable MSI mode success.\n"); 13054 else { 13055 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13056 "0488 PCI enable MSI mode failed (%d)\n", rc); 13057 return rc ? 
rc : -1; 13058 } 13059 13060 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13061 0, LPFC_DRIVER_NAME, phba); 13062 if (rc) { 13063 pci_free_irq_vectors(phba->pcidev); 13064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13065 "0490 MSI request_irq failed (%d)\n", rc); 13066 return rc; 13067 } 13068 13069 eqhdl = lpfc_get_eq_hdl(0); 13070 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 13071 13072 cpu = cpumask_first(cpu_present_mask); 13073 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 13074 13075 for (index = 0; index < phba->cfg_irq_chann; index++) { 13076 eqhdl = lpfc_get_eq_hdl(index); 13077 eqhdl->idx = index; 13078 } 13079 13080 return 0; 13081 } 13082 13083 /** 13084 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 13085 * @phba: pointer to lpfc hba data structure. 13086 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 13087 * 13088 * This routine is invoked to enable device interrupt and associate driver's 13089 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 13090 * interface spec. Depends on the interrupt mode configured to the driver, 13091 * the driver will try to fallback from the configured interrupt mode to an 13092 * interrupt mode which is supported by the platform, kernel, and device in 13093 * the order of: 13094 * MSI-X -> MSI -> IRQ. 13095 * 13096 * Return codes 13097 * 0 - successful 13098 * other values - error 13099 **/ 13100 static uint32_t 13101 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 13102 { 13103 uint32_t intr_mode = LPFC_INTR_ERROR; 13104 int retval, idx; 13105 13106 if (cfg_mode == 2) { 13107 /* Preparation before conf_msi mbox cmd */ 13108 retval = 0; 13109 if (!retval) { 13110 /* Now, try to enable MSI-X interrupt mode */ 13111 retval = lpfc_sli4_enable_msix(phba); 13112 if (!retval) { 13113 /* Indicate initialization to MSI-X mode */ 13114 phba->intr_type = MSIX; 13115 intr_mode = 2; 13116 } 13117 } 13118 } 13119 13120 /* Fallback to MSI if MSI-X initialization failed */ 13121 if (cfg_mode >= 1 && phba->intr_type == NONE) { 13122 retval = lpfc_sli4_enable_msi(phba); 13123 if (!retval) { 13124 /* Indicate initialization to MSI mode */ 13125 phba->intr_type = MSI; 13126 intr_mode = 1; 13127 } 13128 } 13129 13130 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 13131 if (phba->intr_type == NONE) { 13132 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13133 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 13134 if (!retval) { 13135 struct lpfc_hba_eq_hdl *eqhdl; 13136 unsigned int cpu; 13137 13138 /* Indicate initialization to INTx mode */ 13139 phba->intr_type = INTx; 13140 intr_mode = 0; 13141 13142 eqhdl = lpfc_get_eq_hdl(0); 13143 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 13144 13145 cpu = cpumask_first(cpu_present_mask); 13146 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 13147 cpu); 13148 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 13149 eqhdl = lpfc_get_eq_hdl(idx); 13150 eqhdl->idx = idx; 13151 } 13152 } 13153 } 13154 return intr_mode; 13155 } 13156 13157 /** 13158 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 13159 * @phba: pointer to lpfc hba data structure. 13160 * 13161 * This routine is invoked to disable device interrupt and disassociate 13162 * the driver's interrupt handler(s) from interrupt vector(s) to device 13163 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 13164 * will release the interrupt vector(s) for the message signaled interrupt. 
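 * (in MSI-X mode one IRQ per EQ handle is freed after its driver-set
 * affinity is cleared; in MSI and INTx modes the single IRQ registered
 * against the phba is freed; pci_free_irq_vectors() then releases the
 * vectors themselves)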
13165 **/ 13166 static void 13167 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 13168 { 13169 /* Disable the currently initialized interrupt mode */ 13170 if (phba->intr_type == MSIX) { 13171 int index; 13172 struct lpfc_hba_eq_hdl *eqhdl; 13173 13174 /* Free up MSI-X multi-message vectors */ 13175 for (index = 0; index < phba->cfg_irq_chann; index++) { 13176 eqhdl = lpfc_get_eq_hdl(index); 13177 lpfc_irq_clear_aff(eqhdl); 13178 free_irq(eqhdl->irq, eqhdl); 13179 } 13180 } else { 13181 free_irq(phba->pcidev->irq, phba); 13182 } 13183 13184 pci_free_irq_vectors(phba->pcidev); 13185 13186 /* Reset interrupt management states */ 13187 phba->intr_type = NONE; 13188 phba->sli.slistat.sli_intr = 0; 13189 } 13190 13191 /** 13192 * lpfc_unset_hba - Unset SLI3 hba device initialization 13193 * @phba: pointer to lpfc hba data structure. 13194 * 13195 * This routine is invoked to unset the HBA device initialization steps to 13196 * a device with SLI-3 interface spec. 13197 **/ 13198 static void 13199 lpfc_unset_hba(struct lpfc_hba *phba) 13200 { 13201 struct lpfc_vport *vport = phba->pport; 13202 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 13203 13204 spin_lock_irq(shost->host_lock); 13205 vport->load_flag |= FC_UNLOADING; 13206 spin_unlock_irq(shost->host_lock); 13207 13208 kfree(phba->vpi_bmask); 13209 kfree(phba->vpi_ids); 13210 13211 lpfc_stop_hba_timers(phba); 13212 13213 phba->pport->work_port_events = 0; 13214 13215 lpfc_sli_hba_down(phba); 13216 13217 lpfc_sli_brdrestart(phba); 13218 13219 lpfc_sli_disable_intr(phba); 13220 13221 return; 13222 } 13223 13224 /** 13225 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 13226 * @phba: Pointer to HBA context object. 13227 * 13228 * This function is called in the SLI4 code path to wait for completion 13229 * of device's XRIs exchange busy. It will check the XRI exchange busy 13230 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 13231 * that, it will check the XRI exchange busy on outstanding FCP and ELS 13232 * I/Os every 30 seconds, log error message, and wait forever. Only when 13233 * all XRI exchange busy complete, the driver unload shall proceed with 13234 * invoking the function reset ioctl mailbox command to the CNA and the 13235 * the rest of the driver unload resource release. 13236 **/ 13237 static void 13238 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 13239 { 13240 struct lpfc_sli4_hdw_queue *qp; 13241 int idx, ccnt; 13242 int wait_time = 0; 13243 int io_xri_cmpl = 1; 13244 int nvmet_xri_cmpl = 1; 13245 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13246 13247 /* Driver just aborted IOs during the hba_unset process. Pause 13248 * here to give the HBA time to complete the IO and get entries 13249 * into the abts lists. 13250 */ 13251 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 13252 13253 /* Wait for NVME pending IO to flush back to transport. 
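 * (lpfc_nvme_wait_for_io_drain() below blocks until the driver's
 * outstanding NVME requests have been returned by the transport)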
*/ 13254 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13255 lpfc_nvme_wait_for_io_drain(phba); 13256 13257 ccnt = 0; 13258 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13259 qp = &phba->sli4_hba.hdwq[idx]; 13260 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 13261 if (!io_xri_cmpl) /* if list is NOT empty */ 13262 ccnt++; 13263 } 13264 if (ccnt) 13265 io_xri_cmpl = 0; 13266 13267 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13268 nvmet_xri_cmpl = 13269 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13270 } 13271 13272 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 13273 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 13274 if (!nvmet_xri_cmpl) 13275 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13276 "6424 NVMET XRI exchange busy " 13277 "wait time: %d seconds.\n", 13278 wait_time/1000); 13279 if (!io_xri_cmpl) 13280 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13281 "6100 IO XRI exchange busy " 13282 "wait time: %d seconds.\n", 13283 wait_time/1000); 13284 if (!els_xri_cmpl) 13285 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13286 "2878 ELS XRI exchange busy " 13287 "wait time: %d seconds.\n", 13288 wait_time/1000); 13289 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 13290 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 13291 } else { 13292 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 13293 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 13294 } 13295 13296 ccnt = 0; 13297 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13298 qp = &phba->sli4_hba.hdwq[idx]; 13299 io_xri_cmpl = list_empty( 13300 &qp->lpfc_abts_io_buf_list); 13301 if (!io_xri_cmpl) /* if list is NOT empty */ 13302 ccnt++; 13303 } 13304 if (ccnt) 13305 io_xri_cmpl = 0; 13306 13307 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13308 nvmet_xri_cmpl = list_empty( 13309 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13310 } 13311 els_xri_cmpl = 13312 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13313 13314 } 13315 } 13316 13317 /** 13318 * lpfc_sli4_hba_unset - Unset the fcoe hba 13319 * @phba: Pointer to HBA context object. 13320 * 13321 * This function is called in the SLI4 code path to reset the HBA's FCoE 13322 * function. The caller is not required to hold any lock. This routine 13323 * issues PCI function reset mailbox command to reset the FCoE function. 13324 * At the end of the function, it calls lpfc_hba_down_post function to 13325 * free any pending commands. 13326 **/ 13327 static void 13328 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 13329 { 13330 int wait_cnt = 0; 13331 LPFC_MBOXQ_t *mboxq; 13332 struct pci_dev *pdev = phba->pcidev; 13333 13334 lpfc_stop_hba_timers(phba); 13335 hrtimer_cancel(&phba->cmf_timer); 13336 13337 if (phba->pport) 13338 phba->sli4_hba.intr_enable = 0; 13339 13340 /* 13341 * Gracefully wait out the potential current outstanding asynchronous 13342 * mailbox command. 
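 * (the loop below polls in 10ms steps, up to LPFC_ACTIVE_MBOX_WAIT_CNT
 * iterations, before force-completing the active mailbox command)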
13343 */ 13344 13345 /* First, block any pending async mailbox command from posted */ 13346 spin_lock_irq(&phba->hbalock); 13347 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 13348 spin_unlock_irq(&phba->hbalock); 13349 /* Now, trying to wait it out if we can */ 13350 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13351 msleep(10); 13352 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 13353 break; 13354 } 13355 /* Forcefully release the outstanding mailbox command if timed out */ 13356 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13357 spin_lock_irq(&phba->hbalock); 13358 mboxq = phba->sli.mbox_active; 13359 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 13360 __lpfc_mbox_cmpl_put(phba, mboxq); 13361 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13362 phba->sli.mbox_active = NULL; 13363 spin_unlock_irq(&phba->hbalock); 13364 } 13365 13366 /* Abort all iocbs associated with the hba */ 13367 lpfc_sli_hba_iocb_abort(phba); 13368 13369 /* Wait for completion of device XRI exchange busy */ 13370 lpfc_sli4_xri_exchange_busy_wait(phba); 13371 13372 /* per-phba callback de-registration for hotplug event */ 13373 if (phba->pport) 13374 lpfc_cpuhp_remove(phba); 13375 13376 /* Disable PCI subsystem interrupt */ 13377 lpfc_sli4_disable_intr(phba); 13378 13379 /* Disable SR-IOV if enabled */ 13380 if (phba->cfg_sriov_nr_virtfn) 13381 pci_disable_sriov(pdev); 13382 13383 /* Stop kthread signal shall trigger work_done one more time */ 13384 kthread_stop(phba->worker_thread); 13385 13386 /* Disable FW logging to host memory */ 13387 lpfc_ras_stop_fwlog(phba); 13388 13389 /* Unset the queues shared with the hardware then release all 13390 * allocated resources. 13391 */ 13392 lpfc_sli4_queue_unset(phba); 13393 lpfc_sli4_queue_destroy(phba); 13394 13395 /* Reset SLI4 HBA FCoE function */ 13396 lpfc_pci_function_reset(phba); 13397 13398 /* Free RAS DMA memory */ 13399 if (phba->ras_fwlog.ras_enabled) 13400 lpfc_sli4_ras_dma_free(phba); 13401 13402 /* Stop the SLI4 device port */ 13403 if (phba->pport) 13404 phba->pport->work_port_events = 0; 13405 } 13406 13407 static uint32_t 13408 lpfc_cgn_crc32(uint32_t crc, u8 byte) 13409 { 13410 uint32_t msb = 0; 13411 uint32_t bit; 13412 13413 for (bit = 0; bit < 8; bit++) { 13414 msb = (crc >> 31) & 1; 13415 crc <<= 1; 13416 13417 if (msb ^ (byte & 1)) { 13418 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; 13419 crc |= 1; 13420 } 13421 byte >>= 1; 13422 } 13423 return crc; 13424 } 13425 13426 static uint32_t 13427 lpfc_cgn_reverse_bits(uint32_t wd) 13428 { 13429 uint32_t result = 0; 13430 uint32_t i; 13431 13432 for (i = 0; i < 32; i++) { 13433 result <<= 1; 13434 result |= (1 & (wd >> i)); 13435 } 13436 return result; 13437 } 13438 13439 /* 13440 * The routine corresponds with the algorithm the HBA firmware 13441 * uses to validate the data integrity. 
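 * Callers seed it with LPFC_CGN_CRC32_SEED; the helper folds each byte
 * through lpfc_cgn_crc32() and returns the bit-reversed, inverted
 * accumulator (see lpfc_init_congestion_buf() below for typical use).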
13442 */ 13443 uint32_t 13444 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) 13445 { 13446 uint32_t i; 13447 uint32_t result; 13448 uint8_t *data = (uint8_t *)ptr; 13449 13450 for (i = 0; i < byteLen; ++i) 13451 crc = lpfc_cgn_crc32(crc, data[i]); 13452 13453 result = ~lpfc_cgn_reverse_bits(crc); 13454 return result; 13455 } 13456 13457 void 13458 lpfc_init_congestion_buf(struct lpfc_hba *phba) 13459 { 13460 struct lpfc_cgn_info *cp; 13461 struct timespec64 cmpl_time; 13462 struct tm broken; 13463 uint16_t size; 13464 uint32_t crc; 13465 13466 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13467 "6235 INIT Congestion Buffer %p\n", phba->cgn_i); 13468 13469 if (!phba->cgn_i) 13470 return; 13471 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13472 13473 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 13474 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 13475 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 13476 atomic_set(&phba->cgn_sync_warn_cnt, 0); 13477 13478 atomic_set(&phba->cgn_driver_evt_cnt, 0); 13479 atomic_set(&phba->cgn_latency_evt_cnt, 0); 13480 atomic64_set(&phba->cgn_latency_evt, 0); 13481 phba->cgn_evt_minute = 0; 13482 phba->hba_flag &= ~HBA_CGN_DAY_WRAP; 13483 13484 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat)); 13485 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); 13486 cp->cgn_info_version = LPFC_CGN_INFO_V3; 13487 13488 /* cgn parameters */ 13489 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 13490 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 13491 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 13492 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 13493 13494 ktime_get_real_ts64(&cmpl_time); 13495 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13496 13497 cp->cgn_info_month = broken.tm_mon + 1; 13498 cp->cgn_info_day = broken.tm_mday; 13499 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ 13500 cp->cgn_info_hour = broken.tm_hour; 13501 cp->cgn_info_minute = broken.tm_min; 13502 cp->cgn_info_second = broken.tm_sec; 13503 13504 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13505 "2643 CGNInfo Init: Start Time " 13506 "%d/%d/%d %d:%d:%d\n", 13507 cp->cgn_info_day, cp->cgn_info_month, 13508 cp->cgn_info_year, cp->cgn_info_hour, 13509 cp->cgn_info_minute, cp->cgn_info_second); 13510 13511 /* Fill in default LUN qdepth */ 13512 if (phba->pport) { 13513 size = (uint16_t)(phba->pport->cfg_lun_queue_depth); 13514 cp->cgn_lunq = cpu_to_le16(size); 13515 } 13516 13517 /* last used Index initialized to 0xff already */ 13518 13519 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13520 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13521 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13522 cp->cgn_info_crc = cpu_to_le32(crc); 13523 13524 phba->cgn_evt_timestamp = jiffies + 13525 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 13526 } 13527 13528 void 13529 lpfc_init_congestion_stat(struct lpfc_hba *phba) 13530 { 13531 struct lpfc_cgn_info *cp; 13532 struct timespec64 cmpl_time; 13533 struct tm broken; 13534 uint32_t crc; 13535 13536 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13537 "6236 INIT Congestion Stat %p\n", phba->cgn_i); 13538 13539 if (!phba->cgn_i) 13540 return; 13541 13542 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13543 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); 13544 13545 ktime_get_real_ts64(&cmpl_time); 13546 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13547 13548 cp->cgn_stat_month = broken.tm_mon + 1; 13549 cp->cgn_stat_day = broken.tm_mday; 13550 cp->cgn_stat_year = 
broken.tm_year - 100; /* relative to 2000 */ 13551 cp->cgn_stat_hour = broken.tm_hour; 13552 cp->cgn_stat_minute = broken.tm_min; 13553 13554 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13555 "2647 CGNstat Init: Start Time " 13556 "%d/%d/%d %d:%d\n", 13557 cp->cgn_stat_day, cp->cgn_stat_month, 13558 cp->cgn_stat_year, cp->cgn_stat_hour, 13559 cp->cgn_stat_minute); 13560 13561 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13562 cp->cgn_info_crc = cpu_to_le32(crc); 13563 } 13564 13565 /** 13566 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA 13567 * @phba: Pointer to hba context object. 13568 * @reg: flag to determine register or unregister. 13569 */ 13570 static int 13571 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) 13572 { 13573 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; 13574 union lpfc_sli4_cfg_shdr *shdr; 13575 uint32_t shdr_status, shdr_add_status; 13576 LPFC_MBOXQ_t *mboxq; 13577 int length, rc; 13578 13579 if (!phba->cgn_i) 13580 return -ENXIO; 13581 13582 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13583 if (!mboxq) { 13584 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 13585 "2641 REG_CONGESTION_BUF mbox allocation fail: " 13586 "HBA state x%x reg %d\n", 13587 phba->pport->port_state, reg); 13588 return -ENOMEM; 13589 } 13590 13591 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - 13592 sizeof(struct lpfc_sli4_cfg_mhdr)); 13593 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13594 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, 13595 LPFC_SLI4_MBX_EMBED); 13596 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; 13597 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); 13598 if (reg > 0) 13599 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); 13600 else 13601 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); 13602 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); 13603 reg_congestion_buf->addr_lo = 13604 putPaddrLow(phba->cgn_i->phys); 13605 reg_congestion_buf->addr_hi = 13606 putPaddrHigh(phba->cgn_i->phys); 13607 13608 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13609 shdr = (union lpfc_sli4_cfg_shdr *) 13610 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 13611 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13612 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13613 &shdr->response); 13614 mempool_free(mboxq, phba->mbox_mem_pool); 13615 if (shdr_status || shdr_add_status || rc) { 13616 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13617 "2642 REG_CONGESTION_BUF mailbox " 13618 "failed with status x%x add_status x%x," 13619 " mbx status x%x reg %d\n", 13620 shdr_status, shdr_add_status, rc, reg); 13621 return -ENXIO; 13622 } 13623 return 0; 13624 } 13625 13626 int 13627 lpfc_unreg_congestion_buf(struct lpfc_hba *phba) 13628 { 13629 lpfc_cmf_stop(phba); 13630 return __lpfc_reg_congestion_buf(phba, 0); 13631 } 13632 13633 int 13634 lpfc_reg_congestion_buf(struct lpfc_hba *phba) 13635 { 13636 return __lpfc_reg_congestion_buf(phba, 1); 13637 } 13638 13639 /** 13640 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 13641 * @phba: Pointer to HBA context object. 13642 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 13643 * 13644 * This function is called in the SLI4 code path to read the port's 13645 * sli4 capabilities. 13646 * 13647 * This function may be be called from any context that can block-wait 13648 * for the completion. 
The expectation is that this routine is called 13649 * typically from probe_one or from the online routine. 13650 **/ 13651 int 13652 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 13653 { 13654 int rc; 13655 struct lpfc_mqe *mqe = &mboxq->u.mqe; 13656 struct lpfc_pc_sli4_params *sli4_params; 13657 uint32_t mbox_tmo; 13658 int length; 13659 bool exp_wqcq_pages = true; 13660 struct lpfc_sli4_parameters *mbx_sli4_parameters; 13661 13662 /* 13663 * By default, the driver assumes the SLI4 port requires RPI 13664 * header postings. The SLI4_PARAM response will correct this 13665 * assumption. 13666 */ 13667 phba->sli4_hba.rpi_hdrs_in_use = 1; 13668 13669 /* Read the port's SLI4 Config Parameters */ 13670 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 13671 sizeof(struct lpfc_sli4_cfg_mhdr)); 13672 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13673 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 13674 length, LPFC_SLI4_MBX_EMBED); 13675 if (!phba->sli4_hba.intr_enable) 13676 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13677 else { 13678 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 13679 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 13680 } 13681 if (unlikely(rc)) 13682 return rc; 13683 sli4_params = &phba->sli4_hba.pc_sli4_params; 13684 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 13685 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 13686 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 13687 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 13688 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 13689 mbx_sli4_parameters); 13690 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 13691 mbx_sli4_parameters); 13692 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 13693 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 13694 else 13695 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 13696 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 13697 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, 13698 mbx_sli4_parameters); 13699 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 13700 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 13701 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 13702 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 13703 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 13704 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 13705 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 13706 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 13707 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 13708 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 13709 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 13710 mbx_sli4_parameters); 13711 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 13712 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 13713 mbx_sli4_parameters); 13714 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 13715 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 13716 13717 /* Check for Extended Pre-Registered SGL support */ 13718 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 13719 13720 /* Check for firmware nvme support */ 13721 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 13722 bf_get(cfg_xib, mbx_sli4_parameters)); 13723 13724 if (rc) { 13725 /* Save this to indicate the Firmware supports NVME */ 13726 sli4_params->nvme = 1; 13727 13728 /* Firmware NVME support, 
check driver FC4 NVME support */ 13729 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 13730 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13731 "6133 Disabling NVME support: " 13732 "FC4 type not supported: x%x\n", 13733 phba->cfg_enable_fc4_type); 13734 goto fcponly; 13735 } 13736 } else { 13737 /* No firmware NVME support, check driver FC4 NVME support */ 13738 sli4_params->nvme = 0; 13739 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 13741 "6101 Disabling NVME support: Not " 13742 "supported by firmware (%d %d) x%x\n", 13743 bf_get(cfg_nvme, mbx_sli4_parameters), 13744 bf_get(cfg_xib, mbx_sli4_parameters), 13745 phba->cfg_enable_fc4_type); 13746 fcponly: 13747 phba->nvmet_support = 0; 13748 phba->cfg_nvmet_mrq = 0; 13749 phba->cfg_nvme_seg_cnt = 0; 13750 13751 /* If no FC4 type support, move to just SCSI support */ 13752 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 13753 return -ENODEV; 13754 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 13755 } 13756 } 13757 13758 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 13759 * accommodate 512K and 1M IOs in a single nvme buf. 13760 */ 13761 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13762 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 13763 13764 /* Enable embedded Payload BDE if support is indicated */ 13765 if (bf_get(cfg_pbde, mbx_sli4_parameters)) 13766 phba->cfg_enable_pbde = 1; 13767 else 13768 phba->cfg_enable_pbde = 0; 13769 13770 /* 13771 * To support Suppress Response feature we must satisfy 3 conditions. 13772 * lpfc_suppress_rsp module parameter must be set (default). 13773 * In SLI4-Parameters Descriptor: 13774 * Extended Inline Buffers (XIB) must be supported. 13775 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 13776 * (double negative). 13777 */ 13778 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 13779 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 13780 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 13781 else 13782 phba->cfg_suppress_rsp = 0; 13783 13784 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 13785 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 13786 13787 /* Make sure that sge_supp_len can be handled by the driver */ 13788 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 13789 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 13790 13791 /* 13792 * Check whether the adapter supports an embedded copy of the 13793 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 13794 * to use this option, 128-byte WQEs must be used. 
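 * (phba->fcp_embed_io, set below, is presumably consulted later when
 * the driver builds FCP WQEs)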
13795 */ 13796 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 13797 phba->fcp_embed_io = 1; 13798 else 13799 phba->fcp_embed_io = 0; 13800 13801 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13802 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 13803 bf_get(cfg_xib, mbx_sli4_parameters), 13804 phba->cfg_enable_pbde, 13805 phba->fcp_embed_io, sli4_params->nvme, 13806 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 13807 13808 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 13809 LPFC_SLI_INTF_IF_TYPE_2) && 13810 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 13811 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 13812 exp_wqcq_pages = false; 13813 13814 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 13815 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 13816 exp_wqcq_pages && 13817 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 13818 phba->enab_exp_wqcq_pages = 1; 13819 else 13820 phba->enab_exp_wqcq_pages = 0; 13821 /* 13822 * Check if the SLI port supports MDS Diagnostics 13823 */ 13824 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 13825 phba->mds_diags_support = 1; 13826 else 13827 phba->mds_diags_support = 0; 13828 13829 /* 13830 * Check if the SLI port supports NSLER 13831 */ 13832 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 13833 phba->nsler = 1; 13834 else 13835 phba->nsler = 0; 13836 13837 return 0; 13838 } 13839 13840 /** 13841 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 13842 * @pdev: pointer to PCI device 13843 * @pid: pointer to PCI device identifier 13844 * 13845 * This routine is to be called to attach a device with SLI-3 interface spec 13846 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 13847 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 13848 * information of the device and driver to see if the driver state that it can 13849 * support this kind of device. If the match is successful, the driver core 13850 * invokes this routine. If this routine determines it can claim the HBA, it 13851 * does all the initialization that it needs to do to handle the HBA properly. 
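 * Roughly, the bring-up order below is: allocate the phba, enable the
 * PCI device, install the SLI-3 API jump table, map PCI memory, set up
 * driver resources and the iocb list, create the SCSI host and sysfs
 * attributes, then enable interrupts and run the SLI-3 HBA setup.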
13852 * 13853 * Return code 13854 * 0 - driver can claim the device 13855 * negative value - driver can not claim the device 13856 **/ 13857 static int 13858 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 13859 { 13860 struct lpfc_hba *phba; 13861 struct lpfc_vport *vport = NULL; 13862 struct Scsi_Host *shost = NULL; 13863 int error; 13864 uint32_t cfg_mode, intr_mode; 13865 13866 /* Allocate memory for HBA structure */ 13867 phba = lpfc_hba_alloc(pdev); 13868 if (!phba) 13869 return -ENOMEM; 13870 13871 /* Perform generic PCI device enabling operation */ 13872 error = lpfc_enable_pci_dev(phba); 13873 if (error) 13874 goto out_free_phba; 13875 13876 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 13877 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 13878 if (error) 13879 goto out_disable_pci_dev; 13880 13881 /* Set up SLI-3 specific device PCI memory space */ 13882 error = lpfc_sli_pci_mem_setup(phba); 13883 if (error) { 13884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13885 "1402 Failed to set up pci memory space.\n"); 13886 goto out_disable_pci_dev; 13887 } 13888 13889 /* Set up SLI-3 specific device driver resources */ 13890 error = lpfc_sli_driver_resource_setup(phba); 13891 if (error) { 13892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13893 "1404 Failed to set up driver resource.\n"); 13894 goto out_unset_pci_mem_s3; 13895 } 13896 13897 /* Initialize and populate the iocb list per host */ 13898 13899 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 13900 if (error) { 13901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13902 "1405 Failed to initialize iocb list.\n"); 13903 goto out_unset_driver_resource_s3; 13904 } 13905 13906 /* Set up common device driver resources */ 13907 error = lpfc_setup_driver_resource_phase2(phba); 13908 if (error) { 13909 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13910 "1406 Failed to set up driver resource.\n"); 13911 goto out_free_iocb_list; 13912 } 13913 13914 /* Get the default values for Model Name and Description */ 13915 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 13916 13917 /* Create SCSI host to the physical port */ 13918 error = lpfc_create_shost(phba); 13919 if (error) { 13920 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13921 "1407 Failed to create scsi host.\n"); 13922 goto out_unset_driver_resource; 13923 } 13924 13925 /* Configure sysfs attributes */ 13926 vport = phba->pport; 13927 error = lpfc_alloc_sysfs_attr(vport); 13928 if (error) { 13929 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13930 "1476 Failed to allocate sysfs attr\n"); 13931 goto out_destroy_shost; 13932 } 13933 13934 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 13935 /* Now, trying to enable interrupt and bring up the device */ 13936 cfg_mode = phba->cfg_use_msi; 13937 while (true) { 13938 /* Put device to a known state before enabling interrupt */ 13939 lpfc_stop_port(phba); 13940 /* Configure and enable interrupt */ 13941 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 13942 if (intr_mode == LPFC_INTR_ERROR) { 13943 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13944 "0431 Failed to enable interrupt.\n"); 13945 error = -ENODEV; 13946 goto out_free_sysfs_attr; 13947 } 13948 /* SLI-3 HBA setup */ 13949 if (lpfc_sli_hba_setup(phba)) { 13950 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13951 "1477 Failed to set up hba\n"); 13952 error = -ENODEV; 13953 goto out_remove_device; 13954 } 13955 13956 /* Wait 50ms for the interrupts of previous mailbox commands */ 13957 
msleep(50); 13958 /* Check active interrupts on message signaled interrupts */ 13959 if (intr_mode == 0 || 13960 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 13961 /* Log the current active interrupt mode */ 13962 phba->intr_mode = intr_mode; 13963 lpfc_log_intr_mode(phba, intr_mode); 13964 break; 13965 } else { 13966 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13967 "0447 Configure interrupt mode (%d) " 13968 "failed active interrupt test.\n", 13969 intr_mode); 13970 /* Disable the current interrupt mode */ 13971 lpfc_sli_disable_intr(phba); 13972 /* Try next level of interrupt mode */ 13973 cfg_mode = --intr_mode; 13974 } 13975 } 13976 13977 /* Perform post initialization setup */ 13978 lpfc_post_init_setup(phba); 13979 13980 /* Check if there are static vports to be created. */ 13981 lpfc_create_static_vport(phba); 13982 13983 return 0; 13984 13985 out_remove_device: 13986 lpfc_unset_hba(phba); 13987 out_free_sysfs_attr: 13988 lpfc_free_sysfs_attr(vport); 13989 out_destroy_shost: 13990 lpfc_destroy_shost(phba); 13991 out_unset_driver_resource: 13992 lpfc_unset_driver_resource_phase2(phba); 13993 out_free_iocb_list: 13994 lpfc_free_iocb_list(phba); 13995 out_unset_driver_resource_s3: 13996 lpfc_sli_driver_resource_unset(phba); 13997 out_unset_pci_mem_s3: 13998 lpfc_sli_pci_mem_unset(phba); 13999 out_disable_pci_dev: 14000 lpfc_disable_pci_dev(phba); 14001 if (shost) 14002 scsi_host_put(shost); 14003 out_free_phba: 14004 lpfc_hba_free(phba); 14005 return error; 14006 } 14007 14008 /** 14009 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 14010 * @pdev: pointer to PCI device 14011 * 14012 * This routine is to be called to disattach a device with SLI-3 interface 14013 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 14014 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14015 * device to be removed from the PCI subsystem properly. 14016 **/ 14017 static void 14018 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 14019 { 14020 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14021 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14022 struct lpfc_vport **vports; 14023 struct lpfc_hba *phba = vport->phba; 14024 int i; 14025 14026 spin_lock_irq(&phba->hbalock); 14027 vport->load_flag |= FC_UNLOADING; 14028 spin_unlock_irq(&phba->hbalock); 14029 14030 lpfc_free_sysfs_attr(vport); 14031 14032 /* Release all the vports against this physical port */ 14033 vports = lpfc_create_vport_work_array(phba); 14034 if (vports != NULL) 14035 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14036 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14037 continue; 14038 fc_vport_terminate(vports[i]->fc_vport); 14039 } 14040 lpfc_destroy_vport_work_array(phba, vports); 14041 14042 /* Remove FC host with the physical port */ 14043 fc_remove_host(shost); 14044 scsi_remove_host(shost); 14045 14046 /* Clean up all nodes, mailboxes and IOs. */ 14047 lpfc_cleanup(vport); 14048 14049 /* 14050 * Bring down the SLI Layer. This step disable all interrupts, 14051 * clears the rings, discards all mailbox commands, and resets 14052 * the HBA. 
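 * (lpfc_sli_hba_down(), kthread_stop() and lpfc_sli_brdrestart() below
 * carry out these steps in that order)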
14053 */ 14054 14055 /* HBA interrupt will be disabled after this call */ 14056 lpfc_sli_hba_down(phba); 14057 /* Stop kthread signal shall trigger work_done one more time */ 14058 kthread_stop(phba->worker_thread); 14059 /* Final cleanup of txcmplq and reset the HBA */ 14060 lpfc_sli_brdrestart(phba); 14061 14062 kfree(phba->vpi_bmask); 14063 kfree(phba->vpi_ids); 14064 14065 lpfc_stop_hba_timers(phba); 14066 spin_lock_irq(&phba->port_list_lock); 14067 list_del_init(&vport->listentry); 14068 spin_unlock_irq(&phba->port_list_lock); 14069 14070 lpfc_debugfs_terminate(vport); 14071 14072 /* Disable SR-IOV if enabled */ 14073 if (phba->cfg_sriov_nr_virtfn) 14074 pci_disable_sriov(pdev); 14075 14076 /* Disable interrupt */ 14077 lpfc_sli_disable_intr(phba); 14078 14079 scsi_host_put(shost); 14080 14081 /* 14082 * Call scsi_free before mem_free since scsi bufs are released to their 14083 * corresponding pools here. 14084 */ 14085 lpfc_scsi_free(phba); 14086 lpfc_free_iocb_list(phba); 14087 14088 lpfc_mem_free_all(phba); 14089 14090 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 14091 phba->hbqslimp.virt, phba->hbqslimp.phys); 14092 14093 /* Free resources associated with SLI2 interface */ 14094 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 14095 phba->slim2p.virt, phba->slim2p.phys); 14096 14097 /* unmap adapter SLIM and Control Registers */ 14098 iounmap(phba->ctrl_regs_memmap_p); 14099 iounmap(phba->slim_memmap_p); 14100 14101 lpfc_hba_free(phba); 14102 14103 pci_release_mem_regions(pdev); 14104 pci_disable_device(pdev); 14105 } 14106 14107 /** 14108 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 14109 * @dev_d: pointer to device 14110 * 14111 * This routine is to be called from the kernel's PCI subsystem to support 14112 * system Power Management (PM) to device with SLI-3 interface spec. When 14113 * PM invokes this method, it quiesces the device by stopping the driver's 14114 * worker thread for the device, turning off device's interrupt and DMA, 14115 * and bring the device offline. Note that as the driver implements the 14116 * minimum PM requirements to a power-aware driver's PM support for the 14117 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 14118 * to the suspend() method call will be treated as SUSPEND and the driver will 14119 * fully reinitialize its device during resume() method call, the driver will 14120 * set device to PCI_D3hot state in PCI config space instead of setting it 14121 * according to the @msg provided by the PM. 14122 * 14123 * Return code 14124 * 0 - driver suspended the device 14125 * Error otherwise 14126 **/ 14127 static int __maybe_unused 14128 lpfc_pci_suspend_one_s3(struct device *dev_d) 14129 { 14130 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14131 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14132 14133 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14134 "0473 PCI device Power Management suspend.\n"); 14135 14136 /* Bring down the device */ 14137 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14138 lpfc_offline(phba); 14139 kthread_stop(phba->worker_thread); 14140 14141 /* Disable interrupt from device */ 14142 lpfc_sli_disable_intr(phba); 14143 14144 return 0; 14145 } 14146 14147 /** 14148 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 14149 * @dev_d: pointer to device 14150 * 14151 * This routine is to be called from the kernel's PCI subsystem to support 14152 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);
	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
14238 **/ 14239 static void 14240 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 14241 { 14242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14243 "2710 PCI channel disable preparing for reset\n"); 14244 14245 /* Block any management I/Os to the device */ 14246 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 14247 14248 /* Block all SCSI devices' I/Os on the host */ 14249 lpfc_scsi_dev_block(phba); 14250 14251 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 14252 lpfc_sli_flush_io_rings(phba); 14253 14254 /* stop all timers */ 14255 lpfc_stop_hba_timers(phba); 14256 14257 /* Disable interrupt and pci device */ 14258 lpfc_sli_disable_intr(phba); 14259 pci_disable_device(phba->pcidev); 14260 } 14261 14262 /** 14263 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 14264 * @phba: pointer to lpfc hba data structure. 14265 * 14266 * This routine is called to prepare the SLI3 device for PCI slot permanently 14267 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 14268 * pending I/Os. 14269 **/ 14270 static void 14271 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 14272 { 14273 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14274 "2711 PCI channel permanent disable for failure\n"); 14275 /* Block all SCSI devices' I/Os on the host */ 14276 lpfc_scsi_dev_block(phba); 14277 14278 /* stop all timers */ 14279 lpfc_stop_hba_timers(phba); 14280 14281 /* Clean up all driver's outstanding SCSI I/Os */ 14282 lpfc_sli_flush_io_rings(phba); 14283 } 14284 14285 /** 14286 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 14287 * @pdev: pointer to PCI device. 14288 * @state: the current PCI connection state. 14289 * 14290 * This routine is called from the PCI subsystem for I/O error handling to 14291 * device with SLI-3 interface spec. This function is called by the PCI 14292 * subsystem after a PCI bus error affecting this device has been detected. 14293 * When this function is invoked, it will need to stop all the I/Os and 14294 * interrupt(s) to the device. Once that is done, it will return 14295 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 14296 * as desired. 
14297 * 14298 * Return codes 14299 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 14300 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 14301 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14302 **/ 14303 static pci_ers_result_t 14304 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 14305 { 14306 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14307 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14308 14309 switch (state) { 14310 case pci_channel_io_normal: 14311 /* Non-fatal error, prepare for recovery */ 14312 lpfc_sli_prep_dev_for_recover(phba); 14313 return PCI_ERS_RESULT_CAN_RECOVER; 14314 case pci_channel_io_frozen: 14315 /* Fatal error, prepare for slot reset */ 14316 lpfc_sli_prep_dev_for_reset(phba); 14317 return PCI_ERS_RESULT_NEED_RESET; 14318 case pci_channel_io_perm_failure: 14319 /* Permanent failure, prepare for device down */ 14320 lpfc_sli_prep_dev_for_perm_failure(phba); 14321 return PCI_ERS_RESULT_DISCONNECT; 14322 default: 14323 /* Unknown state, prepare and request slot reset */ 14324 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14325 "0472 Unknown PCI error state: x%x\n", state); 14326 lpfc_sli_prep_dev_for_reset(phba); 14327 return PCI_ERS_RESULT_NEED_RESET; 14328 } 14329 } 14330 14331 /** 14332 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 14333 * @pdev: pointer to PCI device. 14334 * 14335 * This routine is called from the PCI subsystem for error handling to 14336 * device with SLI-3 interface spec. This is called after PCI bus has been 14337 * reset to restart the PCI card from scratch, as if from a cold-boot. 14338 * During the PCI subsystem error recovery, after driver returns 14339 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 14340 * recovery and then call this routine before calling the .resume method 14341 * to recover the device. This function will initialize the HBA device, 14342 * enable the interrupt, but it will just put the HBA to offline state 14343 * without passing any I/O traffic. 14344 * 14345 * Return codes 14346 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 14347 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14348 */ 14349 static pci_ers_result_t 14350 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 14351 { 14352 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14353 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14354 struct lpfc_sli *psli = &phba->sli; 14355 uint32_t intr_mode; 14356 14357 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 14358 if (pci_enable_device_mem(pdev)) { 14359 printk(KERN_ERR "lpfc: Cannot re-enable " 14360 "PCI device after reset.\n"); 14361 return PCI_ERS_RESULT_DISCONNECT; 14362 } 14363 14364 pci_restore_state(pdev); 14365 14366 /* 14367 * As the new kernel behavior of pci_restore_state() API call clears 14368 * device saved_state flag, need to save the restored state again. 
14369 */ 14370 pci_save_state(pdev); 14371 14372 if (pdev->is_busmaster) 14373 pci_set_master(pdev); 14374 14375 spin_lock_irq(&phba->hbalock); 14376 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 14377 spin_unlock_irq(&phba->hbalock); 14378 14379 /* Configure and enable interrupt */ 14380 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14381 if (intr_mode == LPFC_INTR_ERROR) { 14382 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14383 "0427 Cannot re-enable interrupt after " 14384 "slot reset.\n"); 14385 return PCI_ERS_RESULT_DISCONNECT; 14386 } else 14387 phba->intr_mode = intr_mode; 14388 14389 /* Take device offline, it will perform cleanup */ 14390 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14391 lpfc_offline(phba); 14392 lpfc_sli_brdrestart(phba); 14393 14394 /* Log the current active interrupt mode */ 14395 lpfc_log_intr_mode(phba, phba->intr_mode); 14396 14397 return PCI_ERS_RESULT_RECOVERED; 14398 } 14399 14400 /** 14401 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 14402 * @pdev: pointer to PCI device 14403 * 14404 * This routine is called from the PCI subsystem for error handling to device 14405 * with SLI-3 interface spec. It is called when kernel error recovery tells 14406 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 14407 * error recovery. After this call, traffic can start to flow from this device 14408 * again. 14409 */ 14410 static void 14411 lpfc_io_resume_s3(struct pci_dev *pdev) 14412 { 14413 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14414 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14415 14416 /* Bring device online, it will be no-op for non-fatal error resume */ 14417 lpfc_online(phba); 14418 } 14419 14420 /** 14421 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 14422 * @phba: pointer to lpfc hba data structure. 14423 * 14424 * returns the number of ELS/CT IOCBs to reserve 14425 **/ 14426 int 14427 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 14428 { 14429 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 14430 14431 if (phba->sli_rev == LPFC_SLI_REV4) { 14432 if (max_xri <= 100) 14433 return 10; 14434 else if (max_xri <= 256) 14435 return 25; 14436 else if (max_xri <= 512) 14437 return 50; 14438 else if (max_xri <= 1024) 14439 return 100; 14440 else if (max_xri <= 1536) 14441 return 150; 14442 else if (max_xri <= 2048) 14443 return 200; 14444 else 14445 return 250; 14446 } else 14447 return 0; 14448 } 14449 14450 /** 14451 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 14452 * @phba: pointer to lpfc hba data structure. 14453 * 14454 * returns the number of ELS/CT + NVMET IOCBs to reserve 14455 **/ 14456 int 14457 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 14458 { 14459 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 14460 14461 if (phba->nvmet_support) 14462 max_xri += LPFC_NVMET_BUF_POST; 14463 return max_xri; 14464 } 14465 14466 14467 static int 14468 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 14469 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 14470 const struct firmware *fw) 14471 { 14472 int rc; 14473 u8 sli_family; 14474 14475 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 14476 /* Three cases: (1) FW was not supported on the detected adapter. 14477 * (2) FW update has been locked out administratively. 14478 * (3) Some other error during FW update. 
	 * In each case, an unmaskable message is written to the console
	 * for admin diagnosis.
	 */
	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
	     magic_number != MAGIC_NUMBER_G6) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
	     magic_number != MAGIC_NUMBER_G7) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
	     magic_number != MAGIC_NUMBER_G7P)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3030 This firmware version is not supported on"
				" this HBA model. Device:%x Magic:%x Type:%x "
				"ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EINVAL;
	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3021 Firmware downloads have been prohibited "
				"by a system configuration setting on "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EACCES;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3022 FW Download failed. Add Status x%x "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
		rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure passed as the
 *	     firmware completion context.
 *
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
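					/*
					 * Final partial chunk of the image:
					 * only the remaining bytes were copied
					 * into this buffer, so stop filling
					 * buffers for this pass.
					 */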
14579 temp_offset = fw->size; 14580 break; 14581 } 14582 memcpy(dmabuf->virt, fw->data + temp_offset, 14583 SLI4_PAGE_SIZE); 14584 temp_offset += SLI4_PAGE_SIZE; 14585 } 14586 rc = lpfc_wr_object(phba, &dma_buffer_list, 14587 (fw->size - offset), &offset); 14588 if (rc) { 14589 rc = lpfc_log_write_firmware_error(phba, offset, 14590 magic_number, 14591 ftype, 14592 fid, 14593 fsize, 14594 fw); 14595 goto release_out; 14596 } 14597 } 14598 rc = offset; 14599 } else 14600 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14601 "3029 Skipped Firmware update, Current " 14602 "Version:%s New Version:%s\n", 14603 fwrev, image->revision); 14604 14605 release_out: 14606 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 14607 list_del(&dmabuf->list); 14608 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 14609 dmabuf->virt, dmabuf->phys); 14610 kfree(dmabuf); 14611 } 14612 release_firmware(fw); 14613 out: 14614 if (rc < 0) 14615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14616 "3062 Firmware update error, status %d.\n", rc); 14617 else 14618 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14619 "3024 Firmware update success: size %d.\n", rc); 14620 } 14621 14622 /** 14623 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 14624 * @phba: pointer to lpfc hba data structure. 14625 * @fw_upgrade: which firmware to update. 14626 * 14627 * This routine is called to perform Linux generic firmware upgrade on device 14628 * that supports such feature. 14629 **/ 14630 int 14631 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 14632 { 14633 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 14634 int ret; 14635 const struct firmware *fw; 14636 14637 /* Only supported on SLI4 interface type 2 for now */ 14638 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 14639 LPFC_SLI_INTF_IF_TYPE_2) 14640 return -EPERM; 14641 14642 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 14643 14644 if (fw_upgrade == INT_FW_UPGRADE) { 14645 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, 14646 file_name, &phba->pcidev->dev, 14647 GFP_KERNEL, (void *)phba, 14648 lpfc_write_firmware); 14649 } else if (fw_upgrade == RUN_FW_UPGRADE) { 14650 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 14651 if (!ret) 14652 lpfc_write_firmware(fw, (void *)phba); 14653 } else { 14654 ret = -EINVAL; 14655 } 14656 14657 return ret; 14658 } 14659 14660 /** 14661 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 14662 * @pdev: pointer to PCI device 14663 * @pid: pointer to PCI device identifier 14664 * 14665 * This routine is called from the kernel's PCI subsystem to device with 14666 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14667 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 14668 * information of the device and driver to see if the driver state that it 14669 * can support this kind of device. If the match is successful, the driver 14670 * core invokes this routine. If this routine determines it can claim the HBA, 14671 * it does all the initialization that it needs to do to handle the HBA 14672 * properly. 
14673 * 14674 * Return code 14675 * 0 - driver can claim the device 14676 * negative value - driver can not claim the device 14677 **/ 14678 static int 14679 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 14680 { 14681 struct lpfc_hba *phba; 14682 struct lpfc_vport *vport = NULL; 14683 struct Scsi_Host *shost = NULL; 14684 int error; 14685 uint32_t cfg_mode, intr_mode; 14686 14687 /* Allocate memory for HBA structure */ 14688 phba = lpfc_hba_alloc(pdev); 14689 if (!phba) 14690 return -ENOMEM; 14691 14692 INIT_LIST_HEAD(&phba->poll_list); 14693 14694 /* Perform generic PCI device enabling operation */ 14695 error = lpfc_enable_pci_dev(phba); 14696 if (error) 14697 goto out_free_phba; 14698 14699 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 14700 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 14701 if (error) 14702 goto out_disable_pci_dev; 14703 14704 /* Set up SLI-4 specific device PCI memory space */ 14705 error = lpfc_sli4_pci_mem_setup(phba); 14706 if (error) { 14707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14708 "1410 Failed to set up pci memory space.\n"); 14709 goto out_disable_pci_dev; 14710 } 14711 14712 /* Set up SLI-4 Specific device driver resources */ 14713 error = lpfc_sli4_driver_resource_setup(phba); 14714 if (error) { 14715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14716 "1412 Failed to set up driver resource.\n"); 14717 goto out_unset_pci_mem_s4; 14718 } 14719 14720 INIT_LIST_HEAD(&phba->active_rrq_list); 14721 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 14722 14723 /* Set up common device driver resources */ 14724 error = lpfc_setup_driver_resource_phase2(phba); 14725 if (error) { 14726 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14727 "1414 Failed to set up driver resource.\n"); 14728 goto out_unset_driver_resource_s4; 14729 } 14730 14731 /* Get the default values for Model Name and Description */ 14732 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 14733 14734 /* Now, trying to enable interrupt and bring up the device */ 14735 cfg_mode = phba->cfg_use_msi; 14736 14737 /* Put device to a known state before enabling interrupt */ 14738 phba->pport = NULL; 14739 lpfc_stop_port(phba); 14740 14741 /* Init cpu_map array */ 14742 lpfc_cpu_map_array_init(phba); 14743 14744 /* Init hba_eq_hdl array */ 14745 lpfc_hba_eq_hdl_array_init(phba); 14746 14747 /* Configure and enable interrupt */ 14748 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 14749 if (intr_mode == LPFC_INTR_ERROR) { 14750 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14751 "0426 Failed to enable interrupt.\n"); 14752 error = -ENODEV; 14753 goto out_unset_driver_resource; 14754 } 14755 /* Default to single EQ for non-MSI-X */ 14756 if (phba->intr_type != MSIX) { 14757 phba->cfg_irq_chann = 1; 14758 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14759 if (phba->nvmet_support) 14760 phba->cfg_nvmet_mrq = 1; 14761 } 14762 } 14763 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 14764 14765 /* Create SCSI host to the physical port */ 14766 error = lpfc_create_shost(phba); 14767 if (error) { 14768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14769 "1415 Failed to create scsi host.\n"); 14770 goto out_disable_intr; 14771 } 14772 vport = phba->pport; 14773 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 14774 14775 /* Configure sysfs attributes */ 14776 error = lpfc_alloc_sysfs_attr(vport); 14777 if (error) { 14778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14779 "1416 Failed to allocate sysfs attr\n"); 14780 goto 
out_destroy_shost; 14781 } 14782 14783 /* Set up SLI-4 HBA */ 14784 if (lpfc_sli4_hba_setup(phba)) { 14785 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14786 "1421 Failed to set up hba\n"); 14787 error = -ENODEV; 14788 goto out_free_sysfs_attr; 14789 } 14790 14791 /* Log the current active interrupt mode */ 14792 phba->intr_mode = intr_mode; 14793 lpfc_log_intr_mode(phba, intr_mode); 14794 14795 /* Perform post initialization setup */ 14796 lpfc_post_init_setup(phba); 14797 14798 /* NVME support in FW earlier in the driver load corrects the 14799 * FC4 type making a check for nvme_support unnecessary. 14800 */ 14801 if (phba->nvmet_support == 0) { 14802 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14803 /* Create NVME binding with nvme_fc_transport. This 14804 * ensures the vport is initialized. If the localport 14805 * create fails, it should not unload the driver to 14806 * support field issues. 14807 */ 14808 error = lpfc_nvme_create_localport(vport); 14809 if (error) { 14810 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14811 "6004 NVME registration " 14812 "failed, error x%x\n", 14813 error); 14814 } 14815 } 14816 } 14817 14818 /* check for firmware upgrade or downgrade */ 14819 if (phba->cfg_request_firmware_upgrade) 14820 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 14821 14822 /* Check if there are static vports to be created. */ 14823 lpfc_create_static_vport(phba); 14824 14825 /* Enable RAS FW log support */ 14826 lpfc_sli4_ras_setup(phba); 14827 14828 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 14829 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 14830 14831 return 0; 14832 14833 out_free_sysfs_attr: 14834 lpfc_free_sysfs_attr(vport); 14835 out_destroy_shost: 14836 lpfc_destroy_shost(phba); 14837 out_disable_intr: 14838 lpfc_sli4_disable_intr(phba); 14839 out_unset_driver_resource: 14840 lpfc_unset_driver_resource_phase2(phba); 14841 out_unset_driver_resource_s4: 14842 lpfc_sli4_driver_resource_unset(phba); 14843 out_unset_pci_mem_s4: 14844 lpfc_sli4_pci_mem_unset(phba); 14845 out_disable_pci_dev: 14846 lpfc_disable_pci_dev(phba); 14847 if (shost) 14848 scsi_host_put(shost); 14849 out_free_phba: 14850 lpfc_hba_free(phba); 14851 return error; 14852 } 14853 14854 /** 14855 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 14856 * @pdev: pointer to PCI device 14857 * 14858 * This routine is called from the kernel's PCI subsystem to device with 14859 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14860 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14861 * device to be removed from the PCI subsystem properly. 
14862 **/ 14863 static void 14864 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 14865 { 14866 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14867 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14868 struct lpfc_vport **vports; 14869 struct lpfc_hba *phba = vport->phba; 14870 int i; 14871 14872 /* Mark the device unloading flag */ 14873 spin_lock_irq(&phba->hbalock); 14874 vport->load_flag |= FC_UNLOADING; 14875 spin_unlock_irq(&phba->hbalock); 14876 if (phba->cgn_i) 14877 lpfc_unreg_congestion_buf(phba); 14878 14879 lpfc_free_sysfs_attr(vport); 14880 14881 /* Release all the vports against this physical port */ 14882 vports = lpfc_create_vport_work_array(phba); 14883 if (vports != NULL) 14884 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14885 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14886 continue; 14887 fc_vport_terminate(vports[i]->fc_vport); 14888 } 14889 lpfc_destroy_vport_work_array(phba, vports); 14890 14891 /* Remove FC host with the physical port */ 14892 fc_remove_host(shost); 14893 scsi_remove_host(shost); 14894 14895 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 14896 * localports are destroyed after to cleanup all transport memory. 14897 */ 14898 lpfc_cleanup(vport); 14899 lpfc_nvmet_destroy_targetport(phba); 14900 lpfc_nvme_destroy_localport(vport); 14901 14902 /* De-allocate multi-XRI pools */ 14903 if (phba->cfg_xri_rebalancing) 14904 lpfc_destroy_multixri_pools(phba); 14905 14906 /* 14907 * Bring down the SLI Layer. This step disables all interrupts, 14908 * clears the rings, discards all mailbox commands, and resets 14909 * the HBA FCoE function. 14910 */ 14911 lpfc_debugfs_terminate(vport); 14912 14913 lpfc_stop_hba_timers(phba); 14914 spin_lock_irq(&phba->port_list_lock); 14915 list_del_init(&vport->listentry); 14916 spin_unlock_irq(&phba->port_list_lock); 14917 14918 /* Perform scsi free before driver resource_unset since scsi 14919 * buffers are released to their corresponding pools here. 14920 */ 14921 lpfc_io_free(phba); 14922 lpfc_free_iocb_list(phba); 14923 lpfc_sli4_hba_unset(phba); 14924 14925 lpfc_unset_driver_resource_phase2(phba); 14926 lpfc_sli4_driver_resource_unset(phba); 14927 14928 /* Unmap adapter Control and Doorbell registers */ 14929 lpfc_sli4_pci_mem_unset(phba); 14930 14931 /* Release PCI resources and disable device's PCI function */ 14932 scsi_host_put(shost); 14933 lpfc_disable_pci_dev(phba); 14934 14935 /* Finally, free the driver's device data structure */ 14936 lpfc_hba_free(phba); 14937 14938 return; 14939 } 14940 14941 /** 14942 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 14943 * @dev_d: pointer to device 14944 * 14945 * This routine is called from the kernel's PCI subsystem to support system 14946 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 14947 * this method, it quiesces the device by stopping the driver's worker 14948 * thread for the device, turning off device's interrupt and DMA, and bring 14949 * the device offline. Note that as the driver implements the minimum PM 14950 * requirements to a power-aware driver's PM support for suspend/resume -- all 14951 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 14952 * method call will be treated as SUSPEND and the driver will fully 14953 * reinitialize its device during resume() method call, the driver will set 14954 * device to PCI_D3hot state in PCI config space instead of setting it 14955 * according to the @msg provided by the PM. 
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
15048 **/ 15049 static void 15050 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 15051 { 15052 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15053 "2828 PCI channel I/O abort preparing for recovery\n"); 15054 /* 15055 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 15056 * and let the SCSI mid-layer to retry them to recover. 15057 */ 15058 lpfc_sli_abort_fcp_rings(phba); 15059 } 15060 15061 /** 15062 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 15063 * @phba: pointer to lpfc hba data structure. 15064 * 15065 * This routine is called to prepare the SLI4 device for PCI slot reset. It 15066 * disables the device interrupt and pci device, and aborts the internal FCP 15067 * pending I/Os. 15068 **/ 15069 static void 15070 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 15071 { 15072 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15073 "2826 PCI channel disable preparing for reset\n"); 15074 15075 /* Block any management I/Os to the device */ 15076 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 15077 15078 /* Block all SCSI devices' I/Os on the host */ 15079 lpfc_scsi_dev_block(phba); 15080 15081 /* Flush all driver's outstanding I/Os as we are to reset */ 15082 lpfc_sli_flush_io_rings(phba); 15083 15084 /* stop all timers */ 15085 lpfc_stop_hba_timers(phba); 15086 15087 /* Disable interrupt and pci device */ 15088 lpfc_sli4_disable_intr(phba); 15089 lpfc_sli4_queue_destroy(phba); 15090 pci_disable_device(phba->pcidev); 15091 } 15092 15093 /** 15094 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 15095 * @phba: pointer to lpfc hba data structure. 15096 * 15097 * This routine is called to prepare the SLI4 device for PCI slot permanently 15098 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 15099 * pending I/Os. 15100 **/ 15101 static void 15102 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 15103 { 15104 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15105 "2827 PCI channel permanent disable for failure\n"); 15106 15107 /* Block all SCSI devices' I/Os on the host */ 15108 lpfc_scsi_dev_block(phba); 15109 15110 /* stop all timers */ 15111 lpfc_stop_hba_timers(phba); 15112 15113 /* Clean up all driver's outstanding I/Os */ 15114 lpfc_sli_flush_io_rings(phba); 15115 } 15116 15117 /** 15118 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 15119 * @pdev: pointer to PCI device. 15120 * @state: the current PCI connection state. 15121 * 15122 * This routine is called from the PCI subsystem for error handling to device 15123 * with SLI-4 interface spec. This function is called by the PCI subsystem 15124 * after a PCI bus error affecting this device has been detected. When this 15125 * function is invoked, it will need to stop all the I/Os and interrupt(s) 15126 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 15127 * for the PCI subsystem to perform proper recovery as desired. 
15128 * 15129 * Return codes 15130 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15131 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15132 **/ 15133 static pci_ers_result_t 15134 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 15135 { 15136 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15137 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15138 15139 switch (state) { 15140 case pci_channel_io_normal: 15141 /* Non-fatal error, prepare for recovery */ 15142 lpfc_sli4_prep_dev_for_recover(phba); 15143 return PCI_ERS_RESULT_CAN_RECOVER; 15144 case pci_channel_io_frozen: 15145 phba->hba_flag |= HBA_PCI_ERR; 15146 /* Fatal error, prepare for slot reset */ 15147 lpfc_sli4_prep_dev_for_reset(phba); 15148 return PCI_ERS_RESULT_NEED_RESET; 15149 case pci_channel_io_perm_failure: 15150 phba->hba_flag |= HBA_PCI_ERR; 15151 /* Permanent failure, prepare for device down */ 15152 lpfc_sli4_prep_dev_for_perm_failure(phba); 15153 return PCI_ERS_RESULT_DISCONNECT; 15154 default: 15155 phba->hba_flag |= HBA_PCI_ERR; 15156 /* Unknown state, prepare and request slot reset */ 15157 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15158 "2825 Unknown PCI error state: x%x\n", state); 15159 lpfc_sli4_prep_dev_for_reset(phba); 15160 return PCI_ERS_RESULT_NEED_RESET; 15161 } 15162 } 15163 15164 /** 15165 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 15166 * @pdev: pointer to PCI device. 15167 * 15168 * This routine is called from the PCI subsystem for error handling to device 15169 * with SLI-4 interface spec. It is called after PCI bus has been reset to 15170 * restart the PCI card from scratch, as if from a cold-boot. During the 15171 * PCI subsystem error recovery, after the driver returns 15172 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 15173 * recovery and then call this routine before calling the .resume method to 15174 * recover the device. This function will initialize the HBA device, enable 15175 * the interrupt, but it will just put the HBA to offline state without 15176 * passing any I/O traffic. 15177 * 15178 * Return codes 15179 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15180 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15181 */ 15182 static pci_ers_result_t 15183 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 15184 { 15185 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15186 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15187 struct lpfc_sli *psli = &phba->sli; 15188 uint32_t intr_mode; 15189 15190 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 15191 if (pci_enable_device_mem(pdev)) { 15192 printk(KERN_ERR "lpfc: Cannot re-enable " 15193 "PCI device after reset.\n"); 15194 return PCI_ERS_RESULT_DISCONNECT; 15195 } 15196 15197 pci_restore_state(pdev); 15198 15199 phba->hba_flag &= ~HBA_PCI_ERR; 15200 /* 15201 * As the new kernel behavior of pci_restore_state() API call clears 15202 * device saved_state flag, need to save the restored state again. 
15203 */ 15204 pci_save_state(pdev); 15205 15206 if (pdev->is_busmaster) 15207 pci_set_master(pdev); 15208 15209 spin_lock_irq(&phba->hbalock); 15210 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 15211 spin_unlock_irq(&phba->hbalock); 15212 15213 /* Configure and enable interrupt */ 15214 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 15215 if (intr_mode == LPFC_INTR_ERROR) { 15216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15217 "2824 Cannot re-enable interrupt after " 15218 "slot reset.\n"); 15219 return PCI_ERS_RESULT_DISCONNECT; 15220 } else 15221 phba->intr_mode = intr_mode; 15222 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 15223 15224 /* Log the current active interrupt mode */ 15225 lpfc_log_intr_mode(phba, phba->intr_mode); 15226 15227 return PCI_ERS_RESULT_RECOVERED; 15228 } 15229 15230 /** 15231 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 15232 * @pdev: pointer to PCI device 15233 * 15234 * This routine is called from the PCI subsystem for error handling to device 15235 * with SLI-4 interface spec. It is called when kernel error recovery tells 15236 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 15237 * error recovery. After this call, traffic can start to flow from this device 15238 * again. 15239 **/ 15240 static void 15241 lpfc_io_resume_s4(struct pci_dev *pdev) 15242 { 15243 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15244 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15245 15246 /* 15247 * In case of slot reset, as function reset is performed through 15248 * mailbox command which needs DMA to be enabled, this operation 15249 * has to be moved to the io resume phase. Taking device offline 15250 * will perform the necessary cleanup. 15251 */ 15252 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 15253 /* Perform device reset */ 15254 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 15255 lpfc_offline(phba); 15256 lpfc_sli_brdrestart(phba); 15257 /* Bring the device back online */ 15258 lpfc_online(phba); 15259 } 15260 } 15261 15262 /** 15263 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 15264 * @pdev: pointer to PCI device 15265 * @pid: pointer to PCI device identifier 15266 * 15267 * This routine is to be registered to the kernel's PCI subsystem. When an 15268 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 15269 * at PCI device-specific information of the device and driver to see if the 15270 * driver state that it can support this kind of device. If the match is 15271 * successful, the driver core invokes this routine. This routine dispatches 15272 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 15273 * do all the initialization that it needs to do to handle the HBA device 15274 * properly. 
15275 * 15276 * Return code 15277 * 0 - driver can claim the device 15278 * negative value - driver can not claim the device 15279 **/ 15280 static int 15281 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 15282 { 15283 int rc; 15284 struct lpfc_sli_intf intf; 15285 15286 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 15287 return -ENODEV; 15288 15289 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 15290 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 15291 rc = lpfc_pci_probe_one_s4(pdev, pid); 15292 else 15293 rc = lpfc_pci_probe_one_s3(pdev, pid); 15294 15295 return rc; 15296 } 15297 15298 /** 15299 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 15300 * @pdev: pointer to PCI device 15301 * 15302 * This routine is to be registered to the kernel's PCI subsystem. When an 15303 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 15304 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 15305 * remove routine, which will perform all the necessary cleanup for the 15306 * device to be removed from the PCI subsystem properly. 15307 **/ 15308 static void 15309 lpfc_pci_remove_one(struct pci_dev *pdev) 15310 { 15311 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15312 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15313 15314 switch (phba->pci_dev_grp) { 15315 case LPFC_PCI_DEV_LP: 15316 lpfc_pci_remove_one_s3(pdev); 15317 break; 15318 case LPFC_PCI_DEV_OC: 15319 lpfc_pci_remove_one_s4(pdev); 15320 break; 15321 default: 15322 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15323 "1424 Invalid PCI device group: 0x%x\n", 15324 phba->pci_dev_grp); 15325 break; 15326 } 15327 return; 15328 } 15329 15330 /** 15331 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 15332 * @dev: pointer to device 15333 * 15334 * This routine is to be registered to the kernel's PCI subsystem to support 15335 * system Power Management (PM). When PM invokes this method, it dispatches 15336 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 15337 * suspend the device. 15338 * 15339 * Return code 15340 * 0 - driver suspended the device 15341 * Error otherwise 15342 **/ 15343 static int __maybe_unused 15344 lpfc_pci_suspend_one(struct device *dev) 15345 { 15346 struct Scsi_Host *shost = dev_get_drvdata(dev); 15347 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15348 int rc = -ENODEV; 15349 15350 switch (phba->pci_dev_grp) { 15351 case LPFC_PCI_DEV_LP: 15352 rc = lpfc_pci_suspend_one_s3(dev); 15353 break; 15354 case LPFC_PCI_DEV_OC: 15355 rc = lpfc_pci_suspend_one_s4(dev); 15356 break; 15357 default: 15358 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15359 "1425 Invalid PCI device group: 0x%x\n", 15360 phba->pci_dev_grp); 15361 break; 15362 } 15363 return rc; 15364 } 15365 15366 /** 15367 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 15368 * @dev: pointer to device 15369 * 15370 * This routine is to be registered to the kernel's PCI subsystem to support 15371 * system Power Management (PM). When PM invokes this method, it dispatches 15372 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 15373 * resume the device. 
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	if (phba->link_state == LPFC_HBA_ERROR &&
	    phba->hba_flag & HBA_IOQ_FLUSH)
		return PCI_ERS_RESULT_NEED_RESET;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
15453 * 15454 * Return codes 15455 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15456 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15457 **/ 15458 static pci_ers_result_t 15459 lpfc_io_slot_reset(struct pci_dev *pdev) 15460 { 15461 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15462 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15463 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15464 15465 switch (phba->pci_dev_grp) { 15466 case LPFC_PCI_DEV_LP: 15467 rc = lpfc_io_slot_reset_s3(pdev); 15468 break; 15469 case LPFC_PCI_DEV_OC: 15470 rc = lpfc_io_slot_reset_s4(pdev); 15471 break; 15472 default: 15473 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15474 "1428 Invalid PCI device group: 0x%x\n", 15475 phba->pci_dev_grp); 15476 break; 15477 } 15478 return rc; 15479 } 15480 15481 /** 15482 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 15483 * @pdev: pointer to PCI device 15484 * 15485 * This routine is registered to the PCI subsystem for error handling. It 15486 * is called when kernel error recovery tells the lpfc driver that it is 15487 * OK to resume normal PCI operation after PCI bus error recovery. When 15488 * this routine is invoked, it dispatches the action to the proper SLI-3 15489 * or SLI-4 device io_resume routine, which will resume the device operation. 15490 **/ 15491 static void 15492 lpfc_io_resume(struct pci_dev *pdev) 15493 { 15494 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15495 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15496 15497 switch (phba->pci_dev_grp) { 15498 case LPFC_PCI_DEV_LP: 15499 lpfc_io_resume_s3(pdev); 15500 break; 15501 case LPFC_PCI_DEV_OC: 15502 lpfc_io_resume_s4(pdev); 15503 break; 15504 default: 15505 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15506 "1429 Invalid PCI device group: 0x%x\n", 15507 phba->pci_dev_grp); 15508 break; 15509 } 15510 return; 15511 } 15512 15513 /** 15514 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 15515 * @phba: pointer to lpfc hba data structure. 15516 * 15517 * This routine checks to see if OAS is supported for this adapter. If 15518 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 15519 * the enable oas flag is cleared and the pool created for OAS device data 15520 * is destroyed. 15521 * 15522 **/ 15523 static void 15524 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 15525 { 15526 15527 if (!phba->cfg_EnableXLane) 15528 return; 15529 15530 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 15531 phba->cfg_fof = 1; 15532 } else { 15533 phba->cfg_fof = 0; 15534 mempool_destroy(phba->device_data_mem_pool); 15535 phba->device_data_mem_pool = NULL; 15536 } 15537 15538 return; 15539 } 15540 15541 /** 15542 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 15543 * @phba: pointer to lpfc hba data structure. 15544 * 15545 * This routine checks to see if RAS is supported by the adapter. Check the 15546 * function through which RAS support enablement is to be done. 
15547 **/ 15548 void 15549 lpfc_sli4_ras_init(struct lpfc_hba *phba) 15550 { 15551 /* if ASIC_GEN_NUM >= 0xC) */ 15552 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 15553 LPFC_SLI_INTF_IF_TYPE_6) || 15554 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 15555 LPFC_SLI_INTF_FAMILY_G6)) { 15556 phba->ras_fwlog.ras_hwsupport = true; 15557 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 15558 phba->cfg_ras_fwlog_buffsize) 15559 phba->ras_fwlog.ras_enabled = true; 15560 else 15561 phba->ras_fwlog.ras_enabled = false; 15562 } else { 15563 phba->ras_fwlog.ras_hwsupport = false; 15564 } 15565 } 15566 15567 15568 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 15569 15570 static const struct pci_error_handlers lpfc_err_handler = { 15571 .error_detected = lpfc_io_error_detected, 15572 .slot_reset = lpfc_io_slot_reset, 15573 .resume = lpfc_io_resume, 15574 }; 15575 15576 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one, 15577 lpfc_pci_suspend_one, 15578 lpfc_pci_resume_one); 15579 15580 static struct pci_driver lpfc_driver = { 15581 .name = LPFC_DRIVER_NAME, 15582 .id_table = lpfc_id_table, 15583 .probe = lpfc_pci_probe_one, 15584 .remove = lpfc_pci_remove_one, 15585 .shutdown = lpfc_pci_remove_one, 15586 .driver.pm = &lpfc_pci_pm_ops_one, 15587 .err_handler = &lpfc_err_handler, 15588 }; 15589 15590 static const struct file_operations lpfc_mgmt_fop = { 15591 .owner = THIS_MODULE, 15592 }; 15593 15594 static struct miscdevice lpfc_mgmt_dev = { 15595 .minor = MISC_DYNAMIC_MINOR, 15596 .name = "lpfcmgmt", 15597 .fops = &lpfc_mgmt_fop, 15598 }; 15599 15600 /** 15601 * lpfc_init - lpfc module initialization routine 15602 * 15603 * This routine is to be invoked when the lpfc module is loaded into the 15604 * kernel. The special kernel macro module_init() is used to indicate the 15605 * role of this routine to the kernel as lpfc module entry point. 
15606 * 15607 * Return codes 15608 * 0 - successful 15609 * -ENOMEM - FC attach transport failed 15610 * all others - failed 15611 */ 15612 static int __init 15613 lpfc_init(void) 15614 { 15615 int error = 0; 15616 15617 pr_info(LPFC_MODULE_DESC "\n"); 15618 pr_info(LPFC_COPYRIGHT "\n"); 15619 15620 error = misc_register(&lpfc_mgmt_dev); 15621 if (error) 15622 printk(KERN_ERR "Could not register lpfcmgmt device, " 15623 "misc_register returned with status %d", error); 15624 15625 error = -ENOMEM; 15626 lpfc_transport_functions.vport_create = lpfc_vport_create; 15627 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 15628 lpfc_transport_template = 15629 fc_attach_transport(&lpfc_transport_functions); 15630 if (lpfc_transport_template == NULL) 15631 goto unregister; 15632 lpfc_vport_transport_template = 15633 fc_attach_transport(&lpfc_vport_transport_functions); 15634 if (lpfc_vport_transport_template == NULL) { 15635 fc_release_transport(lpfc_transport_template); 15636 goto unregister; 15637 } 15638 lpfc_wqe_cmd_template(); 15639 lpfc_nvmet_cmd_template(); 15640 15641 /* Initialize in case vector mapping is needed */ 15642 lpfc_present_cpu = num_present_cpus(); 15643 15644 lpfc_pldv_detect = false; 15645 15646 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 15647 "lpfc/sli4:online", 15648 lpfc_cpu_online, lpfc_cpu_offline); 15649 if (error < 0) 15650 goto cpuhp_failure; 15651 lpfc_cpuhp_state = error; 15652 15653 error = pci_register_driver(&lpfc_driver); 15654 if (error) 15655 goto unwind; 15656 15657 return error; 15658 15659 unwind: 15660 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15661 cpuhp_failure: 15662 fc_release_transport(lpfc_transport_template); 15663 fc_release_transport(lpfc_vport_transport_template); 15664 unregister: 15665 misc_deregister(&lpfc_mgmt_dev); 15666 15667 return error; 15668 } 15669 15670 void lpfc_dmp_dbg(struct lpfc_hba *phba) 15671 { 15672 unsigned int start_idx; 15673 unsigned int dbg_cnt; 15674 unsigned int temp_idx; 15675 int i; 15676 int j = 0; 15677 unsigned long rem_nsec, iflags; 15678 bool log_verbose = false; 15679 struct lpfc_vport *port_iterator; 15680 15681 /* Don't dump messages if we explicitly set log_verbose for the 15682 * physical port or any vport. 
15683 */ 15684 if (phba->cfg_log_verbose) 15685 return; 15686 15687 spin_lock_irqsave(&phba->port_list_lock, iflags); 15688 list_for_each_entry(port_iterator, &phba->port_list, listentry) { 15689 if (port_iterator->load_flag & FC_UNLOADING) 15690 continue; 15691 if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) { 15692 if (port_iterator->cfg_log_verbose) 15693 log_verbose = true; 15694 15695 scsi_host_put(lpfc_shost_from_vport(port_iterator)); 15696 15697 if (log_verbose) { 15698 spin_unlock_irqrestore(&phba->port_list_lock, 15699 iflags); 15700 return; 15701 } 15702 } 15703 } 15704 spin_unlock_irqrestore(&phba->port_list_lock, iflags); 15705 15706 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) 15707 return; 15708 15709 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; 15710 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); 15711 if (!dbg_cnt) 15712 goto out; 15713 temp_idx = start_idx; 15714 if (dbg_cnt >= DBG_LOG_SZ) { 15715 dbg_cnt = DBG_LOG_SZ; 15716 temp_idx -= 1; 15717 } else { 15718 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { 15719 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; 15720 } else { 15721 if (start_idx < dbg_cnt) 15722 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); 15723 else 15724 start_idx -= dbg_cnt; 15725 } 15726 } 15727 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", 15728 start_idx, temp_idx, dbg_cnt); 15729 15730 for (i = 0; i < dbg_cnt; i++) { 15731 if ((start_idx + i) < DBG_LOG_SZ) 15732 temp_idx = (start_idx + i) % DBG_LOG_SZ; 15733 else 15734 temp_idx = j++; 15735 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); 15736 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", 15737 temp_idx, 15738 (unsigned long)phba->dbg_log[temp_idx].t_ns, 15739 rem_nsec / 1000, 15740 phba->dbg_log[temp_idx].log); 15741 } 15742 out: 15743 atomic_set(&phba->dbg_log_cnt, 0); 15744 atomic_set(&phba->dbg_log_dmping, 0); 15745 } 15746 15747 __printf(2, 3) 15748 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...) 15749 { 15750 unsigned int idx; 15751 va_list args; 15752 int dbg_dmping = atomic_read(&phba->dbg_log_dmping); 15753 struct va_format vaf; 15754 15755 15756 va_start(args, fmt); 15757 if (unlikely(dbg_dmping)) { 15758 vaf.fmt = fmt; 15759 vaf.va = &args; 15760 dev_info(&phba->pcidev->dev, "%pV", &vaf); 15761 va_end(args); 15762 return; 15763 } 15764 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % 15765 DBG_LOG_SZ; 15766 15767 atomic_inc(&phba->dbg_log_cnt); 15768 15769 vscnprintf(phba->dbg_log[idx].log, 15770 sizeof(phba->dbg_log[idx].log), fmt, args); 15771 va_end(args); 15772 15773 phba->dbg_log[idx].t_ns = local_clock(); 15774 } 15775 15776 /** 15777 * lpfc_exit - lpfc module removal routine 15778 * 15779 * This routine is invoked when the lpfc module is removed from the kernel. 15780 * The special kernel macro module_exit() is used to indicate the role of 15781 * this routine to the kernel as lpfc module exit point. 
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);