1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 #include <linux/crash_dump.h> 43 #include <linux/cpu.h> 44 #include <linux/cpuhotplug.h> 45 46 #include <scsi/scsi.h> 47 #include <scsi/scsi_device.h> 48 #include <scsi/scsi_host.h> 49 #include <scsi/scsi_transport_fc.h> 50 #include <scsi/scsi_tcq.h> 51 #include <scsi/fc/fc_fs.h> 52 53 #include "lpfc_hw4.h" 54 #include "lpfc_hw.h" 55 #include "lpfc_sli.h" 56 #include "lpfc_sli4.h" 57 #include "lpfc_nl.h" 58 #include "lpfc_disc.h" 59 #include "lpfc.h" 60 #include "lpfc_scsi.h" 61 #include "lpfc_nvme.h" 62 #include "lpfc_logmsg.h" 63 #include "lpfc_crtn.h" 64 #include "lpfc_vport.h" 65 #include "lpfc_version.h" 66 #include "lpfc_ids.h" 67 68 static enum cpuhp_state lpfc_cpuhp_state; 69 /* Used when mapping IRQ vectors in a driver centric manner */ 70 static uint32_t lpfc_present_cpu; 71 static bool lpfc_pldv_detect; 72 73 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 74 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); 75 static void lpfc_cpuhp_add(struct lpfc_hba *phba); 76 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 77 static int lpfc_post_rcv_buf(struct lpfc_hba *); 78 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 79 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 80 static int lpfc_setup_endian_order(struct lpfc_hba *); 81 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 82 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 83 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 84 static void lpfc_init_sgl_list(struct lpfc_hba *); 85 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 86 static void lpfc_free_active_sgl(struct lpfc_hba *); 87 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 88 static int 
lpfc_hba_down_post_s4(struct lpfc_hba *phba); 89 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 90 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 91 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 92 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 93 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 94 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 95 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 96 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 97 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); 98 99 static struct scsi_transport_template *lpfc_transport_template = NULL; 100 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 101 static DEFINE_IDR(lpfc_hba_index); 102 #define LPFC_NVMET_BUF_POST 254 103 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); 104 105 /** 106 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 107 * @phba: pointer to lpfc hba data structure. 108 * 109 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 110 * mailbox command. It retrieves the revision information from the HBA and 111 * collects the Vital Product Data (VPD) about the HBA for preparing the 112 * configuration of the HBA. 113 * 114 * Return codes: 115 * 0 - success. 116 * -ERESTART - requests the SLI layer to reset the HBA and try again. 117 * Any other value - indicates an error. 118 **/ 119 int 120 lpfc_config_port_prep(struct lpfc_hba *phba) 121 { 122 lpfc_vpd_t *vp = &phba->vpd; 123 int i = 0, rc; 124 LPFC_MBOXQ_t *pmb; 125 MAILBOX_t *mb; 126 char *lpfc_vpd_data = NULL; 127 uint16_t offset = 0; 128 static char licensed[56] = 129 "key unlock for use with gnu public licensed code only\0"; 130 static int init_key = 1; 131 132 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 133 if (!pmb) { 134 phba->link_state = LPFC_HBA_ERROR; 135 return -ENOMEM; 136 } 137 138 mb = &pmb->u.mb; 139 phba->link_state = LPFC_INIT_MBX_CMDS; 140 141 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 142 if (init_key) { 143 uint32_t *ptext = (uint32_t *) licensed; 144 145 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 146 *ptext = cpu_to_be32(*ptext); 147 init_key = 0; 148 } 149 150 lpfc_read_nv(phba, pmb); 151 memset((char*)mb->un.varRDnvp.rsvd3, 0, 152 sizeof (mb->un.varRDnvp.rsvd3)); 153 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 154 sizeof (licensed)); 155 156 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 157 158 if (rc != MBX_SUCCESS) { 159 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 160 "0324 Config Port initialization " 161 "error, mbxCmd x%x READ_NVPARM, " 162 "mbxStatus x%x\n", 163 mb->mbxCommand, mb->mbxStatus); 164 mempool_free(pmb, phba->mbox_mem_pool); 165 return -ERESTART; 166 } 167 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 168 sizeof(phba->wwnn)); 169 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 170 sizeof(phba->wwpn)); 171 } 172 173 /* 174 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 175 * which was already set in lpfc_get_cfgparam() 176 */ 177 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 178 179 /* Setup and issue mailbox READ REV command */ 180 lpfc_read_rev(phba, pmb); 181 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 182 if (rc != MBX_SUCCESS) { 183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 184 "0439 Adapter failed to init, mbxCmd x%x " 185 "READ_REV, mbxStatus x%x\n", 186 mb->mbxCommand, mb->mbxStatus); 187 
mempool_free( pmb, phba->mbox_mem_pool); 188 return -ERESTART; 189 } 190 191 192 /* 193 * The value of rr must be 1 since the driver set the cv field to 1. 194 * This setting requires the FW to set all revision fields. 195 */ 196 if (mb->un.varRdRev.rr == 0) { 197 vp->rev.rBit = 0; 198 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 199 "0440 Adapter failed to init, READ_REV has " 200 "missing revision information.\n"); 201 mempool_free(pmb, phba->mbox_mem_pool); 202 return -ERESTART; 203 } 204 205 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 206 mempool_free(pmb, phba->mbox_mem_pool); 207 return -EINVAL; 208 } 209 210 /* Save information as VPD data */ 211 vp->rev.rBit = 1; 212 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 213 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 214 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 215 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 216 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 217 vp->rev.biuRev = mb->un.varRdRev.biuRev; 218 vp->rev.smRev = mb->un.varRdRev.smRev; 219 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 220 vp->rev.endecRev = mb->un.varRdRev.endecRev; 221 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 222 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 223 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 224 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 225 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 226 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 227 228 /* If the sli feature level is less then 9, we must 229 * tear down all RPIs and VPIs on link down if NPIV 230 * is enabled. 231 */ 232 if (vp->rev.feaLevelHigh < 9) 233 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 234 235 if (lpfc_is_LC_HBA(phba->pcidev->device)) 236 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 237 sizeof (phba->RandomData)); 238 239 /* Get adapter VPD information */ 240 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 241 if (!lpfc_vpd_data) 242 goto out_free_mbox; 243 do { 244 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 245 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 246 247 if (rc != MBX_SUCCESS) { 248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 249 "0441 VPD not present on adapter, " 250 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 251 mb->mbxCommand, mb->mbxStatus); 252 mb->un.varDmp.word_cnt = 0; 253 } 254 /* dump mem may return a zero when finished or we got a 255 * mailbox error, either way we are done. 256 */ 257 if (mb->un.varDmp.word_cnt == 0) 258 break; 259 260 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 261 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 262 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 263 lpfc_vpd_data + offset, 264 mb->un.varDmp.word_cnt); 265 offset += mb->un.varDmp.word_cnt; 266 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 267 268 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 269 270 kfree(lpfc_vpd_data); 271 out_free_mbox: 272 mempool_free(pmb, phba->mbox_mem_pool); 273 return 0; 274 } 275 276 /** 277 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 278 * @phba: pointer to lpfc hba data structure. 279 * @pmboxq: pointer to the driver internal queue element for mailbox command. 280 * 281 * This is the completion handler for driver's configuring asynchronous event 282 * mailbox command to the device. If the mailbox command returns successfully, 283 * it will set internal async event support flag to 1; otherwise, it will 284 * set internal async event support flag to 0. 
285 **/ 286 static void 287 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 288 { 289 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 290 phba->temp_sensor_support = 1; 291 else 292 phba->temp_sensor_support = 0; 293 mempool_free(pmboxq, phba->mbox_mem_pool); 294 return; 295 } 296 297 /** 298 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 299 * @phba: pointer to lpfc hba data structure. 300 * @pmboxq: pointer to the driver internal queue element for mailbox command. 301 * 302 * This is the completion handler for the dump mailbox command for getting 303 * wake up parameters. When this command completes, the response contains the 304 * Option ROM version of the HBA. This function translates the version number 305 * into a human readable string and stores it in OptionROMVersion. 306 **/ 307 static void 308 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 309 { 310 struct prog_id *prg; 311 uint32_t prog_id_word; 312 char dist = ' '; 313 /* character array used for decoding dist type. */ 314 char dist_char[] = "nabx"; 315 316 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 317 mempool_free(pmboxq, phba->mbox_mem_pool); 318 return; 319 } 320 321 prg = (struct prog_id *) &prog_id_word; 322 323 /* word 7 contains the Option ROM version */ 324 prog_id_word = pmboxq->u.mb.un.varWords[7]; 325 326 /* Decode the Option ROM version word into a readable string */ 327 if (prg->dist < 4) 328 dist = dist_char[prg->dist]; 329 330 if ((prg->dist == 3) && (prg->num == 0)) 331 snprintf(phba->OptionROMVersion, 32, "%d.%d%d", 332 prg->ver, prg->rev, prg->lev); 333 else 334 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", 335 prg->ver, prg->rev, prg->lev, 336 dist, prg->num); 337 mempool_free(pmboxq, phba->mbox_mem_pool); 338 return; 339 } 340 341 /** 342 * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname 343 * @vport: pointer to lpfc vport data structure. 344 * 345 * 346 * Return codes 347 * None. 348 **/ 349 void 350 lpfc_update_vport_wwn(struct lpfc_vport *vport) 351 { 352 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 353 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 354 355 /* 356 * If the name is empty or there exists a soft name 357 * then copy the service params name, otherwise use the fc name 358 */ 359 if (vport->fc_nodename.u.wwn[0] == 0) 360 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 361 sizeof(struct lpfc_name)); 362 else 363 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 364 sizeof(struct lpfc_name)); 365 366 /* 367 * If the port name has changed, then set the Param changes flag 368 * to unreg the login 369 */ 370 if (vport->fc_portname.u.wwn[0] != 0 && 371 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 372 sizeof(struct lpfc_name))) 373 vport->vport_flag |= FAWWPN_PARAM_CHG; 374 375 if (vport->fc_portname.u.wwn[0] == 0 || 376 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 377 vport->vport_flag & FAWWPN_SET) { 378 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 379 sizeof(struct lpfc_name)); 380 vport->vport_flag &= ~FAWWPN_SET; 381 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 382 vport->vport_flag |= FAWWPN_SET; 383 } 384 else 385 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 386 sizeof(struct lpfc_name)); 387 } 388 389 /** 390 * lpfc_config_port_post - Perform lpfc initialization after config port 391 * @phba: pointer to lpfc hba data structure.
392 * 393 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 394 * command call. It performs all internal resource and state setups on the 395 * port: post IOCB buffers, enable appropriate host interrupt attentions, 396 * ELS ring timers, etc. 397 * 398 * Return codes 399 * 0 - success. 400 * Any other value - error. 401 **/ 402 int 403 lpfc_config_port_post(struct lpfc_hba *phba) 404 { 405 struct lpfc_vport *vport = phba->pport; 406 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 407 LPFC_MBOXQ_t *pmb; 408 MAILBOX_t *mb; 409 struct lpfc_dmabuf *mp; 410 struct lpfc_sli *psli = &phba->sli; 411 uint32_t status, timeout; 412 int i, j; 413 int rc; 414 415 spin_lock_irq(&phba->hbalock); 416 /* 417 * If the Config port completed correctly the HBA is not 418 * over heated any more. 419 */ 420 if (phba->over_temp_state == HBA_OVER_TEMP) 421 phba->over_temp_state = HBA_NORMAL_TEMP; 422 spin_unlock_irq(&phba->hbalock); 423 424 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 425 if (!pmb) { 426 phba->link_state = LPFC_HBA_ERROR; 427 return -ENOMEM; 428 } 429 mb = &pmb->u.mb; 430 431 /* Get login parameters for NID. */ 432 rc = lpfc_read_sparam(phba, pmb, 0); 433 if (rc) { 434 mempool_free(pmb, phba->mbox_mem_pool); 435 return -ENOMEM; 436 } 437 438 pmb->vport = vport; 439 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 440 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 441 "0448 Adapter failed init, mbxCmd x%x " 442 "READ_SPARM mbxStatus x%x\n", 443 mb->mbxCommand, mb->mbxStatus); 444 phba->link_state = LPFC_HBA_ERROR; 445 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 446 mempool_free(pmb, phba->mbox_mem_pool); 447 lpfc_mbuf_free(phba, mp->virt, mp->phys); 448 kfree(mp); 449 return -EIO; 450 } 451 452 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 453 454 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 455 lpfc_mbuf_free(phba, mp->virt, mp->phys); 456 kfree(mp); 457 pmb->ctx_buf = NULL; 458 lpfc_update_vport_wwn(vport); 459 460 /* Update the fc_host data structures with new wwn. */ 461 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 463 fc_host_max_npiv_vports(shost) = phba->max_vpi; 464 465 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 466 /* This should be consolidated into parse_vpd ? 
- mr */ 467 if (phba->SerialNumber[0] == 0) { 468 uint8_t *outptr; 469 470 outptr = &vport->fc_nodename.u.s.IEEE[0]; 471 for (i = 0; i < 12; i++) { 472 status = *outptr++; 473 j = ((status & 0xf0) >> 4); 474 if (j <= 9) 475 phba->SerialNumber[i] = 476 (char)((uint8_t) 0x30 + (uint8_t) j); 477 else 478 phba->SerialNumber[i] = 479 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 480 i++; 481 j = (status & 0xf); 482 if (j <= 9) 483 phba->SerialNumber[i] = 484 (char)((uint8_t) 0x30 + (uint8_t) j); 485 else 486 phba->SerialNumber[i] = 487 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 488 } 489 } 490 491 lpfc_read_config(phba, pmb); 492 pmb->vport = vport; 493 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 494 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 495 "0453 Adapter failed to init, mbxCmd x%x " 496 "READ_CONFIG, mbxStatus x%x\n", 497 mb->mbxCommand, mb->mbxStatus); 498 phba->link_state = LPFC_HBA_ERROR; 499 mempool_free( pmb, phba->mbox_mem_pool); 500 return -EIO; 501 } 502 503 /* Check if the port is disabled */ 504 lpfc_sli_read_link_ste(phba); 505 506 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 507 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { 508 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 509 "3359 HBA queue depth changed from %d to %d\n", 510 phba->cfg_hba_queue_depth, 511 mb->un.varRdConfig.max_xri); 512 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; 513 } 514 515 phba->lmt = mb->un.varRdConfig.lmt; 516 517 /* Get the default values for Model Name and Description */ 518 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 519 520 phba->link_state = LPFC_LINK_DOWN; 521 522 /* Only process IOCBs on ELS ring till hba_state is READY */ 523 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 524 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 525 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 526 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 527 528 /* Post receive buffers for desired rings */ 529 if (phba->sli_rev != 3) 530 lpfc_post_rcv_buf(phba); 531 532 /* 533 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 534 */ 535 if (phba->intr_type == MSIX) { 536 rc = lpfc_config_msi(phba, pmb); 537 if (rc) { 538 mempool_free(pmb, phba->mbox_mem_pool); 539 return -EIO; 540 } 541 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 542 if (rc != MBX_SUCCESS) { 543 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 544 "0352 Config MSI mailbox command " 545 "failed, mbxCmd x%x, mbxStatus x%x\n", 546 pmb->u.mb.mbxCommand, 547 pmb->u.mb.mbxStatus); 548 mempool_free(pmb, phba->mbox_mem_pool); 549 return -EIO; 550 } 551 } 552 553 spin_lock_irq(&phba->hbalock); 554 /* Initialize ERATT handling flag */ 555 phba->hba_flag &= ~HBA_ERATT_HANDLED; 556 557 /* Enable appropriate host interrupts */ 558 if (lpfc_readl(phba->HCregaddr, &status)) { 559 spin_unlock_irq(&phba->hbalock); 560 return -EIO; 561 } 562 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 563 if (psli->num_rings > 0) 564 status |= HC_R0INT_ENA; 565 if (psli->num_rings > 1) 566 status |= HC_R1INT_ENA; 567 if (psli->num_rings > 2) 568 status |= HC_R2INT_ENA; 569 if (psli->num_rings > 3) 570 status |= HC_R3INT_ENA; 571 572 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 573 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 574 status &= ~(HC_R0INT_ENA); 575 576 writel(status, phba->HCregaddr); 577 readl(phba->HCregaddr); /* flush */ 578 spin_unlock_irq(&phba->hbalock); 579 580 /* Set up ring-0 (ELS) timer */ 581 timeout = 
phba->fc_ratov * 2; 582 mod_timer(&vport->els_tmofunc, 583 jiffies + msecs_to_jiffies(1000 * timeout)); 584 /* Set up heart beat (HB) timer */ 585 mod_timer(&phba->hb_tmofunc, 586 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 587 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 588 phba->last_completion_time = jiffies; 589 /* Set up error attention (ERATT) polling timer */ 590 mod_timer(&phba->eratt_poll, 591 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 592 593 if (phba->hba_flag & LINK_DISABLED) { 594 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 595 "2598 Adapter Link is disabled.\n"); 596 lpfc_down_link(phba, pmb); 597 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 598 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 599 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 600 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 601 "2599 Adapter failed to issue DOWN_LINK" 602 " mbox command rc 0x%x\n", rc); 603 604 mempool_free(pmb, phba->mbox_mem_pool); 605 return -EIO; 606 } 607 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 608 mempool_free(pmb, phba->mbox_mem_pool); 609 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 610 if (rc) 611 return rc; 612 } 613 /* MBOX buffer will be freed in mbox compl */ 614 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 615 if (!pmb) { 616 phba->link_state = LPFC_HBA_ERROR; 617 return -ENOMEM; 618 } 619 620 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 621 pmb->mbox_cmpl = lpfc_config_async_cmpl; 622 pmb->vport = phba->pport; 623 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 624 625 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 626 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 627 "0456 Adapter failed to issue " 628 "ASYNCEVT_ENABLE mbox status x%x\n", 629 rc); 630 mempool_free(pmb, phba->mbox_mem_pool); 631 } 632 633 /* Get Option rom version */ 634 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 635 if (!pmb) { 636 phba->link_state = LPFC_HBA_ERROR; 637 return -ENOMEM; 638 } 639 640 lpfc_dump_wakeup_param(phba, pmb); 641 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 642 pmb->vport = phba->pport; 643 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 644 645 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 647 "0435 Adapter failed " 648 "to get Option ROM version status x%x\n", rc); 649 mempool_free(pmb, phba->mbox_mem_pool); 650 } 651 652 return 0; 653 } 654 655 /** 656 * lpfc_sli4_refresh_params - update driver copy of params. 657 * @phba: Pointer to HBA context object. 658 * 659 * This is called to refresh driver copy of dynamic fields from the 660 * common_get_sli4_parameters descriptor. 
661 **/ 662 int 663 lpfc_sli4_refresh_params(struct lpfc_hba *phba) 664 { 665 LPFC_MBOXQ_t *mboxq; 666 struct lpfc_mqe *mqe; 667 struct lpfc_sli4_parameters *mbx_sli4_parameters; 668 int length, rc; 669 670 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 671 if (!mboxq) 672 return -ENOMEM; 673 674 mqe = &mboxq->u.mqe; 675 /* Read the port's SLI4 Config Parameters */ 676 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 677 sizeof(struct lpfc_sli4_cfg_mhdr)); 678 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 679 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 680 length, LPFC_SLI4_MBX_EMBED); 681 682 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 683 if (unlikely(rc)) { 684 mempool_free(mboxq, phba->mbox_mem_pool); 685 return rc; 686 } 687 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 688 phba->sli4_hba.pc_sli4_params.mi_ver = 689 bf_get(cfg_mi_ver, mbx_sli4_parameters); 690 phba->sli4_hba.pc_sli4_params.cmf = 691 bf_get(cfg_cmf, mbx_sli4_parameters); 692 phba->sli4_hba.pc_sli4_params.pls = 693 bf_get(cfg_pvl, mbx_sli4_parameters); 694 695 mempool_free(mboxq, phba->mbox_mem_pool); 696 return rc; 697 } 698 699 /** 700 * lpfc_hba_init_link - Initialize the FC link 701 * @phba: pointer to lpfc hba data structure. 702 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 703 * 704 * This routine will issue the INIT_LINK mailbox command call. 705 * It is available to other drivers through the lpfc_hba data 706 * structure for use as a delayed link up mechanism with the 707 * module parameter lpfc_suppress_link_up. 708 * 709 * Return code 710 * 0 - success 711 * Any other value - error 712 **/ 713 static int 714 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 715 { 716 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 717 } 718 719 /** 720 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 721 * @phba: pointer to lpfc hba data structure. 722 * @fc_topology: desired fc topology. 723 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 724 * 725 * This routine will issue the INIT_LINK mailbox command call. 726 * It is available to other drivers through the lpfc_hba data 727 * structure for use as a delayed link up mechanism with the 728 * module parameter lpfc_suppress_link_up. 
729 * 730 * Return code 731 * 0 - success 732 * Any other value - error 733 **/ 734 int 735 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 736 uint32_t flag) 737 { 738 struct lpfc_vport *vport = phba->pport; 739 LPFC_MBOXQ_t *pmb; 740 MAILBOX_t *mb; 741 int rc; 742 743 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 744 if (!pmb) { 745 phba->link_state = LPFC_HBA_ERROR; 746 return -ENOMEM; 747 } 748 mb = &pmb->u.mb; 749 pmb->vport = vport; 750 751 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 752 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 753 !(phba->lmt & LMT_1Gb)) || 754 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 755 !(phba->lmt & LMT_2Gb)) || 756 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 757 !(phba->lmt & LMT_4Gb)) || 758 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 759 !(phba->lmt & LMT_8Gb)) || 760 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 761 !(phba->lmt & LMT_10Gb)) || 762 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 763 !(phba->lmt & LMT_16Gb)) || 764 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 765 !(phba->lmt & LMT_32Gb)) || 766 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 767 !(phba->lmt & LMT_64Gb))) { 768 /* Reset link speed to auto */ 769 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 770 "1302 Invalid speed for this board:%d " 771 "Reset link speed to auto.\n", 772 phba->cfg_link_speed); 773 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 774 } 775 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 776 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 777 if (phba->sli_rev < LPFC_SLI_REV4) 778 lpfc_set_loopback_flag(phba); 779 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 780 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 781 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 782 "0498 Adapter failed to init, mbxCmd x%x " 783 "INIT_LINK, mbxStatus x%x\n", 784 mb->mbxCommand, mb->mbxStatus); 785 if (phba->sli_rev <= LPFC_SLI_REV3) { 786 /* Clear all interrupt enable conditions */ 787 writel(0, phba->HCregaddr); 788 readl(phba->HCregaddr); /* flush */ 789 /* Clear all pending interrupts */ 790 writel(0xffffffff, phba->HAregaddr); 791 readl(phba->HAregaddr); /* flush */ 792 } 793 phba->link_state = LPFC_HBA_ERROR; 794 if (rc != MBX_BUSY || flag == MBX_POLL) 795 mempool_free(pmb, phba->mbox_mem_pool); 796 return -EIO; 797 } 798 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 799 if (flag == MBX_POLL) 800 mempool_free(pmb, phba->mbox_mem_pool); 801 802 return 0; 803 } 804 805 /** 806 * lpfc_hba_down_link - this routine downs the FC link 807 * @phba: pointer to lpfc hba data structure. 808 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 809 * 810 * This routine will issue the DOWN_LINK mailbox command call. 811 * It is available to other drivers through the lpfc_hba data 812 * structure for use to stop the link. 
813 * 814 * Return code 815 * 0 - success 816 * Any other value - error 817 **/ 818 static int 819 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 820 { 821 LPFC_MBOXQ_t *pmb; 822 int rc; 823 824 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 825 if (!pmb) { 826 phba->link_state = LPFC_HBA_ERROR; 827 return -ENOMEM; 828 } 829 830 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 831 "0491 Adapter Link is disabled.\n"); 832 lpfc_down_link(phba, pmb); 833 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 834 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 835 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 836 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 837 "2522 Adapter failed to issue DOWN_LINK" 838 " mbox command rc 0x%x\n", rc); 839 840 mempool_free(pmb, phba->mbox_mem_pool); 841 return -EIO; 842 } 843 if (flag == MBX_POLL) 844 mempool_free(pmb, phba->mbox_mem_pool); 845 846 return 0; 847 } 848 849 /** 850 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 851 * @phba: pointer to lpfc HBA data structure. 852 * 853 * This routine will do LPFC uninitialization before the HBA is reset when 854 * bringing down the SLI Layer. 855 * 856 * Return codes 857 * 0 - success. 858 * Any other value - error. 859 **/ 860 int 861 lpfc_hba_down_prep(struct lpfc_hba *phba) 862 { 863 struct lpfc_vport **vports; 864 int i; 865 866 if (phba->sli_rev <= LPFC_SLI_REV3) { 867 /* Disable interrupts */ 868 writel(0, phba->HCregaddr); 869 readl(phba->HCregaddr); /* flush */ 870 } 871 872 if (phba->pport->load_flag & FC_UNLOADING) 873 lpfc_cleanup_discovery_resources(phba->pport); 874 else { 875 vports = lpfc_create_vport_work_array(phba); 876 if (vports != NULL) 877 for (i = 0; i <= phba->max_vports && 878 vports[i] != NULL; i++) 879 lpfc_cleanup_discovery_resources(vports[i]); 880 lpfc_destroy_vport_work_array(phba, vports); 881 } 882 return 0; 883 } 884 885 /** 886 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 887 * rspiocb which got deferred 888 * 889 * @phba: pointer to lpfc HBA data structure. 890 * 891 * This routine will cleanup completed slow path events after HBA is reset 892 * when bringing down the SLI Layer. 893 * 894 * 895 * Return codes 896 * void. 897 **/ 898 static void 899 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 900 { 901 struct lpfc_iocbq *rspiocbq; 902 struct hbq_dmabuf *dmabuf; 903 struct lpfc_cq_event *cq_event; 904 905 spin_lock_irq(&phba->hbalock); 906 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 907 spin_unlock_irq(&phba->hbalock); 908 909 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 910 /* Get the response iocb from the head of work queue */ 911 spin_lock_irq(&phba->hbalock); 912 list_remove_head(&phba->sli4_hba.sp_queue_event, 913 cq_event, struct lpfc_cq_event, list); 914 spin_unlock_irq(&phba->hbalock); 915 916 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 917 case CQE_CODE_COMPL_WQE: 918 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 919 cq_event); 920 lpfc_sli_release_iocbq(phba, rspiocbq); 921 break; 922 case CQE_CODE_RECEIVE: 923 case CQE_CODE_RECEIVE_V1: 924 dmabuf = container_of(cq_event, struct hbq_dmabuf, 925 cq_event); 926 lpfc_in_buf_free(phba, &dmabuf->dbuf); 927 } 928 } 929 } 930 931 /** 932 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 933 * @phba: pointer to lpfc HBA data structure. 934 * 935 * This routine will cleanup posted ELS buffers after the HBA is reset 936 * when bringing down the SLI Layer. 937 * 938 * 939 * Return codes 940 * void. 
941 **/ 942 static void 943 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 944 { 945 struct lpfc_sli *psli = &phba->sli; 946 struct lpfc_sli_ring *pring; 947 struct lpfc_dmabuf *mp, *next_mp; 948 LIST_HEAD(buflist); 949 int count; 950 951 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 952 lpfc_sli_hbqbuf_free_all(phba); 953 else { 954 /* Cleanup preposted buffers on the ELS ring */ 955 pring = &psli->sli3_ring[LPFC_ELS_RING]; 956 spin_lock_irq(&phba->hbalock); 957 list_splice_init(&pring->postbufq, &buflist); 958 spin_unlock_irq(&phba->hbalock); 959 960 count = 0; 961 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 962 list_del(&mp->list); 963 count++; 964 lpfc_mbuf_free(phba, mp->virt, mp->phys); 965 kfree(mp); 966 } 967 968 spin_lock_irq(&phba->hbalock); 969 pring->postbufq_cnt -= count; 970 spin_unlock_irq(&phba->hbalock); 971 } 972 } 973 974 /** 975 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 976 * @phba: pointer to lpfc HBA data structure. 977 * 978 * This routine will cleanup the txcmplq after the HBA is reset when bringing 979 * down the SLI Layer. 980 * 981 * Return codes 982 * void 983 **/ 984 static void 985 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 986 { 987 struct lpfc_sli *psli = &phba->sli; 988 struct lpfc_queue *qp = NULL; 989 struct lpfc_sli_ring *pring; 990 LIST_HEAD(completions); 991 int i; 992 struct lpfc_iocbq *piocb, *next_iocb; 993 994 if (phba->sli_rev != LPFC_SLI_REV4) { 995 for (i = 0; i < psli->num_rings; i++) { 996 pring = &psli->sli3_ring[i]; 997 spin_lock_irq(&phba->hbalock); 998 /* At this point in time the HBA is either reset or DOA 999 * Nothing should be on txcmplq as it will 1000 * NEVER complete. 1001 */ 1002 list_splice_init(&pring->txcmplq, &completions); 1003 pring->txcmplq_cnt = 0; 1004 spin_unlock_irq(&phba->hbalock); 1005 1006 lpfc_sli_abort_iocb_ring(phba, pring); 1007 } 1008 /* Cancel all the IOCBs from the completions list */ 1009 lpfc_sli_cancel_iocbs(phba, &completions, 1010 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1011 return; 1012 } 1013 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 1014 pring = qp->pring; 1015 if (!pring) 1016 continue; 1017 spin_lock_irq(&pring->ring_lock); 1018 list_for_each_entry_safe(piocb, next_iocb, 1019 &pring->txcmplq, list) 1020 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 1021 list_splice_init(&pring->txcmplq, &completions); 1022 pring->txcmplq_cnt = 0; 1023 spin_unlock_irq(&pring->ring_lock); 1024 lpfc_sli_abort_iocb_ring(phba, pring); 1025 } 1026 /* Cancel all the IOCBs from the completions list */ 1027 lpfc_sli_cancel_iocbs(phba, &completions, 1028 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1029 } 1030 1031 /** 1032 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 1033 * @phba: pointer to lpfc HBA data structure. 1034 * 1035 * This routine will do uninitialization after the HBA is reset when bring 1036 * down the SLI Layer. 1037 * 1038 * Return codes 1039 * 0 - success. 1040 * Any other value - error. 1041 **/ 1042 static int 1043 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 1044 { 1045 lpfc_hba_free_post_buf(phba); 1046 lpfc_hba_clean_txcmplq(phba); 1047 return 0; 1048 } 1049 1050 /** 1051 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 1052 * @phba: pointer to lpfc HBA data structure. 1053 * 1054 * This routine will do uninitialization after the HBA is reset when bring 1055 * down the SLI Layer. 1056 * 1057 * Return codes 1058 * 0 - success. 1059 * Any other value - error. 
1060 **/ 1061 static int 1062 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1063 { 1064 struct lpfc_io_buf *psb, *psb_next; 1065 struct lpfc_async_xchg_ctx *ctxp, *ctxp_next; 1066 struct lpfc_sli4_hdw_queue *qp; 1067 LIST_HEAD(aborts); 1068 LIST_HEAD(nvme_aborts); 1069 LIST_HEAD(nvmet_aborts); 1070 struct lpfc_sglq *sglq_entry = NULL; 1071 int cnt, idx; 1072 1073 1074 lpfc_sli_hbqbuf_free_all(phba); 1075 lpfc_hba_clean_txcmplq(phba); 1076 1077 /* At this point in time the HBA is either reset or DOA. Either 1078 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1079 * on the lpfc_els_sgl_list so that it can either be freed if the 1080 * driver is unloading or reposted if the driver is restarting 1081 * the port. 1082 */ 1083 1084 /* sgl_list_lock required because worker thread uses this 1085 * list. 1086 */ 1087 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 1088 list_for_each_entry(sglq_entry, 1089 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1090 sglq_entry->state = SGL_FREED; 1091 1092 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1093 &phba->sli4_hba.lpfc_els_sgl_list); 1094 1095 1096 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 1097 1098 /* abts_xxxx_buf_list_lock required because worker thread uses this 1099 * list. 1100 */ 1101 spin_lock_irq(&phba->hbalock); 1102 cnt = 0; 1103 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1104 qp = &phba->sli4_hba.hdwq[idx]; 1105 1106 spin_lock(&qp->abts_io_buf_list_lock); 1107 list_splice_init(&qp->lpfc_abts_io_buf_list, 1108 &aborts); 1109 1110 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1111 psb->pCmd = NULL; 1112 psb->status = IOSTAT_SUCCESS; 1113 cnt++; 1114 } 1115 spin_lock(&qp->io_buf_list_put_lock); 1116 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1117 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1118 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1119 qp->abts_scsi_io_bufs = 0; 1120 qp->abts_nvme_io_bufs = 0; 1121 spin_unlock(&qp->io_buf_list_put_lock); 1122 spin_unlock(&qp->abts_io_buf_list_lock); 1123 } 1124 spin_unlock_irq(&phba->hbalock); 1125 1126 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1127 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1128 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1129 &nvmet_aborts); 1130 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1131 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1132 ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP); 1133 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1134 } 1135 } 1136 1137 lpfc_sli4_free_sp_events(phba); 1138 return cnt; 1139 } 1140 1141 /** 1142 * lpfc_hba_down_post - Wrapper func for hba down post routine 1143 * @phba: pointer to lpfc HBA data structure. 1144 * 1145 * This routine wraps the actual SLI3 or SLI4 routine for performing 1146 * uninitialization after the HBA is reset when bring down the SLI Layer. 1147 * 1148 * Return codes 1149 * 0 - success. 1150 * Any other value - error. 1151 **/ 1152 int 1153 lpfc_hba_down_post(struct lpfc_hba *phba) 1154 { 1155 return (*phba->lpfc_hba_down_post)(phba); 1156 } 1157 1158 /** 1159 * lpfc_hb_timeout - The HBA-timer timeout handler 1160 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1161 * 1162 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1163 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1164 * work-port-events bitmap and the worker thread is notified. 
This timeout 1165 * event will be used by the worker thread to invoke the actual timeout 1166 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1167 * be performed in the timeout handler and the HBA timeout event bit shall 1168 * be cleared by the worker thread after it has taken the event bitmap out. 1169 **/ 1170 static void 1171 lpfc_hb_timeout(struct timer_list *t) 1172 { 1173 struct lpfc_hba *phba; 1174 uint32_t tmo_posted; 1175 unsigned long iflag; 1176 1177 phba = from_timer(phba, t, hb_tmofunc); 1178 1179 /* Check for heart beat timeout conditions */ 1180 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1181 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1182 if (!tmo_posted) 1183 phba->pport->work_port_events |= WORKER_HB_TMO; 1184 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1185 1186 /* Tell the worker thread there is work to do */ 1187 if (!tmo_posted) 1188 lpfc_worker_wake_up(phba); 1189 return; 1190 } 1191 1192 /** 1193 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1194 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1195 * 1196 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1197 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1198 * work-port-events bitmap and the worker thread is notified. This timeout 1199 * event will be used by the worker thread to invoke the actual timeout 1200 * handler routine, lpfc_rrq_handler. Any periodical operations will 1201 * be performed in the timeout handler and the RRQ timeout event bit shall 1202 * be cleared by the worker thread after it has taken the event bitmap out. 1203 **/ 1204 static void 1205 lpfc_rrq_timeout(struct timer_list *t) 1206 { 1207 struct lpfc_hba *phba; 1208 unsigned long iflag; 1209 1210 phba = from_timer(phba, t, rrq_tmr); 1211 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1212 if (!(phba->pport->load_flag & FC_UNLOADING)) 1213 phba->hba_flag |= HBA_RRQ_ACTIVE; 1214 else 1215 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1216 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1217 1218 if (!(phba->pport->load_flag & FC_UNLOADING)) 1219 lpfc_worker_wake_up(phba); 1220 } 1221 1222 /** 1223 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1224 * @phba: pointer to lpfc hba data structure. 1225 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1226 * 1227 * This is the callback function to the lpfc heart-beat mailbox command. 1228 * If configured, the lpfc driver issues the heart-beat mailbox command to 1229 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1230 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1231 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1232 * heart-beat outstanding state. Once the mailbox command comes back and 1233 * no error conditions detected, the heart-beat mailbox command timer is 1234 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1235 * state is cleared for the next heart-beat. If the timer expired with the 1236 * heart-beat outstanding state set, the driver will put the HBA offline. 
1237 **/ 1238 static void 1239 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1240 { 1241 unsigned long drvr_flag; 1242 1243 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1244 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 1245 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1246 1247 /* Check and reset heart-beat timer if necessary */ 1248 mempool_free(pmboxq, phba->mbox_mem_pool); 1249 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1250 !(phba->link_state == LPFC_HBA_ERROR) && 1251 !(phba->pport->load_flag & FC_UNLOADING)) 1252 mod_timer(&phba->hb_tmofunc, 1253 jiffies + 1254 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1255 return; 1256 } 1257 1258 /* 1259 * lpfc_idle_stat_delay_work - idle_stat tracking 1260 * 1261 * This routine tracks per-cq idle_stat and determines polling decisions. 1262 * 1263 * Return codes: 1264 * None 1265 **/ 1266 static void 1267 lpfc_idle_stat_delay_work(struct work_struct *work) 1268 { 1269 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1270 struct lpfc_hba, 1271 idle_stat_delay_work); 1272 struct lpfc_queue *cq; 1273 struct lpfc_sli4_hdw_queue *hdwq; 1274 struct lpfc_idle_stat *idle_stat; 1275 u32 i, idle_percent; 1276 u64 wall, wall_idle, diff_wall, diff_idle, busy_time; 1277 1278 if (phba->pport->load_flag & FC_UNLOADING) 1279 return; 1280 1281 if (phba->link_state == LPFC_HBA_ERROR || 1282 phba->pport->fc_flag & FC_OFFLINE_MODE || 1283 phba->cmf_active_mode != LPFC_CFG_OFF) 1284 goto requeue; 1285 1286 for_each_present_cpu(i) { 1287 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; 1288 cq = hdwq->io_cq; 1289 1290 /* Skip if we've already handled this cq's primary CPU */ 1291 if (cq->chann != i) 1292 continue; 1293 1294 idle_stat = &phba->sli4_hba.idle_stat[i]; 1295 1296 /* get_cpu_idle_time returns values as running counters. Thus, 1297 * to know the amount for this period, the prior counter values 1298 * need to be subtracted from the current counter values. 1299 * From there, the idle time stat can be calculated as a 1300 * percentage of 100 - the sum of the other consumption times. 
1301 */ 1302 wall_idle = get_cpu_idle_time(i, &wall, 1); 1303 diff_idle = wall_idle - idle_stat->prev_idle; 1304 diff_wall = wall - idle_stat->prev_wall; 1305 1306 if (diff_wall <= diff_idle) 1307 busy_time = 0; 1308 else 1309 busy_time = diff_wall - diff_idle; 1310 1311 idle_percent = div64_u64(100 * busy_time, diff_wall); 1312 idle_percent = 100 - idle_percent; 1313 1314 if (idle_percent < 15) 1315 cq->poll_mode = LPFC_QUEUE_WORK; 1316 else 1317 cq->poll_mode = LPFC_IRQ_POLL; 1318 1319 idle_stat->prev_idle = wall_idle; 1320 idle_stat->prev_wall = wall; 1321 } 1322 1323 requeue: 1324 schedule_delayed_work(&phba->idle_stat_delay_work, 1325 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); 1326 } 1327 1328 static void 1329 lpfc_hb_eq_delay_work(struct work_struct *work) 1330 { 1331 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1332 struct lpfc_hba, eq_delay_work); 1333 struct lpfc_eq_intr_info *eqi, *eqi_new; 1334 struct lpfc_queue *eq, *eq_next; 1335 unsigned char *ena_delay = NULL; 1336 uint32_t usdelay; 1337 int i; 1338 1339 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1340 return; 1341 1342 if (phba->link_state == LPFC_HBA_ERROR || 1343 phba->pport->fc_flag & FC_OFFLINE_MODE) 1344 goto requeue; 1345 1346 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1347 GFP_KERNEL); 1348 if (!ena_delay) 1349 goto requeue; 1350 1351 for (i = 0; i < phba->cfg_irq_chann; i++) { 1352 /* Get the EQ corresponding to the IRQ vector */ 1353 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1354 if (!eq) 1355 continue; 1356 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1357 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1358 ena_delay[eq->last_cpu] = 1; 1359 } 1360 } 1361 1362 for_each_present_cpu(i) { 1363 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1364 if (ena_delay[i]) { 1365 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1366 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1367 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1368 } else { 1369 usdelay = 0; 1370 } 1371 1372 eqi->icnt = 0; 1373 1374 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1375 if (unlikely(eq->last_cpu != i)) { 1376 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1377 eq->last_cpu); 1378 list_move_tail(&eq->cpu_list, &eqi_new->list); 1379 continue; 1380 } 1381 if (usdelay != eq->q_mode) 1382 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1383 usdelay); 1384 } 1385 } 1386 1387 kfree(ena_delay); 1388 1389 requeue: 1390 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1391 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1392 } 1393 1394 /** 1395 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1396 * @phba: pointer to lpfc hba data structure. 1397 * 1398 * For each heartbeat, this routine does some heuristic methods to adjust 1399 * XRI distribution. The goal is to fully utilize free XRIs. 1400 **/ 1401 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1402 { 1403 u32 i; 1404 u32 hwq_count; 1405 1406 hwq_count = phba->cfg_hdw_queue; 1407 for (i = 0; i < hwq_count; i++) { 1408 /* Adjust XRIs in private pool */ 1409 lpfc_adjust_pvt_pool_count(phba, i); 1410 1411 /* Adjust high watermark */ 1412 lpfc_adjust_high_watermark(phba, i); 1413 1414 #ifdef LPFC_MXP_STAT 1415 /* Snapshot pbl, pvt and busy count */ 1416 lpfc_snapshot_mxp(phba, i); 1417 #endif 1418 } 1419 } 1420 1421 /** 1422 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command 1423 * @phba: pointer to lpfc hba data structure. 
1424 * 1425 * If a HB mbox is not already in progress, this routine will allocate 1426 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command, 1427 * and issue it. The HBA_HBEAT_INP flag means the command is in progress. 1428 **/ 1429 int 1430 lpfc_issue_hb_mbox(struct lpfc_hba *phba) 1431 { 1432 LPFC_MBOXQ_t *pmboxq; 1433 int retval; 1434 1435 /* Is a Heartbeat mbox already in progress */ 1436 if (phba->hba_flag & HBA_HBEAT_INP) 1437 return 0; 1438 1439 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1440 if (!pmboxq) 1441 return -ENOMEM; 1442 1443 lpfc_heart_beat(phba, pmboxq); 1444 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1445 pmboxq->vport = phba->pport; 1446 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 1447 1448 if (retval != MBX_BUSY && retval != MBX_SUCCESS) { 1449 mempool_free(pmboxq, phba->mbox_mem_pool); 1450 return -ENXIO; 1451 } 1452 phba->hba_flag |= HBA_HBEAT_INP; 1453 1454 return 0; 1455 } 1456 1457 /** 1458 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command 1459 * @phba: pointer to lpfc hba data structure. 1460 * 1461 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO 1462 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless 1463 * of the value of lpfc_enable_hba_heartbeat. 1464 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always 1465 * try to issue a MBX_HEARTBEAT mbox command. 1466 **/ 1467 void 1468 lpfc_issue_hb_tmo(struct lpfc_hba *phba) 1469 { 1470 if (phba->cfg_enable_hba_heartbeat) 1471 return; 1472 phba->hba_flag |= HBA_HBEAT_TMO; 1473 } 1474 1475 /** 1476 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1477 * @phba: pointer to lpfc hba data structure. 1478 * 1479 * This is the actual HBA-timer timeout handler to be invoked by the worker 1480 * thread whenever the HBA timer has fired and an HBA-timeout event has been 1481 * posted. This handler performs any periodic operations needed for the device. 1482 * If such a periodic event has already been attended to either in the interrupt 1483 * handler or by processing slow-ring or fast-ring events within the HBA-timer 1484 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets 1485 * the timer for the next timeout period. If the lpfc heart-beat mailbox command 1486 * is configured and there is no heart-beat mailbox command outstanding, a 1487 * heart-beat mailbox command is issued and the timer is set properly. Otherwise, 1488 * if there has been a heart-beat mailbox command outstanding, the HBA shall be 1489 * taken offline.
1490 **/ 1491 void 1492 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1493 { 1494 struct lpfc_vport **vports; 1495 struct lpfc_dmabuf *buf_ptr; 1496 int retval = 0; 1497 int i, tmo; 1498 struct lpfc_sli *psli = &phba->sli; 1499 LIST_HEAD(completions); 1500 1501 if (phba->cfg_xri_rebalancing) { 1502 /* Multi-XRI pools handler */ 1503 lpfc_hb_mxp_handler(phba); 1504 } 1505 1506 vports = lpfc_create_vport_work_array(phba); 1507 if (vports != NULL) 1508 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1509 lpfc_rcv_seq_check_edtov(vports[i]); 1510 lpfc_fdmi_change_check(vports[i]); 1511 } 1512 lpfc_destroy_vport_work_array(phba, vports); 1513 1514 if ((phba->link_state == LPFC_HBA_ERROR) || 1515 (phba->pport->load_flag & FC_UNLOADING) || 1516 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1517 return; 1518 1519 if (phba->elsbuf_cnt && 1520 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1521 spin_lock_irq(&phba->hbalock); 1522 list_splice_init(&phba->elsbuf, &completions); 1523 phba->elsbuf_cnt = 0; 1524 phba->elsbuf_prev_cnt = 0; 1525 spin_unlock_irq(&phba->hbalock); 1526 1527 while (!list_empty(&completions)) { 1528 list_remove_head(&completions, buf_ptr, 1529 struct lpfc_dmabuf, list); 1530 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1531 kfree(buf_ptr); 1532 } 1533 } 1534 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1535 1536 /* If there is no heart beat outstanding, issue a heartbeat command */ 1537 if (phba->cfg_enable_hba_heartbeat) { 1538 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ 1539 spin_lock_irq(&phba->pport->work_port_lock); 1540 if (time_after(phba->last_completion_time + 1541 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1542 jiffies)) { 1543 spin_unlock_irq(&phba->pport->work_port_lock); 1544 if (phba->hba_flag & HBA_HBEAT_INP) 1545 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1546 else 1547 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1548 goto out; 1549 } 1550 spin_unlock_irq(&phba->pport->work_port_lock); 1551 1552 /* Check if a MBX_HEARTBEAT is already in progress */ 1553 if (phba->hba_flag & HBA_HBEAT_INP) { 1554 /* 1555 * If heart beat timeout called with HBA_HBEAT_INP set 1556 * we need to give the hb mailbox cmd a chance to 1557 * complete or TMO. 
1558 */ 1559 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1560 "0459 Adapter heartbeat still outstanding: " 1561 "last compl time was %d ms.\n", 1562 jiffies_to_msecs(jiffies 1563 - phba->last_completion_time)); 1564 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1565 } else { 1566 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1567 (list_empty(&psli->mboxq))) { 1568 1569 retval = lpfc_issue_hb_mbox(phba); 1570 if (retval) { 1571 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1572 goto out; 1573 } 1574 phba->skipped_hb = 0; 1575 } else if (time_before_eq(phba->last_completion_time, 1576 phba->skipped_hb)) { 1577 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1578 "2857 Last completion time not " 1579 " updated in %d ms\n", 1580 jiffies_to_msecs(jiffies 1581 - phba->last_completion_time)); 1582 } else 1583 phba->skipped_hb = jiffies; 1584 1585 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1586 goto out; 1587 } 1588 } else { 1589 /* Check to see if we want to force a MBX_HEARTBEAT */ 1590 if (phba->hba_flag & HBA_HBEAT_TMO) { 1591 retval = lpfc_issue_hb_mbox(phba); 1592 if (retval) 1593 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1594 else 1595 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1596 goto out; 1597 } 1598 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1599 } 1600 out: 1601 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); 1602 } 1603 1604 /** 1605 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1606 * @phba: pointer to lpfc hba data structure. 1607 * 1608 * This routine is called to bring the HBA offline when HBA hardware error 1609 * other than Port Error 6 has been detected. 1610 **/ 1611 static void 1612 lpfc_offline_eratt(struct lpfc_hba *phba) 1613 { 1614 struct lpfc_sli *psli = &phba->sli; 1615 1616 spin_lock_irq(&phba->hbalock); 1617 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1618 spin_unlock_irq(&phba->hbalock); 1619 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1620 1621 lpfc_offline(phba); 1622 lpfc_reset_barrier(phba); 1623 spin_lock_irq(&phba->hbalock); 1624 lpfc_sli_brdreset(phba); 1625 spin_unlock_irq(&phba->hbalock); 1626 lpfc_hba_down_post(phba); 1627 lpfc_sli_brdready(phba, HS_MBRDY); 1628 lpfc_unblock_mgmt_io(phba); 1629 phba->link_state = LPFC_HBA_ERROR; 1630 return; 1631 } 1632 1633 /** 1634 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1635 * @phba: pointer to lpfc hba data structure. 1636 * 1637 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1638 * other than Port Error 6 has been detected. 1639 **/ 1640 void 1641 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1642 { 1643 spin_lock_irq(&phba->hbalock); 1644 if (phba->link_state == LPFC_HBA_ERROR && 1645 phba->hba_flag & HBA_PCI_ERR) { 1646 spin_unlock_irq(&phba->hbalock); 1647 return; 1648 } 1649 phba->link_state = LPFC_HBA_ERROR; 1650 spin_unlock_irq(&phba->hbalock); 1651 1652 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1653 lpfc_sli_flush_io_rings(phba); 1654 lpfc_offline(phba); 1655 lpfc_hba_down_post(phba); 1656 lpfc_unblock_mgmt_io(phba); 1657 } 1658 1659 /** 1660 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1661 * @phba: pointer to lpfc hba data structure. 1662 * 1663 * This routine is invoked to handle the deferred HBA hardware error 1664 * conditions. This type of error is indicated by HBA by setting ER1 1665 * and another ER bit in the host status register. The driver will 1666 * wait until the ER1 bit clears before handling the error condition. 
1667 **/ 1668 static void 1669 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1670 { 1671 uint32_t old_host_status = phba->work_hs; 1672 struct lpfc_sli *psli = &phba->sli; 1673 1674 /* If the pci channel is offline, ignore possible errors, 1675 * since we cannot communicate with the pci card anyway. 1676 */ 1677 if (pci_channel_offline(phba->pcidev)) { 1678 spin_lock_irq(&phba->hbalock); 1679 phba->hba_flag &= ~DEFER_ERATT; 1680 spin_unlock_irq(&phba->hbalock); 1681 return; 1682 } 1683 1684 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1685 "0479 Deferred Adapter Hardware Error " 1686 "Data: x%x x%x x%x\n", 1687 phba->work_hs, phba->work_status[0], 1688 phba->work_status[1]); 1689 1690 spin_lock_irq(&phba->hbalock); 1691 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1692 spin_unlock_irq(&phba->hbalock); 1693 1694 1695 /* 1696 * Firmware stops when it triggers erratt. That could cause I/Os to be 1697 * dropped by the firmware. Error out the iocbs (I/Os) on the txcmplq and 1698 * let the SCSI layer retry them after re-establishing the link. 1699 */ 1700 lpfc_sli_abort_fcp_rings(phba); 1701 1702 /* 1703 * There was a firmware error. Take the hba offline and then 1704 * attempt to restart it. 1705 */ 1706 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 1707 lpfc_offline(phba); 1708 1709 /* Wait for the ER1 bit to clear. */ 1710 while (phba->work_hs & HS_FFER1) { 1711 msleep(100); 1712 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1713 phba->work_hs = UNPLUG_ERR; 1714 break; 1715 } 1716 /* If the driver is unloading, let the worker thread continue */ 1717 if (phba->pport->load_flag & FC_UNLOADING) { 1718 phba->work_hs = 0; 1719 break; 1720 } 1721 } 1722 1723 /* 1724 * This is to protect against a race condition in which the 1725 * first write to the host attention register clears the 1726 * host status register. 1727 */ 1728 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1729 phba->work_hs = old_host_status & ~HS_FFER1; 1730 1731 spin_lock_irq(&phba->hbalock); 1732 phba->hba_flag &= ~DEFER_ERATT; 1733 spin_unlock_irq(&phba->hbalock); 1734 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1735 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1736 } 1737 1738 static void 1739 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1740 { 1741 struct lpfc_board_event_header board_event; 1742 struct Scsi_Host *shost; 1743 1744 board_event.event_type = FC_REG_BOARD_EVENT; 1745 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1746 shost = lpfc_shost_from_vport(phba->pport); 1747 fc_host_post_vendor_event(shost, fc_get_event_number(), 1748 sizeof(board_event), 1749 (char *) &board_event, 1750 LPFC_NL_VENDOR_ID); 1751 } 1752 1753 /** 1754 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1755 * @phba: pointer to lpfc hba data structure. 1756 * 1757 * This routine is invoked to handle the following HBA hardware error 1758 * conditions: 1759 * 1 - HBA error attention interrupt 1760 * 2 - DMA ring index out of range 1761 * 3 - Mailbox command came back as unknown 1762 **/ 1763 static void 1764 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1765 { 1766 struct lpfc_vport *vport = phba->pport; 1767 struct lpfc_sli *psli = &phba->sli; 1768 uint32_t event_data; 1769 unsigned long temperature; 1770 struct temp_event temp_event_data; 1771 struct Scsi_Host *shost; 1772 1773 /* If the pci channel is offline, ignore possible errors, 1774 * since we cannot communicate with the pci card anyway.
1775 */ 1776 if (pci_channel_offline(phba->pcidev)) { 1777 spin_lock_irq(&phba->hbalock); 1778 phba->hba_flag &= ~DEFER_ERATT; 1779 spin_unlock_irq(&phba->hbalock); 1780 return; 1781 } 1782 1783 /* If resets are disabled then leave the HBA alone and return */ 1784 if (!phba->cfg_enable_hba_reset) 1785 return; 1786 1787 /* Send an internal error event to mgmt application */ 1788 lpfc_board_errevt_to_mgmt(phba); 1789 1790 if (phba->hba_flag & DEFER_ERATT) 1791 lpfc_handle_deferred_eratt(phba); 1792 1793 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1794 if (phba->work_hs & HS_FFER6) 1795 /* Re-establishing Link */ 1796 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1797 "1301 Re-establishing Link " 1798 "Data: x%x x%x x%x\n", 1799 phba->work_hs, phba->work_status[0], 1800 phba->work_status[1]); 1801 if (phba->work_hs & HS_FFER8) 1802 /* Device Zeroization */ 1803 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1804 "2861 Host Authentication device " 1805 "zeroization Data:x%x x%x x%x\n", 1806 phba->work_hs, phba->work_status[0], 1807 phba->work_status[1]); 1808 1809 spin_lock_irq(&phba->hbalock); 1810 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1811 spin_unlock_irq(&phba->hbalock); 1812 1813 /* 1814 * Firmware stops when it triggered erratt with HS_FFER6. 1815 * That could cause I/Os to be dropped by the firmware. 1816 * Error out the iocbs (I/Os) on txcmplq and let the SCSI layer 1817 * retry them after re-establishing link. 1818 */ 1819 lpfc_sli_abort_fcp_rings(phba); 1820 1821 /* 1822 * There was a firmware error. Take the hba offline and then 1823 * attempt to restart it. 1824 */ 1825 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1826 lpfc_offline(phba); 1827 lpfc_sli_brdrestart(phba); 1828 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1829 lpfc_unblock_mgmt_io(phba); 1830 return; 1831 } 1832 lpfc_unblock_mgmt_io(phba); 1833 } else if (phba->work_hs & HS_CRIT_TEMP) { 1834 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1835 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1836 temp_event_data.event_code = LPFC_CRIT_TEMP; 1837 temp_event_data.data = (uint32_t)temperature; 1838 1839 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1840 "0406 Adapter maximum temperature exceeded " 1841 "(%ld), taking this port offline " 1842 "Data: x%x x%x x%x\n", 1843 temperature, phba->work_hs, 1844 phba->work_status[0], phba->work_status[1]); 1845 1846 shost = lpfc_shost_from_vport(phba->pport); 1847 fc_host_post_vendor_event(shost, fc_get_event_number(), 1848 sizeof(temp_event_data), 1849 (char *) &temp_event_data, 1850 SCSI_NL_VID_TYPE_PCI 1851 | PCI_VENDOR_ID_EMULEX); 1852 1853 spin_lock_irq(&phba->hbalock); 1854 phba->over_temp_state = HBA_OVER_TEMP; 1855 spin_unlock_irq(&phba->hbalock); 1856 lpfc_offline_eratt(phba); 1857 1858 } else { 1859 /* The if clause above forces this code path when the status 1860 * failure is a value other than FFER6. Do not call the offline 1861 * twice. This is the adapter hardware error path.
1862 */ 1863 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1864 "0457 Adapter Hardware Error " 1865 "Data: x%x x%x x%x\n", 1866 phba->work_hs, 1867 phba->work_status[0], phba->work_status[1]); 1868 1869 event_data = FC_REG_DUMP_EVENT; 1870 shost = lpfc_shost_from_vport(vport); 1871 fc_host_post_vendor_event(shost, fc_get_event_number(), 1872 sizeof(event_data), (char *) &event_data, 1873 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1874 1875 lpfc_offline_eratt(phba); 1876 } 1877 return; 1878 } 1879 1880 /** 1881 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1882 * @phba: pointer to lpfc hba data structure. 1883 * @mbx_action: flag for mailbox shutdown action. 1884 * @en_rn_msg: send reset/port recovery message. 1885 * This routine is invoked to perform an SLI4 port PCI function reset in 1886 * response to port status register polling attention. It waits for port 1887 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1888 * During this process, interrupt vectors are freed and later requested 1889 * for handling possible port resource change. 1890 **/ 1891 static int 1892 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1893 bool en_rn_msg) 1894 { 1895 int rc; 1896 uint32_t intr_mode; 1897 LPFC_MBOXQ_t *mboxq; 1898 1899 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1900 LPFC_SLI_INTF_IF_TYPE_2) { 1901 /* 1902 * On error status condition, driver need to wait for port 1903 * ready before performing reset. 1904 */ 1905 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1906 if (rc) 1907 return rc; 1908 } 1909 1910 /* need reset: attempt for port recovery */ 1911 if (en_rn_msg) 1912 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1913 "2887 Reset Needed: Attempting Port " 1914 "Recovery...\n"); 1915 1916 /* If we are no wait, the HBA has been reset and is not 1917 * functional, thus we should clear 1918 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. 1919 */ 1920 if (mbx_action == LPFC_MBX_NO_WAIT) { 1921 spin_lock_irq(&phba->hbalock); 1922 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 1923 if (phba->sli.mbox_active) { 1924 mboxq = phba->sli.mbox_active; 1925 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 1926 __lpfc_mbox_cmpl_put(phba, mboxq); 1927 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1928 phba->sli.mbox_active = NULL; 1929 } 1930 spin_unlock_irq(&phba->hbalock); 1931 } 1932 1933 lpfc_offline_prep(phba, mbx_action); 1934 lpfc_sli_flush_io_rings(phba); 1935 lpfc_offline(phba); 1936 /* release interrupt for possible resource change */ 1937 lpfc_sli4_disable_intr(phba); 1938 rc = lpfc_sli_brdrestart(phba); 1939 if (rc) { 1940 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1941 "6309 Failed to restart board\n"); 1942 return rc; 1943 } 1944 /* request and enable interrupt */ 1945 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1946 if (intr_mode == LPFC_INTR_ERROR) { 1947 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1948 "3175 Failed to enable interrupt\n"); 1949 return -EIO; 1950 } 1951 phba->intr_mode = intr_mode; 1952 rc = lpfc_online(phba); 1953 if (rc == 0) 1954 lpfc_unblock_mgmt_io(phba); 1955 1956 return rc; 1957 } 1958 1959 /** 1960 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1961 * @phba: pointer to lpfc hba data structure. 1962 * 1963 * This routine is invoked to handle the SLI4 HBA hardware error attention 1964 * conditions. 
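 * In outline: for if_type 0 ports the unrecoverable-error registers are read
 * and, when the error is flagged recoverable, the port semaphore is polled
 * until firmware reports ready before a function reset is attempted; for
 * if_type 2/6 ports the port status and ERR1/ERR2 registers are decoded
 * (over-temperature, firmware restart, forced dump, provisioning) before the
 * function reset. Unrecoverable cases leave the HBA in LPFC_HBA_ERROR and a
 * dump event is reported to the management application.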
1965 **/ 1966 static void 1967 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1968 { 1969 struct lpfc_vport *vport = phba->pport; 1970 uint32_t event_data; 1971 struct Scsi_Host *shost; 1972 uint32_t if_type; 1973 struct lpfc_register portstat_reg = {0}; 1974 uint32_t reg_err1, reg_err2; 1975 uint32_t uerrlo_reg, uemasklo_reg; 1976 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1977 bool en_rn_msg = true; 1978 struct temp_event temp_event_data; 1979 struct lpfc_register portsmphr_reg; 1980 int rc, i; 1981 1982 /* If the pci channel is offline, ignore possible errors, since 1983 * we cannot communicate with the pci card anyway. 1984 */ 1985 if (pci_channel_offline(phba->pcidev)) { 1986 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1987 "3166 pci channel is offline\n"); 1988 return; 1989 } 1990 1991 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1992 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1993 switch (if_type) { 1994 case LPFC_SLI_INTF_IF_TYPE_0: 1995 pci_rd_rc1 = lpfc_readl( 1996 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1997 &uerrlo_reg); 1998 pci_rd_rc2 = lpfc_readl( 1999 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 2000 &uemasklo_reg); 2001 /* consider PCI bus read error as pci_channel_offline */ 2002 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 2003 return; 2004 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 2005 lpfc_sli4_offline_eratt(phba); 2006 return; 2007 } 2008 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2009 "7623 Checking UE recoverable"); 2010 2011 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 2012 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2013 &portsmphr_reg.word0)) 2014 continue; 2015 2016 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 2017 &portsmphr_reg); 2018 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2019 LPFC_PORT_SEM_UE_RECOVERABLE) 2020 break; 2021 /*Sleep for 1Sec, before checking SEMAPHORE */ 2022 msleep(1000); 2023 } 2024 2025 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2026 "4827 smphr_port_status x%x : Waited %dSec", 2027 smphr_port_status, i); 2028 2029 /* Recoverable UE, reset the HBA device */ 2030 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2031 LPFC_PORT_SEM_UE_RECOVERABLE) { 2032 for (i = 0; i < 20; i++) { 2033 msleep(1000); 2034 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2035 &portsmphr_reg.word0) && 2036 (LPFC_POST_STAGE_PORT_READY == 2037 bf_get(lpfc_port_smphr_port_status, 2038 &portsmphr_reg))) { 2039 rc = lpfc_sli4_port_sta_fn_reset(phba, 2040 LPFC_MBX_NO_WAIT, en_rn_msg); 2041 if (rc == 0) 2042 return; 2043 lpfc_printf_log(phba, KERN_ERR, 2044 LOG_TRACE_EVENT, 2045 "4215 Failed to recover UE"); 2046 break; 2047 } 2048 } 2049 } 2050 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2051 "7624 Firmware not ready: Failing UE recovery," 2052 " waited %dSec", i); 2053 phba->link_state = LPFC_HBA_ERROR; 2054 break; 2055 2056 case LPFC_SLI_INTF_IF_TYPE_2: 2057 case LPFC_SLI_INTF_IF_TYPE_6: 2058 pci_rd_rc1 = lpfc_readl( 2059 phba->sli4_hba.u.if_type2.STATUSregaddr, 2060 &portstat_reg.word0); 2061 /* consider PCI bus read error as pci_channel_offline */ 2062 if (pci_rd_rc1 == -EIO) { 2063 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2064 "3151 PCI bus read access failure: x%x\n", 2065 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 2066 lpfc_sli4_offline_eratt(phba); 2067 return; 2068 } 2069 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 2070 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 2071 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 2072 
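/* Port status reports an over-temperature condition: log it, post a
 * critical temperature event to the FC transport, and take the port offline.
 */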
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2073 "2889 Port Overtemperature event, " 2074 "taking port offline Data: x%x x%x\n", 2075 reg_err1, reg_err2); 2076 2077 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 2078 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 2079 temp_event_data.event_code = LPFC_CRIT_TEMP; 2080 temp_event_data.data = 0xFFFFFFFF; 2081 2082 shost = lpfc_shost_from_vport(phba->pport); 2083 fc_host_post_vendor_event(shost, fc_get_event_number(), 2084 sizeof(temp_event_data), 2085 (char *)&temp_event_data, 2086 SCSI_NL_VID_TYPE_PCI 2087 | PCI_VENDOR_ID_EMULEX); 2088 2089 spin_lock_irq(&phba->hbalock); 2090 phba->over_temp_state = HBA_OVER_TEMP; 2091 spin_unlock_irq(&phba->hbalock); 2092 lpfc_sli4_offline_eratt(phba); 2093 return; 2094 } 2095 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2096 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 2097 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2098 "3143 Port Down: Firmware Update " 2099 "Detected\n"); 2100 en_rn_msg = false; 2101 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2102 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2103 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2104 "3144 Port Down: Debug Dump\n"); 2105 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2106 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 2107 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2108 "3145 Port Down: Provisioning\n"); 2109 2110 /* If resets are disabled then leave the HBA alone and return */ 2111 if (!phba->cfg_enable_hba_reset) 2112 return; 2113 2114 /* Check port status register for function reset */ 2115 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 2116 en_rn_msg); 2117 if (rc == 0) { 2118 /* don't report event on forced debug dump */ 2119 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2120 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2121 return; 2122 else 2123 break; 2124 } 2125 /* fall through for not able to recover */ 2126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2127 "3152 Unrecoverable error\n"); 2128 phba->link_state = LPFC_HBA_ERROR; 2129 break; 2130 case LPFC_SLI_INTF_IF_TYPE_1: 2131 default: 2132 break; 2133 } 2134 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2135 "3123 Report dump event to upper layer\n"); 2136 /* Send an internal error event to mgmt application */ 2137 lpfc_board_errevt_to_mgmt(phba); 2138 2139 event_data = FC_REG_DUMP_EVENT; 2140 shost = lpfc_shost_from_vport(vport); 2141 fc_host_post_vendor_event(shost, fc_get_event_number(), 2142 sizeof(event_data), (char *) &event_data, 2143 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2144 } 2145 2146 /** 2147 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2148 * @phba: pointer to lpfc HBA data structure. 2149 * 2150 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2151 * routine from the API jump table function pointer from the lpfc_hba struct. 2152 * 2153 * Return codes 2154 * 0 - success. 2155 * Any other value - error. 2156 **/ 2157 void 2158 lpfc_handle_eratt(struct lpfc_hba *phba) 2159 { 2160 (*phba->lpfc_handle_eratt)(phba); 2161 } 2162 2163 /** 2164 * lpfc_handle_latt - The HBA link event handler 2165 * @phba: pointer to lpfc hba data structure. 2166 * 2167 * This routine is invoked from the worker thread to handle a HBA host 2168 * attention link event. SLI3 only. 
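 * It flushes outstanding ELS commands, issues a READ_TOPOLOGY mailbox
 * command (completed by lpfc_mbx_cmpl_read_topology) and blocks ELS IOCBs
 * until that mailbox finishes; on any failure it re-enables link attention
 * interrupts, clears the latched attention and marks the HBA in error.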
2169 **/ 2170 void 2171 lpfc_handle_latt(struct lpfc_hba *phba) 2172 { 2173 struct lpfc_vport *vport = phba->pport; 2174 struct lpfc_sli *psli = &phba->sli; 2175 LPFC_MBOXQ_t *pmb; 2176 volatile uint32_t control; 2177 struct lpfc_dmabuf *mp; 2178 int rc = 0; 2179 2180 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2181 if (!pmb) { 2182 rc = 1; 2183 goto lpfc_handle_latt_err_exit; 2184 } 2185 2186 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2187 if (!mp) { 2188 rc = 2; 2189 goto lpfc_handle_latt_free_pmb; 2190 } 2191 2192 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2193 if (!mp->virt) { 2194 rc = 3; 2195 goto lpfc_handle_latt_free_mp; 2196 } 2197 2198 /* Cleanup any outstanding ELS commands */ 2199 lpfc_els_flush_all_cmd(phba); 2200 2201 psli->slistat.link_event++; 2202 lpfc_read_topology(phba, pmb, mp); 2203 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2204 pmb->vport = vport; 2205 /* Block ELS IOCBs until we have processed this mbox command */ 2206 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2207 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2208 if (rc == MBX_NOT_FINISHED) { 2209 rc = 4; 2210 goto lpfc_handle_latt_free_mbuf; 2211 } 2212 2213 /* Clear Link Attention in HA REG */ 2214 spin_lock_irq(&phba->hbalock); 2215 writel(HA_LATT, phba->HAregaddr); 2216 readl(phba->HAregaddr); /* flush */ 2217 spin_unlock_irq(&phba->hbalock); 2218 2219 return; 2220 2221 lpfc_handle_latt_free_mbuf: 2222 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2223 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2224 lpfc_handle_latt_free_mp: 2225 kfree(mp); 2226 lpfc_handle_latt_free_pmb: 2227 mempool_free(pmb, phba->mbox_mem_pool); 2228 lpfc_handle_latt_err_exit: 2229 /* Enable Link attention interrupts */ 2230 spin_lock_irq(&phba->hbalock); 2231 psli->sli_flag |= LPFC_PROCESS_LA; 2232 control = readl(phba->HCregaddr); 2233 control |= HC_LAINT_ENA; 2234 writel(control, phba->HCregaddr); 2235 readl(phba->HCregaddr); /* flush */ 2236 2237 /* Clear Link Attention in HA REG */ 2238 writel(HA_LATT, phba->HAregaddr); 2239 readl(phba->HAregaddr); /* flush */ 2240 spin_unlock_irq(&phba->hbalock); 2241 lpfc_linkdown(phba); 2242 phba->link_state = LPFC_HBA_ERROR; 2243 2244 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2245 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2246 2247 return; 2248 } 2249 2250 /** 2251 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2252 * @phba: pointer to lpfc hba data structure. 2253 * @vpd: pointer to the vital product data. 2254 * @len: length of the vital product data in bytes. 2255 * 2256 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2257 * an array of characters. In this routine, the ModelName, ProgramType, and 2258 * ModelDesc, etc. fields of the phba data structure will be populated. 
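 * The buffer is walked as a sequence of VPD resource tags: tags 0x82 and
 * 0x91 are skipped over using their 16-bit length, tag 0x90 (read-only VPD)
 * is scanned for the SN and V1-V4 keywords that fill the SerialNumber,
 * ModelDesc, ModelName, ProgramType and Port fields, and tag 0x78
 * terminates the parse.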
2259 * 2260 * Return codes 2261 * 0 - pointer to the VPD passed in is NULL 2262 * 1 - success 2263 **/ 2264 int 2265 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2266 { 2267 uint8_t lenlo, lenhi; 2268 int Length; 2269 int i, j; 2270 int finished = 0; 2271 int index = 0; 2272 2273 if (!vpd) 2274 return 0; 2275 2276 /* Vital Product */ 2277 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2278 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2279 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2280 (uint32_t) vpd[3]); 2281 while (!finished && (index < (len - 4))) { 2282 switch (vpd[index]) { 2283 case 0x82: 2284 case 0x91: 2285 index += 1; 2286 lenlo = vpd[index]; 2287 index += 1; 2288 lenhi = vpd[index]; 2289 index += 1; 2290 i = ((((unsigned short)lenhi) << 8) + lenlo); 2291 index += i; 2292 break; 2293 case 0x90: 2294 index += 1; 2295 lenlo = vpd[index]; 2296 index += 1; 2297 lenhi = vpd[index]; 2298 index += 1; 2299 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2300 if (Length > len - index) 2301 Length = len - index; 2302 while (Length > 0) { 2303 /* Look for Serial Number */ 2304 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2305 index += 2; 2306 i = vpd[index]; 2307 index += 1; 2308 j = 0; 2309 Length -= (3+i); 2310 while(i--) { 2311 phba->SerialNumber[j++] = vpd[index++]; 2312 if (j == 31) 2313 break; 2314 } 2315 phba->SerialNumber[j] = 0; 2316 continue; 2317 } 2318 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2319 phba->vpd_flag |= VPD_MODEL_DESC; 2320 index += 2; 2321 i = vpd[index]; 2322 index += 1; 2323 j = 0; 2324 Length -= (3+i); 2325 while(i--) { 2326 phba->ModelDesc[j++] = vpd[index++]; 2327 if (j == 255) 2328 break; 2329 } 2330 phba->ModelDesc[j] = 0; 2331 continue; 2332 } 2333 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2334 phba->vpd_flag |= VPD_MODEL_NAME; 2335 index += 2; 2336 i = vpd[index]; 2337 index += 1; 2338 j = 0; 2339 Length -= (3+i); 2340 while(i--) { 2341 phba->ModelName[j++] = vpd[index++]; 2342 if (j == 79) 2343 break; 2344 } 2345 phba->ModelName[j] = 0; 2346 continue; 2347 } 2348 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2349 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2350 index += 2; 2351 i = vpd[index]; 2352 index += 1; 2353 j = 0; 2354 Length -= (3+i); 2355 while(i--) { 2356 phba->ProgramType[j++] = vpd[index++]; 2357 if (j == 255) 2358 break; 2359 } 2360 phba->ProgramType[j] = 0; 2361 continue; 2362 } 2363 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2364 phba->vpd_flag |= VPD_PORT; 2365 index += 2; 2366 i = vpd[index]; 2367 index += 1; 2368 j = 0; 2369 Length -= (3+i); 2370 while(i--) { 2371 if ((phba->sli_rev == LPFC_SLI_REV4) && 2372 (phba->sli4_hba.pport_name_sta == 2373 LPFC_SLI4_PPNAME_GET)) { 2374 j++; 2375 index++; 2376 } else 2377 phba->Port[j++] = vpd[index++]; 2378 if (j == 19) 2379 break; 2380 } 2381 if ((phba->sli_rev != LPFC_SLI_REV4) || 2382 (phba->sli4_hba.pport_name_sta == 2383 LPFC_SLI4_PPNAME_NON)) 2384 phba->Port[j] = 0; 2385 continue; 2386 } 2387 else { 2388 index += 2; 2389 i = vpd[index]; 2390 index += 1; 2391 index += i; 2392 Length -= (3 + i); 2393 } 2394 } 2395 finished = 0; 2396 break; 2397 case 0x78: 2398 finished = 1; 2399 break; 2400 default: 2401 index ++; 2402 break; 2403 } 2404 } 2405 2406 return(1); 2407 } 2408 2409 /** 2410 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2411 * @phba: pointer to lpfc hba data structure. 2412 * @mdp: pointer to the data structure to hold the derived model name. 
2413 * @descp: pointer to the data structure to hold the derived description. 2414 * 2415 * This routine retrieves HBA's description based on its registered PCI device 2416 * ID. The @descp passed into this function points to an array of 256 chars. It 2417 * shall be returned with the model name, maximum speed, and the host bus type. 2418 * The @mdp passed into this function points to an array of 80 chars. When the 2419 * function returns, the @mdp will be filled with the model name. 2420 **/ 2421 static void 2422 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2423 { 2424 lpfc_vpd_t *vp; 2425 uint16_t dev_id = phba->pcidev->device; 2426 int max_speed; 2427 int GE = 0; 2428 int oneConnect = 0; /* default is not a oneConnect */ 2429 struct { 2430 char *name; 2431 char *bus; 2432 char *function; 2433 } m = {"<Unknown>", "", ""}; 2434 2435 if (mdp && mdp[0] != '\0' 2436 && descp && descp[0] != '\0') 2437 return; 2438 2439 if (phba->lmt & LMT_64Gb) 2440 max_speed = 64; 2441 else if (phba->lmt & LMT_32Gb) 2442 max_speed = 32; 2443 else if (phba->lmt & LMT_16Gb) 2444 max_speed = 16; 2445 else if (phba->lmt & LMT_10Gb) 2446 max_speed = 10; 2447 else if (phba->lmt & LMT_8Gb) 2448 max_speed = 8; 2449 else if (phba->lmt & LMT_4Gb) 2450 max_speed = 4; 2451 else if (phba->lmt & LMT_2Gb) 2452 max_speed = 2; 2453 else if (phba->lmt & LMT_1Gb) 2454 max_speed = 1; 2455 else 2456 max_speed = 0; 2457 2458 vp = &phba->vpd; 2459 2460 switch (dev_id) { 2461 case PCI_DEVICE_ID_FIREFLY: 2462 m = (typeof(m)){"LP6000", "PCI", 2463 "Obsolete, Unsupported Fibre Channel Adapter"}; 2464 break; 2465 case PCI_DEVICE_ID_SUPERFLY: 2466 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2467 m = (typeof(m)){"LP7000", "PCI", ""}; 2468 else 2469 m = (typeof(m)){"LP7000E", "PCI", ""}; 2470 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2471 break; 2472 case PCI_DEVICE_ID_DRAGONFLY: 2473 m = (typeof(m)){"LP8000", "PCI", 2474 "Obsolete, Unsupported Fibre Channel Adapter"}; 2475 break; 2476 case PCI_DEVICE_ID_CENTAUR: 2477 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2478 m = (typeof(m)){"LP9002", "PCI", ""}; 2479 else 2480 m = (typeof(m)){"LP9000", "PCI", ""}; 2481 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2482 break; 2483 case PCI_DEVICE_ID_RFLY: 2484 m = (typeof(m)){"LP952", "PCI", 2485 "Obsolete, Unsupported Fibre Channel Adapter"}; 2486 break; 2487 case PCI_DEVICE_ID_PEGASUS: 2488 m = (typeof(m)){"LP9802", "PCI-X", 2489 "Obsolete, Unsupported Fibre Channel Adapter"}; 2490 break; 2491 case PCI_DEVICE_ID_THOR: 2492 m = (typeof(m)){"LP10000", "PCI-X", 2493 "Obsolete, Unsupported Fibre Channel Adapter"}; 2494 break; 2495 case PCI_DEVICE_ID_VIPER: 2496 m = (typeof(m)){"LPX1000", "PCI-X", 2497 "Obsolete, Unsupported Fibre Channel Adapter"}; 2498 break; 2499 case PCI_DEVICE_ID_PFLY: 2500 m = (typeof(m)){"LP982", "PCI-X", 2501 "Obsolete, Unsupported Fibre Channel Adapter"}; 2502 break; 2503 case PCI_DEVICE_ID_TFLY: 2504 m = (typeof(m)){"LP1050", "PCI-X", 2505 "Obsolete, Unsupported Fibre Channel Adapter"}; 2506 break; 2507 case PCI_DEVICE_ID_HELIOS: 2508 m = (typeof(m)){"LP11000", "PCI-X2", 2509 "Obsolete, Unsupported Fibre Channel Adapter"}; 2510 break; 2511 case PCI_DEVICE_ID_HELIOS_SCSP: 2512 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2513 "Obsolete, Unsupported Fibre Channel Adapter"}; 2514 break; 2515 case PCI_DEVICE_ID_HELIOS_DCSP: 2516 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2517 "Obsolete, Unsupported Fibre Channel Adapter"}; 2518 break; 2519 case 
PCI_DEVICE_ID_NEPTUNE: 2520 m = (typeof(m)){"LPe1000", "PCIe", 2521 "Obsolete, Unsupported Fibre Channel Adapter"}; 2522 break; 2523 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2524 m = (typeof(m)){"LPe1000-SP", "PCIe", 2525 "Obsolete, Unsupported Fibre Channel Adapter"}; 2526 break; 2527 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2528 m = (typeof(m)){"LPe1002-SP", "PCIe", 2529 "Obsolete, Unsupported Fibre Channel Adapter"}; 2530 break; 2531 case PCI_DEVICE_ID_BMID: 2532 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2533 break; 2534 case PCI_DEVICE_ID_BSMB: 2535 m = (typeof(m)){"LP111", "PCI-X2", 2536 "Obsolete, Unsupported Fibre Channel Adapter"}; 2537 break; 2538 case PCI_DEVICE_ID_ZEPHYR: 2539 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2540 break; 2541 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2542 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2543 break; 2544 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2545 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2546 GE = 1; 2547 break; 2548 case PCI_DEVICE_ID_ZMID: 2549 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2550 break; 2551 case PCI_DEVICE_ID_ZSMB: 2552 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2553 break; 2554 case PCI_DEVICE_ID_LP101: 2555 m = (typeof(m)){"LP101", "PCI-X", 2556 "Obsolete, Unsupported Fibre Channel Adapter"}; 2557 break; 2558 case PCI_DEVICE_ID_LP10000S: 2559 m = (typeof(m)){"LP10000-S", "PCI", 2560 "Obsolete, Unsupported Fibre Channel Adapter"}; 2561 break; 2562 case PCI_DEVICE_ID_LP11000S: 2563 m = (typeof(m)){"LP11000-S", "PCI-X2", 2564 "Obsolete, Unsupported Fibre Channel Adapter"}; 2565 break; 2566 case PCI_DEVICE_ID_LPE11000S: 2567 m = (typeof(m)){"LPe11000-S", "PCIe", 2568 "Obsolete, Unsupported Fibre Channel Adapter"}; 2569 break; 2570 case PCI_DEVICE_ID_SAT: 2571 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2572 break; 2573 case PCI_DEVICE_ID_SAT_MID: 2574 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2575 break; 2576 case PCI_DEVICE_ID_SAT_SMB: 2577 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2578 break; 2579 case PCI_DEVICE_ID_SAT_DCSP: 2580 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2581 break; 2582 case PCI_DEVICE_ID_SAT_SCSP: 2583 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2584 break; 2585 case PCI_DEVICE_ID_SAT_S: 2586 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2587 break; 2588 case PCI_DEVICE_ID_HORNET: 2589 m = (typeof(m)){"LP21000", "PCIe", 2590 "Obsolete, Unsupported FCoE Adapter"}; 2591 GE = 1; 2592 break; 2593 case PCI_DEVICE_ID_PROTEUS_VF: 2594 m = (typeof(m)){"LPev12000", "PCIe IOV", 2595 "Obsolete, Unsupported Fibre Channel Adapter"}; 2596 break; 2597 case PCI_DEVICE_ID_PROTEUS_PF: 2598 m = (typeof(m)){"LPev12000", "PCIe IOV", 2599 "Obsolete, Unsupported Fibre Channel Adapter"}; 2600 break; 2601 case PCI_DEVICE_ID_PROTEUS_S: 2602 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2603 "Obsolete, Unsupported Fibre Channel Adapter"}; 2604 break; 2605 case PCI_DEVICE_ID_TIGERSHARK: 2606 oneConnect = 1; 2607 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2608 break; 2609 case PCI_DEVICE_ID_TOMCAT: 2610 oneConnect = 1; 2611 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2612 break; 2613 case PCI_DEVICE_ID_FALCON: 2614 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2615 "EmulexSecure Fibre"}; 2616 break; 2617 case PCI_DEVICE_ID_BALIUS: 2618 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2619 "Obsolete, Unsupported Fibre Channel Adapter"}; 2620 break; 2621 
case PCI_DEVICE_ID_LANCER_FC: 2622 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2623 break; 2624 case PCI_DEVICE_ID_LANCER_FC_VF: 2625 m = (typeof(m)){"LPe16000", "PCIe", 2626 "Obsolete, Unsupported Fibre Channel Adapter"}; 2627 break; 2628 case PCI_DEVICE_ID_LANCER_FCOE: 2629 oneConnect = 1; 2630 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2631 break; 2632 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2633 oneConnect = 1; 2634 m = (typeof(m)){"OCe15100", "PCIe", 2635 "Obsolete, Unsupported FCoE"}; 2636 break; 2637 case PCI_DEVICE_ID_LANCER_G6_FC: 2638 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2639 break; 2640 case PCI_DEVICE_ID_LANCER_G7_FC: 2641 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2642 break; 2643 case PCI_DEVICE_ID_LANCER_G7P_FC: 2644 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; 2645 break; 2646 case PCI_DEVICE_ID_SKYHAWK: 2647 case PCI_DEVICE_ID_SKYHAWK_VF: 2648 oneConnect = 1; 2649 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2650 break; 2651 default: 2652 m = (typeof(m)){"Unknown", "", ""}; 2653 break; 2654 } 2655 2656 if (mdp && mdp[0] == '\0') 2657 snprintf(mdp, 79,"%s", m.name); 2658 /* 2659 * oneConnect hba requires special processing, they are all initiators 2660 * and we put the port number on the end 2661 */ 2662 if (descp && descp[0] == '\0') { 2663 if (oneConnect) 2664 snprintf(descp, 255, 2665 "Emulex OneConnect %s, %s Initiator %s", 2666 m.name, m.function, 2667 phba->Port); 2668 else if (max_speed == 0) 2669 snprintf(descp, 255, 2670 "Emulex %s %s %s", 2671 m.name, m.bus, m.function); 2672 else 2673 snprintf(descp, 255, 2674 "Emulex %s %d%s %s %s", 2675 m.name, max_speed, (GE) ? "GE" : "Gb", 2676 m.bus, m.function); 2677 } 2678 } 2679 2680 /** 2681 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2682 * @phba: pointer to lpfc hba data structure. 2683 * @pring: pointer to a IOCB ring. 2684 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2685 * 2686 * This routine posts a given number of IOCBs with the associated DMA buffer 2687 * descriptors specified by the cnt argument to the given IOCB ring. 2688 * 2689 * Return codes 2690 * The number of IOCBs NOT able to be posted to the IOCB ring. 
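 * A return of 0 means every requested buffer was posted; any shortfall is
 * remembered in pring->missbufcnt and retried on the next call.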
2691 **/ 2692 int 2693 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2694 { 2695 IOCB_t *icmd; 2696 struct lpfc_iocbq *iocb; 2697 struct lpfc_dmabuf *mp1, *mp2; 2698 2699 cnt += pring->missbufcnt; 2700 2701 /* While there are buffers to post */ 2702 while (cnt > 0) { 2703 /* Allocate buffer for command iocb */ 2704 iocb = lpfc_sli_get_iocbq(phba); 2705 if (iocb == NULL) { 2706 pring->missbufcnt = cnt; 2707 return cnt; 2708 } 2709 icmd = &iocb->iocb; 2710 2711 /* 2 buffers can be posted per command */ 2712 /* Allocate buffer to post */ 2713 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2714 if (mp1) 2715 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2716 if (!mp1 || !mp1->virt) { 2717 kfree(mp1); 2718 lpfc_sli_release_iocbq(phba, iocb); 2719 pring->missbufcnt = cnt; 2720 return cnt; 2721 } 2722 2723 INIT_LIST_HEAD(&mp1->list); 2724 /* Allocate buffer to post */ 2725 if (cnt > 1) { 2726 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2727 if (mp2) 2728 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2729 &mp2->phys); 2730 if (!mp2 || !mp2->virt) { 2731 kfree(mp2); 2732 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2733 kfree(mp1); 2734 lpfc_sli_release_iocbq(phba, iocb); 2735 pring->missbufcnt = cnt; 2736 return cnt; 2737 } 2738 2739 INIT_LIST_HEAD(&mp2->list); 2740 } else { 2741 mp2 = NULL; 2742 } 2743 2744 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2745 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2746 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2747 icmd->ulpBdeCount = 1; 2748 cnt--; 2749 if (mp2) { 2750 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2751 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2752 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2753 cnt--; 2754 icmd->ulpBdeCount = 2; 2755 } 2756 2757 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2758 icmd->ulpLe = 1; 2759 2760 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2761 IOCB_ERROR) { 2762 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2763 kfree(mp1); 2764 cnt++; 2765 if (mp2) { 2766 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2767 kfree(mp2); 2768 cnt++; 2769 } 2770 lpfc_sli_release_iocbq(phba, iocb); 2771 pring->missbufcnt = cnt; 2772 return cnt; 2773 } 2774 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2775 if (mp2) 2776 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2777 } 2778 pring->missbufcnt = 0; 2779 return 0; 2780 } 2781 2782 /** 2783 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2784 * @phba: pointer to lpfc hba data structure. 2785 * 2786 * This routine posts initial receive IOCB buffers to the ELS ring. The 2787 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2788 * set to 64 IOCBs. SLI3 only. 2789 * 2790 * Return codes 2791 * 0 - success (currently always success) 2792 **/ 2793 static int 2794 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2795 { 2796 struct lpfc_sli *psli = &phba->sli; 2797 2798 /* Ring 0, ELS / CT buffers */ 2799 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2800 /* Ring 2 - FCP no buffers needed */ 2801 2802 return 0; 2803 } 2804 2805 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2806 2807 /** 2808 * lpfc_sha_init - Set up initial array of hash table entries 2809 * @HashResultPointer: pointer to an array as hash table. 2810 * 2811 * This routine sets up the initial values to the array of hash table entries 2812 * for the LC HBAs. 
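 * The five words written here are the standard SHA-1 initial hash values
 * (H0-H4); together with the rotate-left macro S(N,V) above and
 * lpfc_sha_iterate() below they implement the SHA-1 compression used by
 * lpfc_hba_init() to derive the hbainit words from the adapter's random
 * challenge.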
2813 **/ 2814 static void 2815 lpfc_sha_init(uint32_t * HashResultPointer) 2816 { 2817 HashResultPointer[0] = 0x67452301; 2818 HashResultPointer[1] = 0xEFCDAB89; 2819 HashResultPointer[2] = 0x98BADCFE; 2820 HashResultPointer[3] = 0x10325476; 2821 HashResultPointer[4] = 0xC3D2E1F0; 2822 } 2823 2824 /** 2825 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2826 * @HashResultPointer: pointer to an initial/result hash table. 2827 * @HashWorkingPointer: pointer to a working hash table. 2828 * 2829 * This routine iterates an initial hash table pointed to by @HashResultPointer 2830 * with the values from the working hash table pointed to by @HashWorkingPointer. 2831 * The results are put back into the initial hash table and returned through 2832 * the @HashResultPointer as the result hash table. 2833 **/ 2834 static void 2835 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2836 { 2837 int t; 2838 uint32_t TEMP; 2839 uint32_t A, B, C, D, E; 2840 t = 16; 2841 do { 2842 HashWorkingPointer[t] = 2843 S(1, 2844 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2845 8] ^ 2846 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2847 } while (++t <= 79); 2848 t = 0; 2849 A = HashResultPointer[0]; 2850 B = HashResultPointer[1]; 2851 C = HashResultPointer[2]; 2852 D = HashResultPointer[3]; 2853 E = HashResultPointer[4]; 2854 2855 do { 2856 if (t < 20) { 2857 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2858 } else if (t < 40) { 2859 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2860 } else if (t < 60) { 2861 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2862 } else { 2863 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2864 } 2865 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2866 E = D; 2867 D = C; 2868 C = S(30, B); 2869 B = A; 2870 A = TEMP; 2871 } while (++t <= 79); 2872 2873 HashResultPointer[0] += A; 2874 HashResultPointer[1] += B; 2875 HashResultPointer[2] += C; 2876 HashResultPointer[3] += D; 2877 HashResultPointer[4] += E; 2878 2879 } 2880 2881 /** 2882 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2883 * @RandomChallenge: pointer to the entry of host challenge random number array. 2884 * @HashWorking: pointer to the entry of the working hash array. 2885 * 2886 * This routine calculates the working hash array referred to by @HashWorking 2887 * from the challenge random numbers associated with the host, referred to by 2888 * @RandomChallenge. The result is put into the entry of the working hash 2889 * array and returned by reference through @HashWorking. 2890 **/ 2891 static void 2892 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2893 { 2894 *HashWorking = (*RandomChallenge ^ *HashWorking); 2895 } 2896 2897 /** 2898 * lpfc_hba_init - Perform special handling for LC HBA initialization 2899 * @phba: pointer to lpfc hba data structure. 2900 * @hbainit: pointer to an array of unsigned 32-bit integers. 2901 * 2902 * This routine performs the special handling for LC HBA initialization.
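 * In outline: the node name seeds HashWorking[0]/[1] and [78]/[79], the
 * adapter's RandomData challenge is XORed into the first seven entries by
 * lpfc_challenge_key(), and lpfc_sha_init()/lpfc_sha_iterate() reduce the
 * 80-word block to the five @hbainit words used when configuring the port.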
2903 **/ 2904 void 2905 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2906 { 2907 int t; 2908 uint32_t *HashWorking; 2909 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2910 2911 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2912 if (!HashWorking) 2913 return; 2914 2915 HashWorking[0] = HashWorking[78] = *pwwnn++; 2916 HashWorking[1] = HashWorking[79] = *pwwnn; 2917 2918 for (t = 0; t < 7; t++) 2919 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2920 2921 lpfc_sha_init(hbainit); 2922 lpfc_sha_iterate(hbainit, HashWorking); 2923 kfree(HashWorking); 2924 } 2925 2926 /** 2927 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2928 * @vport: pointer to a virtual N_Port data structure. 2929 * 2930 * This routine performs the necessary cleanups before deleting the @vport. 2931 * It invokes the discovery state machine to perform necessary state 2932 * transitions and to release the ndlps associated with the @vport. Note, 2933 * the physical port is treated as @vport 0. 2934 **/ 2935 void 2936 lpfc_cleanup(struct lpfc_vport *vport) 2937 { 2938 struct lpfc_hba *phba = vport->phba; 2939 struct lpfc_nodelist *ndlp, *next_ndlp; 2940 int i = 0; 2941 2942 if (phba->link_state > LPFC_LINK_DOWN) 2943 lpfc_port_link_failure(vport); 2944 2945 /* Clean up VMID resources */ 2946 if (lpfc_is_vmid_enabled(phba)) 2947 lpfc_vmid_vport_cleanup(vport); 2948 2949 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2950 if (vport->port_type != LPFC_PHYSICAL_PORT && 2951 ndlp->nlp_DID == Fabric_DID) { 2952 /* Just free up ndlp with Fabric_DID for vports */ 2953 lpfc_nlp_put(ndlp); 2954 continue; 2955 } 2956 2957 if (ndlp->nlp_DID == Fabric_Cntl_DID && 2958 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2959 lpfc_nlp_put(ndlp); 2960 continue; 2961 } 2962 2963 /* Fabric Ports not in UNMAPPED state are cleaned up in the 2964 * DEVICE_RM event. 2965 */ 2966 if (ndlp->nlp_type & NLP_FABRIC && 2967 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 2968 lpfc_disc_state_machine(vport, ndlp, NULL, 2969 NLP_EVT_DEVICE_RECOVERY); 2970 2971 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD))) 2972 lpfc_disc_state_machine(vport, ndlp, NULL, 2973 NLP_EVT_DEVICE_RM); 2974 } 2975 2976 /* At this point, ALL ndlp's should be gone 2977 * because of the previous NLP_EVT_DEVICE_RM. 2978 * Lets wait for this to happen, if needed. 2979 */ 2980 while (!list_empty(&vport->fc_nodes)) { 2981 if (i++ > 3000) { 2982 lpfc_printf_vlog(vport, KERN_ERR, 2983 LOG_TRACE_EVENT, 2984 "0233 Nodelist not empty\n"); 2985 list_for_each_entry_safe(ndlp, next_ndlp, 2986 &vport->fc_nodes, nlp_listp) { 2987 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2988 LOG_TRACE_EVENT, 2989 "0282 did:x%x ndlp:x%px " 2990 "refcnt:%d xflags x%x nflag x%x\n", 2991 ndlp->nlp_DID, (void *)ndlp, 2992 kref_read(&ndlp->kref), 2993 ndlp->fc4_xpt_flags, 2994 ndlp->nlp_flag); 2995 } 2996 break; 2997 } 2998 2999 /* Wait for any activity on ndlps to settle */ 3000 msleep(10); 3001 } 3002 lpfc_cleanup_vports_rrqs(vport, NULL); 3003 } 3004 3005 /** 3006 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 3007 * @vport: pointer to a virtual N_Port data structure. 3008 * 3009 * This routine stops all the timers associated with a @vport. This function 3010 * is invoked before disabling or deleting a @vport. Note that the physical 3011 * port is treated as @vport 0. 
3012 **/ 3013 void 3014 lpfc_stop_vport_timers(struct lpfc_vport *vport) 3015 { 3016 del_timer_sync(&vport->els_tmofunc); 3017 del_timer_sync(&vport->delayed_disc_tmo); 3018 lpfc_can_disctmo(vport); 3019 return; 3020 } 3021 3022 /** 3023 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 3024 * @phba: pointer to lpfc hba data structure. 3025 * 3026 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 3027 * caller of this routine should already hold the host lock. 3028 **/ 3029 void 3030 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 3031 { 3032 /* Clear pending FCF rediscovery wait flag */ 3033 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3034 3035 /* Now, try to stop the timer */ 3036 del_timer(&phba->fcf.redisc_wait); 3037 } 3038 3039 /** 3040 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 3041 * @phba: pointer to lpfc hba data structure. 3042 * 3043 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 3044 * checks, with the host lock held, whether the FCF rediscovery wait timer 3045 * is pending before proceeding with disabling the timer and clearing the 3046 * wait timer pending flag. 3047 **/ 3048 void 3049 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 3050 { 3051 spin_lock_irq(&phba->hbalock); 3052 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3053 /* FCF rediscovery timer already fired or stopped */ 3054 spin_unlock_irq(&phba->hbalock); 3055 return; 3056 } 3057 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3058 /* Clear failover in progress flags */ 3059 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 3060 spin_unlock_irq(&phba->hbalock); 3061 } 3062 3063 /** 3064 * lpfc_cmf_stop - Stop CMF processing 3065 * @phba: pointer to lpfc hba data structure. 3066 * 3067 * This is called when the link goes down or if CMF mode is turned OFF. 3068 * It is also called when going offline or unloading, just before the 3069 * congestion info buffer is unregistered.
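 * Cancelling the hrtimer and zeroing the per-CPU cmf_stat counters here
 * lets a later lpfc_cmf_start() begin a clean monitoring interval, and any
 * I/O held back for bandwidth management is released via the
 * unblock_request_work item.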
3070 **/ 3071 void 3072 lpfc_cmf_stop(struct lpfc_hba *phba) 3073 { 3074 int cpu; 3075 struct lpfc_cgn_stat *cgs; 3076 3077 /* We only do something if CMF is enabled */ 3078 if (!phba->sli4_hba.pc_sli4_params.cmf) 3079 return; 3080 3081 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3082 "6221 Stop CMF / Cancel Timer\n"); 3083 3084 /* Cancel the CMF timer */ 3085 hrtimer_cancel(&phba->cmf_timer); 3086 3087 /* Zero CMF counters */ 3088 atomic_set(&phba->cmf_busy, 0); 3089 for_each_present_cpu(cpu) { 3090 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3091 atomic64_set(&cgs->total_bytes, 0); 3092 atomic64_set(&cgs->rcv_bytes, 0); 3093 atomic_set(&cgs->rx_io_cnt, 0); 3094 atomic64_set(&cgs->rx_latency, 0); 3095 } 3096 atomic_set(&phba->cmf_bw_wait, 0); 3097 3098 /* Resume any blocked IO - Queue unblock on workqueue */ 3099 queue_work(phba->wq, &phba->unblock_request_work); 3100 } 3101 3102 static inline uint64_t 3103 lpfc_get_max_line_rate(struct lpfc_hba *phba) 3104 { 3105 uint64_t rate = lpfc_sli_port_speed_get(phba); 3106 3107 return ((((unsigned long)rate) * 1024 * 1024) / 10); 3108 } 3109 3110 void 3111 lpfc_cmf_signal_init(struct lpfc_hba *phba) 3112 { 3113 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3114 "6223 Signal CMF init\n"); 3115 3116 /* Use the new fc_linkspeed to recalculate */ 3117 phba->cmf_interval_rate = LPFC_CMF_INTERVAL; 3118 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); 3119 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 3120 phba->cmf_interval_rate, 1000); 3121 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; 3122 3123 /* This is a signal to firmware to sync up CMF BW with link speed */ 3124 lpfc_issue_cmf_sync_wqe(phba, 0, 0); 3125 } 3126 3127 /** 3128 * lpfc_cmf_start - Start CMF processing 3129 * @phba: pointer to lpfc hba data structure. 3130 * 3131 * This is called when the link comes up or if CMF mode is turned OFF 3132 * to Monitor or Managed. 3133 **/ 3134 void 3135 lpfc_cmf_start(struct lpfc_hba *phba) 3136 { 3137 struct lpfc_cgn_stat *cgs; 3138 int cpu; 3139 3140 /* We only do something if CMF is enabled */ 3141 if (!phba->sli4_hba.pc_sli4_params.cmf || 3142 phba->cmf_active_mode == LPFC_CFG_OFF) 3143 return; 3144 3145 /* Reinitialize congestion buffer info */ 3146 lpfc_init_congestion_buf(phba); 3147 3148 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 3149 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 3150 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 3151 atomic_set(&phba->cgn_sync_warn_cnt, 0); 3152 3153 atomic_set(&phba->cmf_busy, 0); 3154 for_each_present_cpu(cpu) { 3155 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3156 atomic64_set(&cgs->total_bytes, 0); 3157 atomic64_set(&cgs->rcv_bytes, 0); 3158 atomic_set(&cgs->rx_io_cnt, 0); 3159 atomic64_set(&cgs->rx_latency, 0); 3160 } 3161 phba->cmf_latency.tv_sec = 0; 3162 phba->cmf_latency.tv_nsec = 0; 3163 3164 lpfc_cmf_signal_init(phba); 3165 3166 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3167 "6222 Start CMF / Timer\n"); 3168 3169 phba->cmf_timer_cnt = 0; 3170 hrtimer_start(&phba->cmf_timer, 3171 ktime_set(0, LPFC_CMF_INTERVAL * 1000000), 3172 HRTIMER_MODE_REL); 3173 /* Setup for latency check in IO cmpl routines */ 3174 ktime_get_real_ts64(&phba->cmf_latency); 3175 3176 atomic_set(&phba->cmf_bw_wait, 0); 3177 atomic_set(&phba->cmf_stop_io, 0); 3178 } 3179 3180 /** 3181 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 3182 * @phba: pointer to lpfc hba data structure. 3183 * 3184 * This routine stops all the timers associated with a HBA. 
This function is 3185 * invoked before either putting a HBA offline or unloading the driver. 3186 **/ 3187 void 3188 lpfc_stop_hba_timers(struct lpfc_hba *phba) 3189 { 3190 if (phba->pport) 3191 lpfc_stop_vport_timers(phba->pport); 3192 cancel_delayed_work_sync(&phba->eq_delay_work); 3193 cancel_delayed_work_sync(&phba->idle_stat_delay_work); 3194 del_timer_sync(&phba->sli.mbox_tmo); 3195 del_timer_sync(&phba->fabric_block_timer); 3196 del_timer_sync(&phba->eratt_poll); 3197 del_timer_sync(&phba->hb_tmofunc); 3198 if (phba->sli_rev == LPFC_SLI_REV4) { 3199 del_timer_sync(&phba->rrq_tmr); 3200 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 3201 } 3202 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 3203 3204 switch (phba->pci_dev_grp) { 3205 case LPFC_PCI_DEV_LP: 3206 /* Stop any LightPulse device specific driver timers */ 3207 del_timer_sync(&phba->fcp_poll_timer); 3208 break; 3209 case LPFC_PCI_DEV_OC: 3210 /* Stop any OneConnect device specific driver timers */ 3211 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3212 break; 3213 default: 3214 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3215 "0297 Invalid device group (x%x)\n", 3216 phba->pci_dev_grp); 3217 break; 3218 } 3219 return; 3220 } 3221 3222 /** 3223 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 3224 * @phba: pointer to lpfc hba data structure. 3225 * @mbx_action: flag for mailbox no wait action. 3226 * 3227 * This routine marks a HBA's management interface as blocked. Once the HBA's 3228 * management interface is marked as blocked, all the user space access to 3229 * the HBA, whether they are from sysfs interface or libdfc interface will 3230 * all be blocked. The HBA is set to block the management interface when the 3231 * driver prepares the HBA interface for online or offline. 3232 **/ 3233 static void 3234 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 3235 { 3236 unsigned long iflag; 3237 uint8_t actcmd = MBX_HEARTBEAT; 3238 unsigned long timeout; 3239 3240 spin_lock_irqsave(&phba->hbalock, iflag); 3241 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 3242 spin_unlock_irqrestore(&phba->hbalock, iflag); 3243 if (mbx_action == LPFC_MBX_NO_WAIT) 3244 return; 3245 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 3246 spin_lock_irqsave(&phba->hbalock, iflag); 3247 if (phba->sli.mbox_active) { 3248 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 3249 /* Determine how long we might wait for the active mailbox 3250 * command to be gracefully completed by firmware. 3251 */ 3252 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 3253 phba->sli.mbox_active) * 1000) + jiffies; 3254 } 3255 spin_unlock_irqrestore(&phba->hbalock, iflag); 3256 3257 /* Wait for the outstanding mailbox command to complete */ 3258 while (phba->sli.mbox_active) { 3259 /* Check active mailbox complete status every 2ms */ 3260 msleep(2); 3261 if (time_after(jiffies, timeout)) { 3262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3263 "2813 Mgmt IO is Blocked %x " 3264 "- mbox cmd %x still active\n", 3265 phba->sli.sli_flag, actcmd); 3266 break; 3267 } 3268 } 3269 } 3270 3271 /** 3272 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3273 * @phba: pointer to lpfc hba data structure. 3274 * 3275 * Allocate RPIs for all active remote nodes. This is needed whenever 3276 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3277 * is to fixup the temporary rpi assignments.
3278 **/ 3279 void 3280 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3281 { 3282 struct lpfc_nodelist *ndlp, *next_ndlp; 3283 struct lpfc_vport **vports; 3284 int i, rpi; 3285 3286 if (phba->sli_rev != LPFC_SLI_REV4) 3287 return; 3288 3289 vports = lpfc_create_vport_work_array(phba); 3290 if (vports == NULL) 3291 return; 3292 3293 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3294 if (vports[i]->load_flag & FC_UNLOADING) 3295 continue; 3296 3297 list_for_each_entry_safe(ndlp, next_ndlp, 3298 &vports[i]->fc_nodes, 3299 nlp_listp) { 3300 rpi = lpfc_sli4_alloc_rpi(phba); 3301 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3302 /* TODO print log? */ 3303 continue; 3304 } 3305 ndlp->nlp_rpi = rpi; 3306 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3307 LOG_NODE | LOG_DISCOVERY, 3308 "0009 Assign RPI x%x to ndlp x%px " 3309 "DID:x%06x flg:x%x\n", 3310 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, 3311 ndlp->nlp_flag); 3312 } 3313 } 3314 lpfc_destroy_vport_work_array(phba, vports); 3315 } 3316 3317 /** 3318 * lpfc_create_expedite_pool - create expedite pool 3319 * @phba: pointer to lpfc hba data structure. 3320 * 3321 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3322 * to expedite pool. Mark them as expedite. 3323 **/ 3324 static void lpfc_create_expedite_pool(struct lpfc_hba *phba) 3325 { 3326 struct lpfc_sli4_hdw_queue *qp; 3327 struct lpfc_io_buf *lpfc_ncmd; 3328 struct lpfc_io_buf *lpfc_ncmd_next; 3329 struct lpfc_epd_pool *epd_pool; 3330 unsigned long iflag; 3331 3332 epd_pool = &phba->epd_pool; 3333 qp = &phba->sli4_hba.hdwq[0]; 3334 3335 spin_lock_init(&epd_pool->lock); 3336 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3337 spin_lock(&epd_pool->lock); 3338 INIT_LIST_HEAD(&epd_pool->list); 3339 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3340 &qp->lpfc_io_buf_list_put, list) { 3341 list_move_tail(&lpfc_ncmd->list, &epd_pool->list); 3342 lpfc_ncmd->expedite = true; 3343 qp->put_io_bufs--; 3344 epd_pool->count++; 3345 if (epd_pool->count >= XRI_BATCH) 3346 break; 3347 } 3348 spin_unlock(&epd_pool->lock); 3349 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3350 } 3351 3352 /** 3353 * lpfc_destroy_expedite_pool - destroy expedite pool 3354 * @phba: pointer to lpfc hba data structure. 3355 * 3356 * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put 3357 * of HWQ 0. Clear the mark. 3358 **/ 3359 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) 3360 { 3361 struct lpfc_sli4_hdw_queue *qp; 3362 struct lpfc_io_buf *lpfc_ncmd; 3363 struct lpfc_io_buf *lpfc_ncmd_next; 3364 struct lpfc_epd_pool *epd_pool; 3365 unsigned long iflag; 3366 3367 epd_pool = &phba->epd_pool; 3368 qp = &phba->sli4_hba.hdwq[0]; 3369 3370 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3371 spin_lock(&epd_pool->lock); 3372 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3373 &epd_pool->list, list) { 3374 list_move_tail(&lpfc_ncmd->list, 3375 &qp->lpfc_io_buf_list_put); 3376 lpfc_ncmd->flags = false; 3377 qp->put_io_bufs++; 3378 epd_pool->count--; 3379 } 3380 spin_unlock(&epd_pool->lock); 3381 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3382 } 3383 3384 /** 3385 * lpfc_create_multixri_pools - create multi-XRI pools 3386 * @phba: pointer to lpfc hba data structure. 3387 * 3388 * This routine initializes the public and private XRI pools for each HWQ, 3389 * then moves XRIs from lpfc_io_buf_list_put to the public pool. High and 3390 * low watermarks are also initialized.
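 * Roughly: each hardware queue gets io_xri_cnt / cfg_hdw_queue XRIs as its
 * xri_limit; the public (pbl) pool is seeded from the queue's PUT list while
 * the private (pvt) pool starts empty with a high watermark of half the
 * limit and a low watermark of XRI_BATCH. On allocation failure the pools
 * already created are freed and XRI rebalancing is disabled.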
3391 **/ 3392 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3393 { 3394 u32 i, j; 3395 u32 hwq_count; 3396 u32 count_per_hwq; 3397 struct lpfc_io_buf *lpfc_ncmd; 3398 struct lpfc_io_buf *lpfc_ncmd_next; 3399 unsigned long iflag; 3400 struct lpfc_sli4_hdw_queue *qp; 3401 struct lpfc_multixri_pool *multixri_pool; 3402 struct lpfc_pbl_pool *pbl_pool; 3403 struct lpfc_pvt_pool *pvt_pool; 3404 3405 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3406 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3407 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3408 phba->sli4_hba.io_xri_cnt); 3409 3410 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3411 lpfc_create_expedite_pool(phba); 3412 3413 hwq_count = phba->cfg_hdw_queue; 3414 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3415 3416 for (i = 0; i < hwq_count; i++) { 3417 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3418 3419 if (!multixri_pool) { 3420 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3421 "1238 Failed to allocate memory for " 3422 "multixri_pool\n"); 3423 3424 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3425 lpfc_destroy_expedite_pool(phba); 3426 3427 j = 0; 3428 while (j < i) { 3429 qp = &phba->sli4_hba.hdwq[j]; 3430 kfree(qp->p_multixri_pool); 3431 j++; 3432 } 3433 phba->cfg_xri_rebalancing = 0; 3434 return; 3435 } 3436 3437 qp = &phba->sli4_hba.hdwq[i]; 3438 qp->p_multixri_pool = multixri_pool; 3439 3440 multixri_pool->xri_limit = count_per_hwq; 3441 multixri_pool->rrb_next_hwqid = i; 3442 3443 /* Deal with public free xri pool */ 3444 pbl_pool = &multixri_pool->pbl_pool; 3445 spin_lock_init(&pbl_pool->lock); 3446 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3447 spin_lock(&pbl_pool->lock); 3448 INIT_LIST_HEAD(&pbl_pool->list); 3449 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3450 &qp->lpfc_io_buf_list_put, list) { 3451 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3452 qp->put_io_bufs--; 3453 pbl_pool->count++; 3454 } 3455 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3456 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3457 pbl_pool->count, i); 3458 spin_unlock(&pbl_pool->lock); 3459 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3460 3461 /* Deal with private free xri pool */ 3462 pvt_pool = &multixri_pool->pvt_pool; 3463 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3464 pvt_pool->low_watermark = XRI_BATCH; 3465 spin_lock_init(&pvt_pool->lock); 3466 spin_lock_irqsave(&pvt_pool->lock, iflag); 3467 INIT_LIST_HEAD(&pvt_pool->list); 3468 pvt_pool->count = 0; 3469 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3470 } 3471 } 3472 3473 /** 3474 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3475 * @phba: pointer to lpfc hba data structure. 3476 * 3477 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
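 * Unless the driver is unloading, the IO rings are flushed first so no
 * in-flight XRIs remain while the per-queue public and private pools are
 * drained back onto the PUT lists and freed.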
3478 **/ 3479 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3480 { 3481 u32 i; 3482 u32 hwq_count; 3483 struct lpfc_io_buf *lpfc_ncmd; 3484 struct lpfc_io_buf *lpfc_ncmd_next; 3485 unsigned long iflag; 3486 struct lpfc_sli4_hdw_queue *qp; 3487 struct lpfc_multixri_pool *multixri_pool; 3488 struct lpfc_pbl_pool *pbl_pool; 3489 struct lpfc_pvt_pool *pvt_pool; 3490 3491 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3492 lpfc_destroy_expedite_pool(phba); 3493 3494 if (!(phba->pport->load_flag & FC_UNLOADING)) 3495 lpfc_sli_flush_io_rings(phba); 3496 3497 hwq_count = phba->cfg_hdw_queue; 3498 3499 for (i = 0; i < hwq_count; i++) { 3500 qp = &phba->sli4_hba.hdwq[i]; 3501 multixri_pool = qp->p_multixri_pool; 3502 if (!multixri_pool) 3503 continue; 3504 3505 qp->p_multixri_pool = NULL; 3506 3507 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3508 3509 /* Deal with public free xri pool */ 3510 pbl_pool = &multixri_pool->pbl_pool; 3511 spin_lock(&pbl_pool->lock); 3512 3513 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3514 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3515 pbl_pool->count, i); 3516 3517 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3518 &pbl_pool->list, list) { 3519 list_move_tail(&lpfc_ncmd->list, 3520 &qp->lpfc_io_buf_list_put); 3521 qp->put_io_bufs++; 3522 pbl_pool->count--; 3523 } 3524 3525 INIT_LIST_HEAD(&pbl_pool->list); 3526 pbl_pool->count = 0; 3527 3528 spin_unlock(&pbl_pool->lock); 3529 3530 /* Deal with private free xri pool */ 3531 pvt_pool = &multixri_pool->pvt_pool; 3532 spin_lock(&pvt_pool->lock); 3533 3534 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3535 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3536 pvt_pool->count, i); 3537 3538 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3539 &pvt_pool->list, list) { 3540 list_move_tail(&lpfc_ncmd->list, 3541 &qp->lpfc_io_buf_list_put); 3542 qp->put_io_bufs++; 3543 pvt_pool->count--; 3544 } 3545 3546 INIT_LIST_HEAD(&pvt_pool->list); 3547 pvt_pool->count = 0; 3548 3549 spin_unlock(&pvt_pool->lock); 3550 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3551 3552 kfree(multixri_pool); 3553 } 3554 } 3555 3556 /** 3557 * lpfc_online - Initialize and bring a HBA online 3558 * @phba: pointer to lpfc hba data structure. 3559 * 3560 * This routine initializes the HBA and brings a HBA online. During this 3561 * process, the management interface is blocked to prevent user space access 3562 * to the HBA interfering with the driver initialization. 3563 * 3564 * Return codes 3565 * 0 - successful 3566 * 1 - failed 3567 **/ 3568 int 3569 lpfc_online(struct lpfc_hba *phba) 3570 { 3571 struct lpfc_vport *vport; 3572 struct lpfc_vport **vports; 3573 int i, error = 0; 3574 bool vpis_cleared = false; 3575 3576 if (!phba) 3577 return 0; 3578 vport = phba->pport; 3579 3580 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3581 return 0; 3582 3583 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3584 "0458 Bring Adapter online\n"); 3585 3586 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3587 3588 if (phba->sli_rev == LPFC_SLI_REV4) { 3589 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3590 lpfc_unblock_mgmt_io(phba); 3591 return 1; 3592 } 3593 spin_lock_irq(&phba->hbalock); 3594 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3595 vpis_cleared = true; 3596 spin_unlock_irq(&phba->hbalock); 3597 3598 /* Reestablish the local initiator port. 3599 * The offline process destroyed the previous lport. 
3600 */ 3601 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3602 !phba->nvmet_support) { 3603 error = lpfc_nvme_create_localport(phba->pport); 3604 if (error) 3605 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3606 "6132 NVME restore reg failed " 3607 "on nvmei error x%x\n", error); 3608 } 3609 } else { 3610 lpfc_sli_queue_init(phba); 3611 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3612 lpfc_unblock_mgmt_io(phba); 3613 return 1; 3614 } 3615 } 3616 3617 vports = lpfc_create_vport_work_array(phba); 3618 if (vports != NULL) { 3619 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3620 struct Scsi_Host *shost; 3621 shost = lpfc_shost_from_vport(vports[i]); 3622 spin_lock_irq(shost->host_lock); 3623 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3624 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3625 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3626 if (phba->sli_rev == LPFC_SLI_REV4) { 3627 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3628 if ((vpis_cleared) && 3629 (vports[i]->port_type != 3630 LPFC_PHYSICAL_PORT)) 3631 vports[i]->vpi = 0; 3632 } 3633 spin_unlock_irq(shost->host_lock); 3634 } 3635 } 3636 lpfc_destroy_vport_work_array(phba, vports); 3637 3638 if (phba->cfg_xri_rebalancing) 3639 lpfc_create_multixri_pools(phba); 3640 3641 lpfc_cpuhp_add(phba); 3642 3643 lpfc_unblock_mgmt_io(phba); 3644 return 0; 3645 } 3646 3647 /** 3648 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3649 * @phba: pointer to lpfc hba data structure. 3650 * 3651 * This routine marks a HBA's management interface as not blocked. Once the 3652 * HBA's management interface is marked as not blocked, all the user space 3653 * access to the HBA, whether they are from sysfs interface or libdfc 3654 * interface will be allowed. The HBA is set to block the management interface 3655 * when the driver prepares the HBA interface for online or offline and then 3656 * set to unblock the management interface afterwards. 3657 **/ 3658 void 3659 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3660 { 3661 unsigned long iflag; 3662 3663 spin_lock_irqsave(&phba->hbalock, iflag); 3664 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3665 spin_unlock_irqrestore(&phba->hbalock, iflag); 3666 } 3667 3668 /** 3669 * lpfc_offline_prep - Prepare a HBA to be brought offline 3670 * @phba: pointer to lpfc hba data structure. 3671 * @mbx_action: flag for mailbox shutdown action. 3672 * 3673 * This routine is invoked to prepare a HBA to be brought offline. It performs 3674 * unregistration login to all the nodes on all vports and flushes the mailbox 3675 * queue to make it ready to be brought offline. 
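 *
 * For SLI4 ports the RPI of each node is also freed here; a new RPI is
 * allocated when the adapter port comes back online.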
3676 **/ 3677 void 3678 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3679 { 3680 struct lpfc_vport *vport = phba->pport; 3681 struct lpfc_nodelist *ndlp, *next_ndlp; 3682 struct lpfc_vport **vports; 3683 struct Scsi_Host *shost; 3684 int i; 3685 int offline = 0; 3686 3687 if (vport->fc_flag & FC_OFFLINE_MODE) 3688 return; 3689 3690 lpfc_block_mgmt_io(phba, mbx_action); 3691 3692 lpfc_linkdown(phba); 3693 3694 offline = pci_channel_offline(phba->pcidev); 3695 3696 /* Issue an unreg_login to all nodes on all vports */ 3697 vports = lpfc_create_vport_work_array(phba); 3698 if (vports != NULL) { 3699 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3700 if (vports[i]->load_flag & FC_UNLOADING) 3701 continue; 3702 shost = lpfc_shost_from_vport(vports[i]); 3703 spin_lock_irq(shost->host_lock); 3704 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3705 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3706 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3707 spin_unlock_irq(shost->host_lock); 3708 3709 shost = lpfc_shost_from_vport(vports[i]); 3710 list_for_each_entry_safe(ndlp, next_ndlp, 3711 &vports[i]->fc_nodes, 3712 nlp_listp) { 3713 3714 spin_lock_irq(&ndlp->lock); 3715 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3716 spin_unlock_irq(&ndlp->lock); 3717 3718 if (offline) { 3719 spin_lock_irq(&ndlp->lock); 3720 ndlp->nlp_flag &= ~(NLP_UNREG_INP | 3721 NLP_RPI_REGISTERED); 3722 spin_unlock_irq(&ndlp->lock); 3723 } else { 3724 lpfc_unreg_rpi(vports[i], ndlp); 3725 } 3726 /* 3727 * Whenever an SLI4 port goes offline, free the 3728 * RPI. Get a new RPI when the adapter port 3729 * comes back online. 3730 */ 3731 if (phba->sli_rev == LPFC_SLI_REV4) { 3732 lpfc_printf_vlog(vports[i], KERN_INFO, 3733 LOG_NODE | LOG_DISCOVERY, 3734 "0011 Free RPI x%x on " 3735 "ndlp: x%px did x%x\n", 3736 ndlp->nlp_rpi, ndlp, 3737 ndlp->nlp_DID); 3738 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3739 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3740 } 3741 3742 if (ndlp->nlp_type & NLP_FABRIC) { 3743 lpfc_disc_state_machine(vports[i], ndlp, 3744 NULL, NLP_EVT_DEVICE_RECOVERY); 3745 3746 /* Don't remove the node unless the node 3747 * has been unregistered with the 3748 * transport, and we're not in recovery 3749 * before dev_loss_tmo triggered. 3750 * Otherwise, let dev_loss take care of 3751 * the node. 3752 */ 3753 if (!(ndlp->save_flags & 3754 NLP_IN_RECOV_POST_DEV_LOSS) && 3755 !(ndlp->fc4_xpt_flags & 3756 (NVME_XPT_REGD | SCSI_XPT_REGD))) 3757 lpfc_disc_state_machine 3758 (vports[i], ndlp, 3759 NULL, 3760 NLP_EVT_DEVICE_RM); 3761 } 3762 } 3763 } 3764 } 3765 lpfc_destroy_vport_work_array(phba, vports); 3766 3767 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3768 3769 if (phba->wq) 3770 flush_workqueue(phba->wq); 3771 } 3772 3773 /** 3774 * lpfc_offline - Bring a HBA offline 3775 * @phba: pointer to lpfc hba data structure. 3776 * 3777 * This routine actually brings a HBA offline. It stops all the timers 3778 * associated with the HBA, brings down the SLI layer, and eventually 3779 * marks the HBA as in offline state for the upper layer protocol. 3780 **/ 3781 void 3782 lpfc_offline(struct lpfc_hba *phba) 3783 { 3784 struct Scsi_Host *shost; 3785 struct lpfc_vport **vports; 3786 int i; 3787 3788 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3789 return; 3790 3791 /* stop port and all timers associated with this hba */ 3792 lpfc_stop_port(phba); 3793 3794 /* Tear down the local and target port registrations. The 3795 * nvme transports need to cleanup. 
3796 */ 3797 lpfc_nvmet_destroy_targetport(phba); 3798 lpfc_nvme_destroy_localport(phba->pport); 3799 3800 vports = lpfc_create_vport_work_array(phba); 3801 if (vports != NULL) 3802 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3803 lpfc_stop_vport_timers(vports[i]); 3804 lpfc_destroy_vport_work_array(phba, vports); 3805 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3806 "0460 Bring Adapter offline\n"); 3807 /* Bring down the SLI Layer and cleanup. The HBA is offline 3808 now. */ 3809 lpfc_sli_hba_down(phba); 3810 spin_lock_irq(&phba->hbalock); 3811 phba->work_ha = 0; 3812 spin_unlock_irq(&phba->hbalock); 3813 vports = lpfc_create_vport_work_array(phba); 3814 if (vports != NULL) 3815 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3816 shost = lpfc_shost_from_vport(vports[i]); 3817 spin_lock_irq(shost->host_lock); 3818 vports[i]->work_port_events = 0; 3819 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3820 spin_unlock_irq(shost->host_lock); 3821 } 3822 lpfc_destroy_vport_work_array(phba, vports); 3823 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled 3824 * in hba_unset 3825 */ 3826 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3827 __lpfc_cpuhp_remove(phba); 3828 3829 if (phba->cfg_xri_rebalancing) 3830 lpfc_destroy_multixri_pools(phba); 3831 } 3832 3833 /** 3834 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3835 * @phba: pointer to lpfc hba data structure. 3836 * 3837 * This routine is to free all the SCSI buffers and IOCBs from the driver 3838 * list back to kernel. It is called from lpfc_pci_remove_one to free 3839 * the internal resources before the device is removed from the system. 3840 **/ 3841 static void 3842 lpfc_scsi_free(struct lpfc_hba *phba) 3843 { 3844 struct lpfc_io_buf *sb, *sb_next; 3845 3846 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3847 return; 3848 3849 spin_lock_irq(&phba->hbalock); 3850 3851 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3852 3853 spin_lock(&phba->scsi_buf_list_put_lock); 3854 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3855 list) { 3856 list_del(&sb->list); 3857 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3858 sb->dma_handle); 3859 kfree(sb); 3860 phba->total_scsi_bufs--; 3861 } 3862 spin_unlock(&phba->scsi_buf_list_put_lock); 3863 3864 spin_lock(&phba->scsi_buf_list_get_lock); 3865 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3866 list) { 3867 list_del(&sb->list); 3868 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3869 sb->dma_handle); 3870 kfree(sb); 3871 phba->total_scsi_bufs--; 3872 } 3873 spin_unlock(&phba->scsi_buf_list_get_lock); 3874 spin_unlock_irq(&phba->hbalock); 3875 } 3876 3877 /** 3878 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3879 * @phba: pointer to lpfc hba data structure. 3880 * 3881 * This routine is to free all the IO buffers and IOCBs from the driver 3882 * list back to kernel. It is called from lpfc_pci_remove_one to free 3883 * the internal resources before the device is removed from the system. 3884 **/ 3885 void 3886 lpfc_io_free(struct lpfc_hba *phba) 3887 { 3888 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3889 struct lpfc_sli4_hdw_queue *qp; 3890 int idx; 3891 3892 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3893 qp = &phba->sli4_hba.hdwq[idx]; 3894 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3895 spin_lock(&qp->io_buf_list_put_lock); 3896 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3897 &qp->lpfc_io_buf_list_put, 3898 list) { 3899 list_del(&lpfc_ncmd->list); 3900 qp->put_io_bufs--; 3901 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3902 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3903 if (phba->cfg_xpsgl && !phba->nvmet_support) 3904 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3905 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3906 kfree(lpfc_ncmd); 3907 qp->total_io_bufs--; 3908 } 3909 spin_unlock(&qp->io_buf_list_put_lock); 3910 3911 spin_lock(&qp->io_buf_list_get_lock); 3912 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3913 &qp->lpfc_io_buf_list_get, 3914 list) { 3915 list_del(&lpfc_ncmd->list); 3916 qp->get_io_bufs--; 3917 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3918 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3919 if (phba->cfg_xpsgl && !phba->nvmet_support) 3920 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3921 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3922 kfree(lpfc_ncmd); 3923 qp->total_io_bufs--; 3924 } 3925 spin_unlock(&qp->io_buf_list_get_lock); 3926 } 3927 } 3928 3929 /** 3930 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3931 * @phba: pointer to lpfc hba data structure. 3932 * 3933 * This routine first calculates the sizes of the current els and allocated 3934 * scsi sgl lists, and then goes through all sgls to updates the physical 3935 * XRIs assigned due to port function reset. During port initialization, the 3936 * current els and allocated scsi sgl lists are 0s. 3937 * 3938 * Return codes 3939 * 0 - successful (for now, it always returns 0) 3940 **/ 3941 int 3942 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3943 { 3944 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3945 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3946 LIST_HEAD(els_sgl_list); 3947 int rc; 3948 3949 /* 3950 * update on pci function's els xri-sgl list 3951 */ 3952 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3953 3954 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3955 /* els xri-sgl expanded */ 3956 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3957 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3958 "3157 ELS xri-sgl count increased from " 3959 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3960 els_xri_cnt); 3961 /* allocate the additional els sgls */ 3962 for (i = 0; i < xri_cnt; i++) { 3963 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3964 GFP_KERNEL); 3965 if (sglq_entry == NULL) { 3966 lpfc_printf_log(phba, KERN_ERR, 3967 LOG_TRACE_EVENT, 3968 "2562 Failure to allocate an " 3969 "ELS sgl entry:%d\n", i); 3970 rc = -ENOMEM; 3971 goto out_free_mem; 3972 } 3973 sglq_entry->buff_type = GEN_BUFF_TYPE; 3974 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3975 &sglq_entry->phys); 3976 if (sglq_entry->virt == NULL) { 3977 kfree(sglq_entry); 3978 lpfc_printf_log(phba, KERN_ERR, 3979 LOG_TRACE_EVENT, 3980 "2563 Failure to allocate an " 3981 "ELS mbuf:%d\n", i); 3982 rc = -ENOMEM; 3983 goto out_free_mem; 3984 } 3985 sglq_entry->sgl = sglq_entry->virt; 3986 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3987 sglq_entry->state = SGL_FREED; 3988 list_add_tail(&sglq_entry->list, &els_sgl_list); 3989 } 3990 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 3991 list_splice_init(&els_sgl_list, 3992 &phba->sli4_hba.lpfc_els_sgl_list); 3993 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 3994 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3995 /* els xri-sgl shrinked */ 3996 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3997 lpfc_printf_log(phba, KERN_INFO, 
LOG_SLI,
3998 "3158 ELS xri-sgl count decreased from "
3999 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4000 els_xri_cnt);
4001 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4002 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4003 &els_sgl_list);
4004 /* release extra els sgls from list */
4005 for (i = 0; i < xri_cnt; i++) {
4006 list_remove_head(&els_sgl_list,
4007 sglq_entry, struct lpfc_sglq, list);
4008 if (sglq_entry) {
4009 __lpfc_mbuf_free(phba, sglq_entry->virt,
4010 sglq_entry->phys);
4011 kfree(sglq_entry);
4012 }
4013 }
4014 list_splice_init(&els_sgl_list,
4015 &phba->sli4_hba.lpfc_els_sgl_list);
4016 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4017 } else
4018 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4019 "3163 ELS xri-sgl count unchanged: %d\n",
4020 els_xri_cnt);
4021 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4022
4023 /* update xris to els sgls on the list */
4024 sglq_entry = NULL;
4025 sglq_entry_next = NULL;
4026 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4027 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4028 lxri = lpfc_sli4_next_xritag(phba);
4029 if (lxri == NO_XRI) {
4030 lpfc_printf_log(phba, KERN_ERR,
4031 LOG_TRACE_EVENT,
4032 "2400 Failed to allocate xri for "
4033 "ELS sgl\n");
4034 rc = -ENOMEM;
4035 goto out_free_mem;
4036 }
4037 sglq_entry->sli4_lxritag = lxri;
4038 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4039 }
4040 return 0;
4041
4042 out_free_mem:
4043 lpfc_free_els_sgl_list(phba);
4044 return rc;
4045 }
4046
4047 /**
4048 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4049 * @phba: pointer to lpfc hba data structure.
4050 *
4051 * This routine first calculates the sizes of the current els and allocated
4052 * nvmet sgl lists, and then goes through all sgls to update the physical
4053 * XRIs assigned due to port function reset. During port initialization, the
4054 * current els and allocated nvmet sgl lists are 0s.
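 *
 * Illustrative example (the numbers are hypothetical): with max_xri = 2048
 * and an ELS pool of 256 XRIs, the remaining 1792 XRIs are dedicated to the
 * NVMET sgl list, and the list is grown or shrunk to match that count on
 * each call.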
4055 * 4056 * Return codes 4057 * 0 - successful (for now, it always returns 0) 4058 **/ 4059 int 4060 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 4061 { 4062 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 4063 uint16_t i, lxri, xri_cnt, els_xri_cnt; 4064 uint16_t nvmet_xri_cnt; 4065 LIST_HEAD(nvmet_sgl_list); 4066 int rc; 4067 4068 /* 4069 * update on pci function's nvmet xri-sgl list 4070 */ 4071 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4072 4073 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 4074 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4075 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 4076 /* els xri-sgl expanded */ 4077 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 4078 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4079 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 4080 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 4081 /* allocate the additional nvmet sgls */ 4082 for (i = 0; i < xri_cnt; i++) { 4083 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 4084 GFP_KERNEL); 4085 if (sglq_entry == NULL) { 4086 lpfc_printf_log(phba, KERN_ERR, 4087 LOG_TRACE_EVENT, 4088 "6303 Failure to allocate an " 4089 "NVMET sgl entry:%d\n", i); 4090 rc = -ENOMEM; 4091 goto out_free_mem; 4092 } 4093 sglq_entry->buff_type = NVMET_BUFF_TYPE; 4094 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 4095 &sglq_entry->phys); 4096 if (sglq_entry->virt == NULL) { 4097 kfree(sglq_entry); 4098 lpfc_printf_log(phba, KERN_ERR, 4099 LOG_TRACE_EVENT, 4100 "6304 Failure to allocate an " 4101 "NVMET buf:%d\n", i); 4102 rc = -ENOMEM; 4103 goto out_free_mem; 4104 } 4105 sglq_entry->sgl = sglq_entry->virt; 4106 memset(sglq_entry->sgl, 0, 4107 phba->cfg_sg_dma_buf_size); 4108 sglq_entry->state = SGL_FREED; 4109 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 4110 } 4111 spin_lock_irq(&phba->hbalock); 4112 spin_lock(&phba->sli4_hba.sgl_list_lock); 4113 list_splice_init(&nvmet_sgl_list, 4114 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4115 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4116 spin_unlock_irq(&phba->hbalock); 4117 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 4118 /* nvmet xri-sgl shrunk */ 4119 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 4120 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4121 "6305 NVMET xri-sgl count decreased from " 4122 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 4123 nvmet_xri_cnt); 4124 spin_lock_irq(&phba->hbalock); 4125 spin_lock(&phba->sli4_hba.sgl_list_lock); 4126 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 4127 &nvmet_sgl_list); 4128 /* release extra nvmet sgls from list */ 4129 for (i = 0; i < xri_cnt; i++) { 4130 list_remove_head(&nvmet_sgl_list, 4131 sglq_entry, struct lpfc_sglq, list); 4132 if (sglq_entry) { 4133 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 4134 sglq_entry->phys); 4135 kfree(sglq_entry); 4136 } 4137 } 4138 list_splice_init(&nvmet_sgl_list, 4139 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4140 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4141 spin_unlock_irq(&phba->hbalock); 4142 } else 4143 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4144 "6306 NVMET xri-sgl count unchanged: %d\n", 4145 nvmet_xri_cnt); 4146 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 4147 4148 /* update xris to nvmet sgls on the list */ 4149 sglq_entry = NULL; 4150 sglq_entry_next = NULL; 4151 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 4152 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 4153 lxri = lpfc_sli4_next_xritag(phba); 4154 if (lxri == NO_XRI) { 4155 lpfc_printf_log(phba, KERN_ERR, 
4156 LOG_TRACE_EVENT, 4157 "6307 Failed to allocate xri for " 4158 "NVMET sgl\n"); 4159 rc = -ENOMEM; 4160 goto out_free_mem; 4161 } 4162 sglq_entry->sli4_lxritag = lxri; 4163 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4164 } 4165 return 0; 4166 4167 out_free_mem: 4168 lpfc_free_nvmet_sgl_list(phba); 4169 return rc; 4170 } 4171 4172 int 4173 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 4174 { 4175 LIST_HEAD(blist); 4176 struct lpfc_sli4_hdw_queue *qp; 4177 struct lpfc_io_buf *lpfc_cmd; 4178 struct lpfc_io_buf *iobufp, *prev_iobufp; 4179 int idx, cnt, xri, inserted; 4180 4181 cnt = 0; 4182 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4183 qp = &phba->sli4_hba.hdwq[idx]; 4184 spin_lock_irq(&qp->io_buf_list_get_lock); 4185 spin_lock(&qp->io_buf_list_put_lock); 4186 4187 /* Take everything off the get and put lists */ 4188 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 4189 list_splice(&qp->lpfc_io_buf_list_put, &blist); 4190 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 4191 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 4192 cnt += qp->get_io_bufs + qp->put_io_bufs; 4193 qp->get_io_bufs = 0; 4194 qp->put_io_bufs = 0; 4195 qp->total_io_bufs = 0; 4196 spin_unlock(&qp->io_buf_list_put_lock); 4197 spin_unlock_irq(&qp->io_buf_list_get_lock); 4198 } 4199 4200 /* 4201 * Take IO buffers off blist and put on cbuf sorted by XRI. 4202 * This is because POST_SGL takes a sequential range of XRIs 4203 * to post to the firmware. 4204 */ 4205 for (idx = 0; idx < cnt; idx++) { 4206 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 4207 if (!lpfc_cmd) 4208 return cnt; 4209 if (idx == 0) { 4210 list_add_tail(&lpfc_cmd->list, cbuf); 4211 continue; 4212 } 4213 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 4214 inserted = 0; 4215 prev_iobufp = NULL; 4216 list_for_each_entry(iobufp, cbuf, list) { 4217 if (xri < iobufp->cur_iocbq.sli4_xritag) { 4218 if (prev_iobufp) 4219 list_add(&lpfc_cmd->list, 4220 &prev_iobufp->list); 4221 else 4222 list_add(&lpfc_cmd->list, cbuf); 4223 inserted = 1; 4224 break; 4225 } 4226 prev_iobufp = iobufp; 4227 } 4228 if (!inserted) 4229 list_add_tail(&lpfc_cmd->list, cbuf); 4230 } 4231 return cnt; 4232 } 4233 4234 int 4235 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 4236 { 4237 struct lpfc_sli4_hdw_queue *qp; 4238 struct lpfc_io_buf *lpfc_cmd; 4239 int idx, cnt; 4240 4241 qp = phba->sli4_hba.hdwq; 4242 cnt = 0; 4243 while (!list_empty(cbuf)) { 4244 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4245 list_remove_head(cbuf, lpfc_cmd, 4246 struct lpfc_io_buf, list); 4247 if (!lpfc_cmd) 4248 return cnt; 4249 cnt++; 4250 qp = &phba->sli4_hba.hdwq[idx]; 4251 lpfc_cmd->hdwq_no = idx; 4252 lpfc_cmd->hdwq = qp; 4253 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL; 4254 spin_lock(&qp->io_buf_list_put_lock); 4255 list_add_tail(&lpfc_cmd->list, 4256 &qp->lpfc_io_buf_list_put); 4257 qp->put_io_bufs++; 4258 qp->total_io_bufs++; 4259 spin_unlock(&qp->io_buf_list_put_lock); 4260 } 4261 } 4262 return cnt; 4263 } 4264 4265 /** 4266 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 4267 * @phba: pointer to lpfc hba data structure. 4268 * 4269 * This routine first calculates the sizes of the current els and allocated 4270 * scsi sgl lists, and then goes through all sgls to updates the physical 4271 * XRIs assigned due to port function reset. During port initialization, the 4272 * current els and allocated scsi sgl lists are 0s. 
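 *
 * The existing IO buffers are first drained into an XRI-sorted list by
 * lpfc_io_buf_flush(), any excess beyond the new io_xri_max is freed, the
 * remaining buffers are assigned fresh XRI tags, and lpfc_io_buf_replenish()
 * redistributes them round-robin across the hardware queues.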
4273 *
4274 * Return codes
4275 * 0 - successful (for now, it always returns 0)
4276 **/
4277 int
4278 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4279 {
4280 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4281 uint16_t i, lxri, els_xri_cnt;
4282 uint16_t io_xri_cnt, io_xri_max;
4283 LIST_HEAD(io_sgl_list);
4284 int rc, cnt;
4285
4286 /*
4287 * update on pci function's allocated nvme xri-sgl list
4288 */
4289
4290 /* maximum number of xris available for nvme buffers */
4291 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4292 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4293 phba->sli4_hba.io_xri_max = io_xri_max;
4294
4295 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4296 "6074 Current allocated XRI sgl count:%d, "
4297 "maximum XRI count:%d\n",
4298 phba->sli4_hba.io_xri_cnt,
4299 phba->sli4_hba.io_xri_max);
4300
4301 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4302
4303 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4304 /* max nvme xri shrunk below the allocated nvme buffers */
4305 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4306 phba->sli4_hba.io_xri_max;
4307 /* release the extra allocated nvme buffers */
4308 for (i = 0; i < io_xri_cnt; i++) {
4309 list_remove_head(&io_sgl_list, lpfc_ncmd,
4310 struct lpfc_io_buf, list);
4311 if (lpfc_ncmd) {
4312 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4313 lpfc_ncmd->data,
4314 lpfc_ncmd->dma_handle);
4315 kfree(lpfc_ncmd);
4316 }
4317 }
4318 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4319 }
4320
4321 /* update xris associated to remaining allocated nvme buffers */
4322 lpfc_ncmd = NULL;
4323 lpfc_ncmd_next = NULL;
4324 phba->sli4_hba.io_xri_cnt = cnt;
4325 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4326 &io_sgl_list, list) {
4327 lxri = lpfc_sli4_next_xritag(phba);
4328 if (lxri == NO_XRI) {
4329 lpfc_printf_log(phba, KERN_ERR,
4330 LOG_TRACE_EVENT,
4331 "6075 Failed to allocate xri for "
4332 "nvme buffer\n");
4333 rc = -ENOMEM;
4334 goto out_free_mem;
4335 }
4336 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4337 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4338 }
4339 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4340 return 0;
4341
4342 out_free_mem:
4343 lpfc_io_free(phba);
4344 return rc;
4345 }
4346
4347 /**
4348 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4349 * @phba: Pointer to lpfc hba data structure.
4350 * @num_to_alloc: The requested number of buffers to allocate.
4351 *
4352 * This routine allocates nvme buffers for a device with SLI-4 interface spec.
4353 * Each nvme buffer contains all the necessary information needed to initiate
4354 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4355 * them on a list, it posts them to the port by using SGL block post.
4356 *
4357 * Return codes:
4358 * int - number of IO buffers that were allocated and posted.
4359 * 0 = failure, less than num_to_alloc is a partial failure.
4360 **/
4361 int
4362 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4363 {
4364 struct lpfc_io_buf *lpfc_ncmd;
4365 struct lpfc_iocbq *pwqeq;
4366 uint16_t iotag, lxri = 0;
4367 int bcnt, num_posted;
4368 LIST_HEAD(prep_nblist);
4369 LIST_HEAD(post_nblist);
4370 LIST_HEAD(nvme_nblist);
4371
4372 phba->sli4_hba.io_xri_cnt = 0;
4373 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4374 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4375 if (!lpfc_ncmd)
4376 break;
4377 /*
4378 * Get memory from the pci pool to map the virt space to
4379 * pci bus space for an I/O.
The DMA buffer includes the 4380 * number of SGE's necessary to support the sg_tablesize. 4381 */ 4382 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4383 GFP_KERNEL, 4384 &lpfc_ncmd->dma_handle); 4385 if (!lpfc_ncmd->data) { 4386 kfree(lpfc_ncmd); 4387 break; 4388 } 4389 4390 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4391 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4392 } else { 4393 /* 4394 * 4K Page alignment is CRITICAL to BlockGuard, double 4395 * check to be sure. 4396 */ 4397 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4398 (((unsigned long)(lpfc_ncmd->data) & 4399 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4400 lpfc_printf_log(phba, KERN_ERR, 4401 LOG_TRACE_EVENT, 4402 "3369 Memory alignment err: " 4403 "addr=%lx\n", 4404 (unsigned long)lpfc_ncmd->data); 4405 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4406 lpfc_ncmd->data, 4407 lpfc_ncmd->dma_handle); 4408 kfree(lpfc_ncmd); 4409 break; 4410 } 4411 } 4412 4413 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4414 4415 lxri = lpfc_sli4_next_xritag(phba); 4416 if (lxri == NO_XRI) { 4417 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4418 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4419 kfree(lpfc_ncmd); 4420 break; 4421 } 4422 pwqeq = &lpfc_ncmd->cur_iocbq; 4423 4424 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4425 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4426 if (iotag == 0) { 4427 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4428 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4429 kfree(lpfc_ncmd); 4430 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4431 "6121 Failed to allocate IOTAG for" 4432 " XRI:0x%x\n", lxri); 4433 lpfc_sli4_free_xri(phba, lxri); 4434 break; 4435 } 4436 pwqeq->sli4_lxritag = lxri; 4437 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4438 pwqeq->context1 = lpfc_ncmd; 4439 4440 /* Initialize local short-hand pointers. 
*/ 4441 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4442 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4443 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4444 spin_lock_init(&lpfc_ncmd->buf_lock); 4445 4446 /* add the nvme buffer to a post list */ 4447 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4448 phba->sli4_hba.io_xri_cnt++; 4449 } 4450 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4451 "6114 Allocate %d out of %d requested new NVME " 4452 "buffers\n", bcnt, num_to_alloc); 4453 4454 /* post the list of nvme buffer sgls to port if available */ 4455 if (!list_empty(&post_nblist)) 4456 num_posted = lpfc_sli4_post_io_sgl_list( 4457 phba, &post_nblist, bcnt); 4458 else 4459 num_posted = 0; 4460 4461 return num_posted; 4462 } 4463 4464 static uint64_t 4465 lpfc_get_wwpn(struct lpfc_hba *phba) 4466 { 4467 uint64_t wwn; 4468 int rc; 4469 LPFC_MBOXQ_t *mboxq; 4470 MAILBOX_t *mb; 4471 4472 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4473 GFP_KERNEL); 4474 if (!mboxq) 4475 return (uint64_t)-1; 4476 4477 /* First get WWN of HBA instance */ 4478 lpfc_read_nv(phba, mboxq); 4479 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4480 if (rc != MBX_SUCCESS) { 4481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4482 "6019 Mailbox failed , mbxCmd x%x " 4483 "READ_NV, mbxStatus x%x\n", 4484 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4485 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4486 mempool_free(mboxq, phba->mbox_mem_pool); 4487 return (uint64_t) -1; 4488 } 4489 mb = &mboxq->u.mb; 4490 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4491 /* wwn is WWPN of HBA instance */ 4492 mempool_free(mboxq, phba->mbox_mem_pool); 4493 if (phba->sli_rev == LPFC_SLI_REV4) 4494 return be64_to_cpu(wwn); 4495 else 4496 return rol64(wwn, 32); 4497 } 4498 4499 /** 4500 * lpfc_vmid_res_alloc - Allocates resources for VMID 4501 * @phba: pointer to lpfc hba data structure. 4502 * @vport: pointer to vport data structure 4503 * 4504 * This routine allocated the resources needed for the VMID. 4505 * 4506 * Return codes 4507 * 0 on Success 4508 * Non-0 on Failure 4509 */ 4510 static int 4511 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) 4512 { 4513 /* VMID feature is supported only on SLI4 */ 4514 if (phba->sli_rev == LPFC_SLI_REV3) { 4515 phba->cfg_vmid_app_header = 0; 4516 phba->cfg_vmid_priority_tagging = 0; 4517 } 4518 4519 if (lpfc_is_vmid_enabled(phba)) { 4520 vport->vmid = 4521 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), 4522 GFP_KERNEL); 4523 if (!vport->vmid) 4524 return -ENOMEM; 4525 4526 rwlock_init(&vport->vmid_lock); 4527 4528 /* Set the VMID parameters for the vport */ 4529 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; 4530 vport->vmid_inactivity_timeout = 4531 phba->cfg_vmid_inactivity_timeout; 4532 vport->max_vmid = phba->cfg_max_vmid; 4533 vport->cur_vmid_cnt = 0; 4534 4535 vport->vmid_priority_range = bitmap_zalloc 4536 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); 4537 4538 if (!vport->vmid_priority_range) { 4539 kfree(vport->vmid); 4540 return -ENOMEM; 4541 } 4542 4543 hash_init(vport->hash_table); 4544 } 4545 return 0; 4546 } 4547 4548 /** 4549 * lpfc_create_port - Create an FC port 4550 * @phba: pointer to lpfc hba data structure. 4551 * @instance: a unique integer ID to this FC port. 4552 * @dev: pointer to the device data structure. 4553 * 4554 * This routine creates a FC port for the upper layer protocol. The FC port 4555 * can be created on top of either a physical port or a virtual port provided 4556 * by the HBA. 
This routine also allocates a SCSI host data structure (shost) 4557 * and associates the FC port created before adding the shost into the SCSI 4558 * layer. 4559 * 4560 * Return codes 4561 * @vport - pointer to the virtual N_Port data structure. 4562 * NULL - port create failed. 4563 **/ 4564 struct lpfc_vport * 4565 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4566 { 4567 struct lpfc_vport *vport; 4568 struct Scsi_Host *shost = NULL; 4569 struct scsi_host_template *template; 4570 int error = 0; 4571 int i; 4572 uint64_t wwn; 4573 bool use_no_reset_hba = false; 4574 int rc; 4575 4576 if (lpfc_no_hba_reset_cnt) { 4577 if (phba->sli_rev < LPFC_SLI_REV4 && 4578 dev == &phba->pcidev->dev) { 4579 /* Reset the port first */ 4580 lpfc_sli_brdrestart(phba); 4581 rc = lpfc_sli_chipset_init(phba); 4582 if (rc) 4583 return NULL; 4584 } 4585 wwn = lpfc_get_wwpn(phba); 4586 } 4587 4588 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4589 if (wwn == lpfc_no_hba_reset[i]) { 4590 lpfc_printf_log(phba, KERN_ERR, 4591 LOG_TRACE_EVENT, 4592 "6020 Setting use_no_reset port=%llx\n", 4593 wwn); 4594 use_no_reset_hba = true; 4595 break; 4596 } 4597 } 4598 4599 /* Seed template for SCSI host registration */ 4600 if (dev == &phba->pcidev->dev) { 4601 template = &phba->port_template; 4602 4603 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4604 /* Seed physical port template */ 4605 memcpy(template, &lpfc_template, sizeof(*template)); 4606 4607 if (use_no_reset_hba) 4608 /* template is for a no reset SCSI Host */ 4609 template->eh_host_reset_handler = NULL; 4610 4611 /* Template for all vports this physical port creates */ 4612 memcpy(&phba->vport_template, &lpfc_template, 4613 sizeof(*template)); 4614 phba->vport_template.shost_groups = lpfc_vport_groups; 4615 phba->vport_template.eh_bus_reset_handler = NULL; 4616 phba->vport_template.eh_host_reset_handler = NULL; 4617 phba->vport_template.vendor_id = 0; 4618 4619 /* Initialize the host templates with updated value */ 4620 if (phba->sli_rev == LPFC_SLI_REV4) { 4621 template->sg_tablesize = phba->cfg_scsi_seg_cnt; 4622 phba->vport_template.sg_tablesize = 4623 phba->cfg_scsi_seg_cnt; 4624 } else { 4625 template->sg_tablesize = phba->cfg_sg_seg_cnt; 4626 phba->vport_template.sg_tablesize = 4627 phba->cfg_sg_seg_cnt; 4628 } 4629 4630 } else { 4631 /* NVMET is for physical port only */ 4632 memcpy(template, &lpfc_template_nvme, 4633 sizeof(*template)); 4634 } 4635 } else { 4636 template = &phba->vport_template; 4637 } 4638 4639 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4640 if (!shost) 4641 goto out; 4642 4643 vport = (struct lpfc_vport *) shost->hostdata; 4644 vport->phba = phba; 4645 vport->load_flag |= FC_LOADING; 4646 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4647 vport->fc_rscn_flush = 0; 4648 lpfc_get_vport_cfgparam(vport); 4649 4650 /* Adjust value in vport */ 4651 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4652 4653 shost->unique_id = instance; 4654 shost->max_id = LPFC_MAX_TARGET; 4655 shost->max_lun = vport->cfg_max_luns; 4656 shost->this_id = -1; 4657 shost->max_cmd_len = 16; 4658 4659 if (phba->sli_rev == LPFC_SLI_REV4) { 4660 if (!phba->cfg_fcp_mq_threshold || 4661 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4662 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4663 4664 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4665 phba->cfg_fcp_mq_threshold); 4666 4667 shost->dma_boundary = 4668 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4669 4670 if (phba->cfg_xpsgl && 
!phba->nvmet_support) 4671 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4672 else 4673 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4674 } else 4675 /* SLI-3 has a limited number of hardware queues (3), 4676 * thus there is only one for FCP processing. 4677 */ 4678 shost->nr_hw_queues = 1; 4679 4680 /* 4681 * Set initial can_queue value since 0 is no longer supported and 4682 * scsi_add_host will fail. This will be adjusted later based on the 4683 * max xri value determined in hba setup. 4684 */ 4685 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4686 if (dev != &phba->pcidev->dev) { 4687 shost->transportt = lpfc_vport_transport_template; 4688 vport->port_type = LPFC_NPIV_PORT; 4689 } else { 4690 shost->transportt = lpfc_transport_template; 4691 vport->port_type = LPFC_PHYSICAL_PORT; 4692 } 4693 4694 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4695 "9081 CreatePort TMPLATE type %x TBLsize %d " 4696 "SEGcnt %d/%d\n", 4697 vport->port_type, shost->sg_tablesize, 4698 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4699 4700 /* Allocate the resources for VMID */ 4701 rc = lpfc_vmid_res_alloc(phba, vport); 4702 4703 if (rc) 4704 goto out; 4705 4706 /* Initialize all internally managed lists. */ 4707 INIT_LIST_HEAD(&vport->fc_nodes); 4708 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4709 spin_lock_init(&vport->work_port_lock); 4710 4711 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4712 4713 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4714 4715 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4716 4717 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4718 lpfc_setup_bg(phba, shost); 4719 4720 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4721 if (error) 4722 goto out_put_shost; 4723 4724 spin_lock_irq(&phba->port_list_lock); 4725 list_add_tail(&vport->listentry, &phba->port_list); 4726 spin_unlock_irq(&phba->port_list_lock); 4727 return vport; 4728 4729 out_put_shost: 4730 kfree(vport->vmid); 4731 bitmap_free(vport->vmid_priority_range); 4732 scsi_host_put(shost); 4733 out: 4734 return NULL; 4735 } 4736 4737 /** 4738 * destroy_port - destroy an FC port 4739 * @vport: pointer to an lpfc virtual N_Port data structure. 4740 * 4741 * This routine destroys a FC port from the upper layer protocol. All the 4742 * resources associated with the port are released. 4743 **/ 4744 void 4745 destroy_port(struct lpfc_vport *vport) 4746 { 4747 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4748 struct lpfc_hba *phba = vport->phba; 4749 4750 lpfc_debugfs_terminate(vport); 4751 fc_remove_host(shost); 4752 scsi_remove_host(shost); 4753 4754 spin_lock_irq(&phba->port_list_lock); 4755 list_del_init(&vport->listentry); 4756 spin_unlock_irq(&phba->port_list_lock); 4757 4758 lpfc_cleanup(vport); 4759 return; 4760 } 4761 4762 /** 4763 * lpfc_get_instance - Get a unique integer ID 4764 * 4765 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4766 * uses the kernel idr facility to perform the task. 4767 * 4768 * Return codes: 4769 * instance - a unique integer ID allocated as the new instance. 4770 * -1 - lpfc get instance failed. 4771 **/ 4772 int 4773 lpfc_get_instance(void) 4774 { 4775 int ret; 4776 4777 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4778 return ret < 0 ? -1 : ret; 4779 } 4780 4781 /** 4782 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4783 * @shost: pointer to SCSI host data structure. 4784 * @time: elapsed time of the scan in jiffies. 
4785 * 4786 * This routine is called by the SCSI layer with a SCSI host to determine 4787 * whether the scan host is finished. 4788 * 4789 * Note: there is no scan_start function as adapter initialization will have 4790 * asynchronously kicked off the link initialization. 4791 * 4792 * Return codes 4793 * 0 - SCSI host scan is not over yet. 4794 * 1 - SCSI host scan is over. 4795 **/ 4796 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4797 { 4798 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4799 struct lpfc_hba *phba = vport->phba; 4800 int stat = 0; 4801 4802 spin_lock_irq(shost->host_lock); 4803 4804 if (vport->load_flag & FC_UNLOADING) { 4805 stat = 1; 4806 goto finished; 4807 } 4808 if (time >= msecs_to_jiffies(30 * 1000)) { 4809 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4810 "0461 Scanning longer than 30 " 4811 "seconds. Continuing initialization\n"); 4812 stat = 1; 4813 goto finished; 4814 } 4815 if (time >= msecs_to_jiffies(15 * 1000) && 4816 phba->link_state <= LPFC_LINK_DOWN) { 4817 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4818 "0465 Link down longer than 15 " 4819 "seconds. Continuing initialization\n"); 4820 stat = 1; 4821 goto finished; 4822 } 4823 4824 if (vport->port_state != LPFC_VPORT_READY) 4825 goto finished; 4826 if (vport->num_disc_nodes || vport->fc_prli_sent) 4827 goto finished; 4828 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4829 goto finished; 4830 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4831 goto finished; 4832 4833 stat = 1; 4834 4835 finished: 4836 spin_unlock_irq(shost->host_lock); 4837 return stat; 4838 } 4839 4840 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4841 { 4842 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4843 struct lpfc_hba *phba = vport->phba; 4844 4845 fc_host_supported_speeds(shost) = 0; 4846 /* 4847 * Avoid reporting supported link speed for FCoE as it can't be 4848 * controlled via FCoE. 4849 */ 4850 if (phba->hba_flag & HBA_FCOE_MODE) 4851 return; 4852 4853 if (phba->lmt & LMT_256Gb) 4854 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; 4855 if (phba->lmt & LMT_128Gb) 4856 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4857 if (phba->lmt & LMT_64Gb) 4858 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4859 if (phba->lmt & LMT_32Gb) 4860 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4861 if (phba->lmt & LMT_16Gb) 4862 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4863 if (phba->lmt & LMT_10Gb) 4864 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4865 if (phba->lmt & LMT_8Gb) 4866 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4867 if (phba->lmt & LMT_4Gb) 4868 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4869 if (phba->lmt & LMT_2Gb) 4870 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4871 if (phba->lmt & LMT_1Gb) 4872 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4873 } 4874 4875 /** 4876 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4877 * @shost: pointer to SCSI host data structure. 4878 * 4879 * This routine initializes a given SCSI host attributes on a FC port. The 4880 * SCSI host can be either on top of a physical port or a virtual port. 4881 **/ 4882 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4883 { 4884 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4885 struct lpfc_hba *phba = vport->phba; 4886 /* 4887 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
4888 */ 4889 4890 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4891 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4892 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4893 4894 memset(fc_host_supported_fc4s(shost), 0, 4895 sizeof(fc_host_supported_fc4s(shost))); 4896 fc_host_supported_fc4s(shost)[2] = 1; 4897 fc_host_supported_fc4s(shost)[7] = 1; 4898 4899 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4900 sizeof fc_host_symbolic_name(shost)); 4901 4902 lpfc_host_supported_speeds_set(shost); 4903 4904 fc_host_maxframe_size(shost) = 4905 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4906 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4907 4908 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4909 4910 /* This value is also unchanging */ 4911 memset(fc_host_active_fc4s(shost), 0, 4912 sizeof(fc_host_active_fc4s(shost))); 4913 fc_host_active_fc4s(shost)[2] = 1; 4914 fc_host_active_fc4s(shost)[7] = 1; 4915 4916 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4917 spin_lock_irq(shost->host_lock); 4918 vport->load_flag &= ~FC_LOADING; 4919 spin_unlock_irq(shost->host_lock); 4920 } 4921 4922 /** 4923 * lpfc_stop_port_s3 - Stop SLI3 device port 4924 * @phba: pointer to lpfc hba data structure. 4925 * 4926 * This routine is invoked to stop an SLI3 device port, it stops the device 4927 * from generating interrupts and stops the device driver's timers for the 4928 * device. 4929 **/ 4930 static void 4931 lpfc_stop_port_s3(struct lpfc_hba *phba) 4932 { 4933 /* Clear all interrupt enable conditions */ 4934 writel(0, phba->HCregaddr); 4935 readl(phba->HCregaddr); /* flush */ 4936 /* Clear all pending interrupts */ 4937 writel(0xffffffff, phba->HAregaddr); 4938 readl(phba->HAregaddr); /* flush */ 4939 4940 /* Reset some HBA SLI setup states */ 4941 lpfc_stop_hba_timers(phba); 4942 phba->pport->work_port_events = 0; 4943 } 4944 4945 /** 4946 * lpfc_stop_port_s4 - Stop SLI4 device port 4947 * @phba: pointer to lpfc hba data structure. 4948 * 4949 * This routine is invoked to stop an SLI4 device port, it stops the device 4950 * from generating interrupts and stops the device driver's timers for the 4951 * device. 4952 **/ 4953 static void 4954 lpfc_stop_port_s4(struct lpfc_hba *phba) 4955 { 4956 /* Reset some HBA SLI4 setup states */ 4957 lpfc_stop_hba_timers(phba); 4958 if (phba->pport) 4959 phba->pport->work_port_events = 0; 4960 phba->sli4_hba.intr_enable = 0; 4961 } 4962 4963 /** 4964 * lpfc_stop_port - Wrapper function for stopping hba port 4965 * @phba: Pointer to HBA context object. 4966 * 4967 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4968 * the API jump table function pointer from the lpfc_hba struct. 4969 **/ 4970 void 4971 lpfc_stop_port(struct lpfc_hba *phba) 4972 { 4973 phba->lpfc_stop_port(phba); 4974 4975 if (phba->wq) 4976 flush_workqueue(phba->wq); 4977 } 4978 4979 /** 4980 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4981 * @phba: Pointer to hba for which this call is being executed. 4982 * 4983 * This routine starts the timer waiting for the FCF rediscovery to complete. 
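 *
 * The wait period is LPFC_FCF_REDISCOVER_WAIT_TMO milliseconds; the routine
 * clears FCF_AVAILABLE and FCF_SCAN_DONE to allow action on new FCF async
 * events and leaves FCF_REDISC_PEND set until
 * lpfc_sli4_fcf_redisc_wait_tmo() fires.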
4984 **/ 4985 void 4986 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 4987 { 4988 unsigned long fcf_redisc_wait_tmo = 4989 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 4990 /* Start fcf rediscovery wait period timer */ 4991 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 4992 spin_lock_irq(&phba->hbalock); 4993 /* Allow action to new fcf asynchronous event */ 4994 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 4995 /* Mark the FCF rediscovery pending state */ 4996 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 4997 spin_unlock_irq(&phba->hbalock); 4998 } 4999 5000 /** 5001 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 5002 * @t: Timer context used to obtain the pointer to lpfc hba data structure. 5003 * 5004 * This routine is invoked when waiting for FCF table rediscover has been 5005 * timed out. If new FCF record(s) has (have) been discovered during the 5006 * wait period, a new FCF event shall be added to the FCOE async event 5007 * list, and then worker thread shall be waked up for processing from the 5008 * worker thread context. 5009 **/ 5010 static void 5011 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 5012 { 5013 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 5014 5015 /* Don't send FCF rediscovery event if timer cancelled */ 5016 spin_lock_irq(&phba->hbalock); 5017 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 5018 spin_unlock_irq(&phba->hbalock); 5019 return; 5020 } 5021 /* Clear FCF rediscovery timer pending flag */ 5022 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 5023 /* FCF rediscovery event to worker thread */ 5024 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 5025 spin_unlock_irq(&phba->hbalock); 5026 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 5027 "2776 FCF rediscover quiescent timer expired\n"); 5028 /* wake up worker thread */ 5029 lpfc_worker_wake_up(phba); 5030 } 5031 5032 /** 5033 * lpfc_vmid_poll - VMID timeout detection 5034 * @t: Timer context used to obtain the pointer to lpfc hba data structure. 5035 * 5036 * This routine is invoked when there is no I/O on by a VM for the specified 5037 * amount of time. When this situation is detected, the VMID has to be 5038 * deregistered from the switch and all the local resources freed. The VMID 5039 * will be reassigned to the VM once the I/O begins. 5040 **/ 5041 static void 5042 lpfc_vmid_poll(struct timer_list *t) 5043 { 5044 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll); 5045 u32 wake_up = 0; 5046 5047 /* check if there is a need to issue QFPA */ 5048 if (phba->pport->vmid_priority_tagging) { 5049 wake_up = 1; 5050 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; 5051 } 5052 5053 /* Is the vmid inactivity timer enabled */ 5054 if (phba->pport->vmid_inactivity_timeout || 5055 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) { 5056 wake_up = 1; 5057 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID; 5058 } 5059 5060 if (wake_up) 5061 lpfc_worker_wake_up(phba); 5062 5063 /* restart the timer for the next iteration */ 5064 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * 5065 LPFC_VMID_TIMER)); 5066 } 5067 5068 /** 5069 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 5070 * @phba: pointer to lpfc hba data structure. 5071 * @acqe_link: pointer to the async link completion queue entry. 5072 * 5073 * This routine is to parse the SLI4 link-attention link fault code. 
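 *
 * Known fault codes (none, local, remote and LR/LRR) are accepted silently;
 * any other value is only logged as an unknown link fault code.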
5074 **/ 5075 static void 5076 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 5077 struct lpfc_acqe_link *acqe_link) 5078 { 5079 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 5080 case LPFC_ASYNC_LINK_FAULT_NONE: 5081 case LPFC_ASYNC_LINK_FAULT_LOCAL: 5082 case LPFC_ASYNC_LINK_FAULT_REMOTE: 5083 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 5084 break; 5085 default: 5086 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5087 "0398 Unknown link fault code: x%x\n", 5088 bf_get(lpfc_acqe_link_fault, acqe_link)); 5089 break; 5090 } 5091 } 5092 5093 /** 5094 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 5095 * @phba: pointer to lpfc hba data structure. 5096 * @acqe_link: pointer to the async link completion queue entry. 5097 * 5098 * This routine is to parse the SLI4 link attention type and translate it 5099 * into the base driver's link attention type coding. 5100 * 5101 * Return: Link attention type in terms of base driver's coding. 5102 **/ 5103 static uint8_t 5104 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 5105 struct lpfc_acqe_link *acqe_link) 5106 { 5107 uint8_t att_type; 5108 5109 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 5110 case LPFC_ASYNC_LINK_STATUS_DOWN: 5111 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 5112 att_type = LPFC_ATT_LINK_DOWN; 5113 break; 5114 case LPFC_ASYNC_LINK_STATUS_UP: 5115 /* Ignore physical link up events - wait for logical link up */ 5116 att_type = LPFC_ATT_RESERVED; 5117 break; 5118 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 5119 att_type = LPFC_ATT_LINK_UP; 5120 break; 5121 default: 5122 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5123 "0399 Invalid link attention type: x%x\n", 5124 bf_get(lpfc_acqe_link_status, acqe_link)); 5125 att_type = LPFC_ATT_RESERVED; 5126 break; 5127 } 5128 return att_type; 5129 } 5130 5131 /** 5132 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 5133 * @phba: pointer to lpfc hba data structure. 5134 * 5135 * This routine is to get an SLI3 FC port's link speed in Mbps. 5136 * 5137 * Return: link speed in terms of Mbps. 5138 **/ 5139 uint32_t 5140 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 5141 { 5142 uint32_t link_speed; 5143 5144 if (!lpfc_is_link_up(phba)) 5145 return 0; 5146 5147 if (phba->sli_rev <= LPFC_SLI_REV3) { 5148 switch (phba->fc_linkspeed) { 5149 case LPFC_LINK_SPEED_1GHZ: 5150 link_speed = 1000; 5151 break; 5152 case LPFC_LINK_SPEED_2GHZ: 5153 link_speed = 2000; 5154 break; 5155 case LPFC_LINK_SPEED_4GHZ: 5156 link_speed = 4000; 5157 break; 5158 case LPFC_LINK_SPEED_8GHZ: 5159 link_speed = 8000; 5160 break; 5161 case LPFC_LINK_SPEED_10GHZ: 5162 link_speed = 10000; 5163 break; 5164 case LPFC_LINK_SPEED_16GHZ: 5165 link_speed = 16000; 5166 break; 5167 default: 5168 link_speed = 0; 5169 } 5170 } else { 5171 if (phba->sli4_hba.link_state.logical_speed) 5172 link_speed = 5173 phba->sli4_hba.link_state.logical_speed; 5174 else 5175 link_speed = phba->sli4_hba.link_state.speed; 5176 } 5177 return link_speed; 5178 } 5179 5180 /** 5181 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 5182 * @phba: pointer to lpfc hba data structure. 5183 * @evt_code: asynchronous event code. 5184 * @speed_code: asynchronous event link speed code. 5185 * 5186 * This routine is to parse the giving SLI4 async event link speed code into 5187 * value of Mbps for the link speed. 5188 * 5189 * Return: link speed in terms of Mbps. 
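 *
 * For example, an LPFC_TRAILER_CODE_FC event carrying LPFC_FC_LA_SPEED_32G
 * is reported as 32000 Mbps; unrecognized speed codes map to 0.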
5190 **/ 5191 static uint32_t 5192 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 5193 uint8_t speed_code) 5194 { 5195 uint32_t port_speed; 5196 5197 switch (evt_code) { 5198 case LPFC_TRAILER_CODE_LINK: 5199 switch (speed_code) { 5200 case LPFC_ASYNC_LINK_SPEED_ZERO: 5201 port_speed = 0; 5202 break; 5203 case LPFC_ASYNC_LINK_SPEED_10MBPS: 5204 port_speed = 10; 5205 break; 5206 case LPFC_ASYNC_LINK_SPEED_100MBPS: 5207 port_speed = 100; 5208 break; 5209 case LPFC_ASYNC_LINK_SPEED_1GBPS: 5210 port_speed = 1000; 5211 break; 5212 case LPFC_ASYNC_LINK_SPEED_10GBPS: 5213 port_speed = 10000; 5214 break; 5215 case LPFC_ASYNC_LINK_SPEED_20GBPS: 5216 port_speed = 20000; 5217 break; 5218 case LPFC_ASYNC_LINK_SPEED_25GBPS: 5219 port_speed = 25000; 5220 break; 5221 case LPFC_ASYNC_LINK_SPEED_40GBPS: 5222 port_speed = 40000; 5223 break; 5224 case LPFC_ASYNC_LINK_SPEED_100GBPS: 5225 port_speed = 100000; 5226 break; 5227 default: 5228 port_speed = 0; 5229 } 5230 break; 5231 case LPFC_TRAILER_CODE_FC: 5232 switch (speed_code) { 5233 case LPFC_FC_LA_SPEED_UNKNOWN: 5234 port_speed = 0; 5235 break; 5236 case LPFC_FC_LA_SPEED_1G: 5237 port_speed = 1000; 5238 break; 5239 case LPFC_FC_LA_SPEED_2G: 5240 port_speed = 2000; 5241 break; 5242 case LPFC_FC_LA_SPEED_4G: 5243 port_speed = 4000; 5244 break; 5245 case LPFC_FC_LA_SPEED_8G: 5246 port_speed = 8000; 5247 break; 5248 case LPFC_FC_LA_SPEED_10G: 5249 port_speed = 10000; 5250 break; 5251 case LPFC_FC_LA_SPEED_16G: 5252 port_speed = 16000; 5253 break; 5254 case LPFC_FC_LA_SPEED_32G: 5255 port_speed = 32000; 5256 break; 5257 case LPFC_FC_LA_SPEED_64G: 5258 port_speed = 64000; 5259 break; 5260 case LPFC_FC_LA_SPEED_128G: 5261 port_speed = 128000; 5262 break; 5263 case LPFC_FC_LA_SPEED_256G: 5264 port_speed = 256000; 5265 break; 5266 default: 5267 port_speed = 0; 5268 } 5269 break; 5270 default: 5271 port_speed = 0; 5272 } 5273 return port_speed; 5274 } 5275 5276 /** 5277 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 5278 * @phba: pointer to lpfc hba data structure. 5279 * @acqe_link: pointer to the async link completion queue entry. 5280 * 5281 * This routine is to handle the SLI4 asynchronous FCoE link event. 
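 *
 * The link ACQE is converted into a READ_TOPOLOGY mailbox command: on FC
 * ports the command is actually issued to the port, while in FCoE mode
 * (HBA_FCOE_MODE) the mailbox is filled in locally and the
 * lpfc_mbx_cmpl_read_topology() completion handler is invoked directly.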
5282 **/ 5283 static void 5284 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 5285 struct lpfc_acqe_link *acqe_link) 5286 { 5287 struct lpfc_dmabuf *mp; 5288 LPFC_MBOXQ_t *pmb; 5289 MAILBOX_t *mb; 5290 struct lpfc_mbx_read_top *la; 5291 uint8_t att_type; 5292 int rc; 5293 5294 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 5295 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 5296 return; 5297 phba->fcoe_eventtag = acqe_link->event_tag; 5298 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5299 if (!pmb) { 5300 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5301 "0395 The mboxq allocation failed\n"); 5302 return; 5303 } 5304 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5305 if (!mp) { 5306 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5307 "0396 The lpfc_dmabuf allocation failed\n"); 5308 goto out_free_pmb; 5309 } 5310 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5311 if (!mp->virt) { 5312 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5313 "0397 The mbuf allocation failed\n"); 5314 goto out_free_dmabuf; 5315 } 5316 5317 /* Cleanup any outstanding ELS commands */ 5318 lpfc_els_flush_all_cmd(phba); 5319 5320 /* Block ELS IOCBs until we have done process link event */ 5321 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5322 5323 /* Update link event statistics */ 5324 phba->sli.slistat.link_event++; 5325 5326 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5327 lpfc_read_topology(phba, pmb, mp); 5328 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5329 pmb->vport = phba->pport; 5330 5331 /* Keep the link status for extra SLI4 state machine reference */ 5332 phba->sli4_hba.link_state.speed = 5333 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 5334 bf_get(lpfc_acqe_link_speed, acqe_link)); 5335 phba->sli4_hba.link_state.duplex = 5336 bf_get(lpfc_acqe_link_duplex, acqe_link); 5337 phba->sli4_hba.link_state.status = 5338 bf_get(lpfc_acqe_link_status, acqe_link); 5339 phba->sli4_hba.link_state.type = 5340 bf_get(lpfc_acqe_link_type, acqe_link); 5341 phba->sli4_hba.link_state.number = 5342 bf_get(lpfc_acqe_link_number, acqe_link); 5343 phba->sli4_hba.link_state.fault = 5344 bf_get(lpfc_acqe_link_fault, acqe_link); 5345 phba->sli4_hba.link_state.logical_speed = 5346 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 5347 5348 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5349 "2900 Async FC/FCoE Link event - Speed:%dGBit " 5350 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 5351 "Logical speed:%dMbps Fault:%d\n", 5352 phba->sli4_hba.link_state.speed, 5353 phba->sli4_hba.link_state.topology, 5354 phba->sli4_hba.link_state.status, 5355 phba->sli4_hba.link_state.type, 5356 phba->sli4_hba.link_state.number, 5357 phba->sli4_hba.link_state.logical_speed, 5358 phba->sli4_hba.link_state.fault); 5359 /* 5360 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 5361 * topology info. Note: Optional for non FC-AL ports. 5362 */ 5363 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5364 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5365 if (rc == MBX_NOT_FINISHED) { 5366 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5367 goto out_free_dmabuf; 5368 } 5369 return; 5370 } 5371 /* 5372 * For FCoE Mode: fill in all the topology information we need and call 5373 * the READ_TOPOLOGY completion routine to continue without actually 5374 * sending the READ_TOPOLOGY mailbox command to the port. 
5375 */ 5376 /* Initialize completion status */ 5377 mb = &pmb->u.mb; 5378 mb->mbxStatus = MBX_SUCCESS; 5379 5380 /* Parse port fault information field */ 5381 lpfc_sli4_parse_latt_fault(phba, acqe_link); 5382 5383 /* Parse and translate link attention fields */ 5384 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 5385 la->eventTag = acqe_link->event_tag; 5386 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 5387 bf_set(lpfc_mbx_read_top_link_spd, la, 5388 (bf_get(lpfc_acqe_link_speed, acqe_link))); 5389 5390 /* Fake the following irrelevant fields */ 5391 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 5392 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 5393 bf_set(lpfc_mbx_read_top_il, la, 0); 5394 bf_set(lpfc_mbx_read_top_pb, la, 0); 5395 bf_set(lpfc_mbx_read_top_fa, la, 0); 5396 bf_set(lpfc_mbx_read_top_mm, la, 0); 5397 5398 /* Invoke the lpfc_handle_latt mailbox command callback function */ 5399 lpfc_mbx_cmpl_read_topology(phba, pmb); 5400 5401 return; 5402 5403 out_free_dmabuf: 5404 kfree(mp); 5405 out_free_pmb: 5406 mempool_free(pmb, phba->mbox_mem_pool); 5407 } 5408 5409 /** 5410 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read 5411 * topology. 5412 * @phba: pointer to lpfc hba data structure. 5413 * @speed_code: asynchronous event link speed code. 5414 * 5415 * This routine is to parse the given SLI4 async event link speed code into 5416 * the corresponding Read topology link speed value. 5417 * 5418 * Return: link speed in terms of Read topology. 5419 **/ 5420 static uint8_t 5421 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) 5422 { 5423 uint8_t port_speed; 5424 5425 switch (speed_code) { 5426 case LPFC_FC_LA_SPEED_1G: 5427 port_speed = LPFC_LINK_SPEED_1GHZ; 5428 break; 5429 case LPFC_FC_LA_SPEED_2G: 5430 port_speed = LPFC_LINK_SPEED_2GHZ; 5431 break; 5432 case LPFC_FC_LA_SPEED_4G: 5433 port_speed = LPFC_LINK_SPEED_4GHZ; 5434 break; 5435 case LPFC_FC_LA_SPEED_8G: 5436 port_speed = LPFC_LINK_SPEED_8GHZ; 5437 break; 5438 case LPFC_FC_LA_SPEED_16G: 5439 port_speed = LPFC_LINK_SPEED_16GHZ; 5440 break; 5441 case LPFC_FC_LA_SPEED_32G: 5442 port_speed = LPFC_LINK_SPEED_32GHZ; 5443 break; 5444 case LPFC_FC_LA_SPEED_64G: 5445 port_speed = LPFC_LINK_SPEED_64GHZ; 5446 break; 5447 case LPFC_FC_LA_SPEED_128G: 5448 port_speed = LPFC_LINK_SPEED_128GHZ; 5449 break; 5450 case LPFC_FC_LA_SPEED_256G: 5451 port_speed = LPFC_LINK_SPEED_256GHZ; 5452 break; 5453 default: 5454 port_speed = 0; 5455 break; 5456 } 5457 5458 return port_speed; 5459 } 5460 5461 void 5462 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba) 5463 { 5464 struct rxtable_entry *entry; 5465 int cnt = 0, head, tail, last, start; 5466 5467 head = atomic_read(&phba->rxtable_idx_head); 5468 tail = atomic_read(&phba->rxtable_idx_tail); 5469 if (!phba->rxtable || head == tail) { 5470 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 5471 "4411 Rxtable is empty\n"); 5472 return; 5473 } 5474 last = tail; 5475 start = head; 5476 5477 /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */ 5478 while (start != last) { 5479 if (start) 5480 start--; 5481 else 5482 start = LPFC_MAX_RXMONITOR_ENTRY - 1; 5483 entry = &phba->rxtable[start]; 5484 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5485 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld " 5486 "Lat %lld ASz %lld Info %02d BWUtil %d " 5487 "Int %d slot %d\n", 5488 cnt, entry->max_bytes_per_interval, 5489 entry->total_bytes, entry->rcv_bytes, 5490 entry->avg_io_latency, entry->avg_io_size, 5491 entry->cmf_info,
entry->timer_utilization, 5492 entry->timer_interval, start); 5493 cnt++; 5494 if (cnt >= LPFC_MAX_RXMONITOR_DUMP) 5495 return; 5496 } 5497 } 5498 5499 /** 5500 * lpfc_cgn_update_stat - Save data into congestion stats buffer 5501 * @phba: pointer to lpfc hba data structure. 5502 * @dtag: FPIN descriptor received 5503 * 5504 * Increment the FPIN received counter/time when it happens. 5505 */ 5506 void 5507 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) 5508 { 5509 struct lpfc_cgn_info *cp; 5510 struct tm broken; 5511 struct timespec64 cur_time; 5512 u32 cnt; 5513 u16 value; 5514 5515 /* Make sure we have a congestion info buffer */ 5516 if (!phba->cgn_i) 5517 return; 5518 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5519 ktime_get_real_ts64(&cur_time); 5520 time64_to_tm(cur_time.tv_sec, 0, &broken); 5521 5522 /* Update congestion statistics */ 5523 switch (dtag) { 5524 case ELS_DTAG_LNK_INTEGRITY: 5525 cnt = le32_to_cpu(cp->link_integ_notification); 5526 cnt++; 5527 cp->link_integ_notification = cpu_to_le32(cnt); 5528 5529 cp->cgn_stat_lnk_month = broken.tm_mon + 1; 5530 cp->cgn_stat_lnk_day = broken.tm_mday; 5531 cp->cgn_stat_lnk_year = broken.tm_year - 100; 5532 cp->cgn_stat_lnk_hour = broken.tm_hour; 5533 cp->cgn_stat_lnk_min = broken.tm_min; 5534 cp->cgn_stat_lnk_sec = broken.tm_sec; 5535 break; 5536 case ELS_DTAG_DELIVERY: 5537 cnt = le32_to_cpu(cp->delivery_notification); 5538 cnt++; 5539 cp->delivery_notification = cpu_to_le32(cnt); 5540 5541 cp->cgn_stat_del_month = broken.tm_mon + 1; 5542 cp->cgn_stat_del_day = broken.tm_mday; 5543 cp->cgn_stat_del_year = broken.tm_year - 100; 5544 cp->cgn_stat_del_hour = broken.tm_hour; 5545 cp->cgn_stat_del_min = broken.tm_min; 5546 cp->cgn_stat_del_sec = broken.tm_sec; 5547 break; 5548 case ELS_DTAG_PEER_CONGEST: 5549 cnt = le32_to_cpu(cp->cgn_peer_notification); 5550 cnt++; 5551 cp->cgn_peer_notification = cpu_to_le32(cnt); 5552 5553 cp->cgn_stat_peer_month = broken.tm_mon + 1; 5554 cp->cgn_stat_peer_day = broken.tm_mday; 5555 cp->cgn_stat_peer_year = broken.tm_year - 100; 5556 cp->cgn_stat_peer_hour = broken.tm_hour; 5557 cp->cgn_stat_peer_min = broken.tm_min; 5558 cp->cgn_stat_peer_sec = broken.tm_sec; 5559 break; 5560 case ELS_DTAG_CONGESTION: 5561 cnt = le32_to_cpu(cp->cgn_notification); 5562 cnt++; 5563 cp->cgn_notification = cpu_to_le32(cnt); 5564 5565 cp->cgn_stat_cgn_month = broken.tm_mon + 1; 5566 cp->cgn_stat_cgn_day = broken.tm_mday; 5567 cp->cgn_stat_cgn_year = broken.tm_year - 100; 5568 cp->cgn_stat_cgn_hour = broken.tm_hour; 5569 cp->cgn_stat_cgn_min = broken.tm_min; 5570 cp->cgn_stat_cgn_sec = broken.tm_sec; 5571 } 5572 if (phba->cgn_fpin_frequency && 5573 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5574 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5575 cp->cgn_stat_npm = value; 5576 } 5577 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5578 LPFC_CGN_CRC32_SEED); 5579 cp->cgn_info_crc = cpu_to_le32(value); 5580 } 5581 5582 /** 5583 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer 5584 * @phba: pointer to lpfc hba data structure. 5585 * 5586 * Save the congestion event data every minute. 5587 * On the hour collapse all the minute data into hour data. Every day 5588 * collapse all the hour data into daily data. Separate driver 5589 * and fabrc congestion event counters that will be saved out 5590 * to the registered congestion buffer every minute. 
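 *
 * Rollup layout as implemented below: each call fills one minute slot
 * (driver events, warnings, alarms, average IO latency and MB/s). When
 * cgn_evt_minute reaches a multiple of LPFC_MIN_HOUR the minute slots are
 * summed (latency and bandwidth averaged) into the next hour slot, and at
 * a multiple of LPFC_MIN_DAY the hour slots are collapsed the same way
 * into a day slot, with day slots rotating after LPFC_MAX_CGN_DAYS.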
5591 */ 5592 static void 5593 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) 5594 { 5595 struct lpfc_cgn_info *cp; 5596 struct tm broken; 5597 struct timespec64 cur_time; 5598 uint32_t i, index; 5599 uint16_t value, mvalue; 5600 uint64_t bps; 5601 uint32_t mbps; 5602 uint32_t dvalue, wvalue, lvalue, avalue; 5603 uint64_t latsum; 5604 __le16 *ptr; 5605 __le32 *lptr; 5606 __le16 *mptr; 5607 5608 /* Make sure we have a congestion info buffer */ 5609 if (!phba->cgn_i) 5610 return; 5611 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5612 5613 if (time_before(jiffies, phba->cgn_evt_timestamp)) 5614 return; 5615 phba->cgn_evt_timestamp = jiffies + 5616 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 5617 phba->cgn_evt_minute++; 5618 5619 /* We should get to this point in the routine on 1 minute intervals */ 5620 5621 ktime_get_real_ts64(&cur_time); 5622 time64_to_tm(cur_time.tv_sec, 0, &broken); 5623 5624 if (phba->cgn_fpin_frequency && 5625 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5626 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5627 cp->cgn_stat_npm = value; 5628 } 5629 5630 /* Read and clear the latency counters for this minute */ 5631 lvalue = atomic_read(&phba->cgn_latency_evt_cnt); 5632 latsum = atomic64_read(&phba->cgn_latency_evt); 5633 atomic_set(&phba->cgn_latency_evt_cnt, 0); 5634 atomic64_set(&phba->cgn_latency_evt, 0); 5635 5636 /* We need to store MB/sec bandwidth in the congestion information. 5637 * block_cnt is count of 512 byte blocks for the entire minute, 5638 * bps will get bytes per sec before finally converting to MB/sec. 5639 */ 5640 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; 5641 phba->rx_block_cnt = 0; 5642 mvalue = bps / (1024 * 1024); /* convert to MB/sec */ 5643 5644 /* Every minute */ 5645 /* cgn parameters */ 5646 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 5647 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 5648 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 5649 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 5650 5651 /* Fill in default LUN qdepth */ 5652 value = (uint16_t)(phba->pport->cfg_lun_queue_depth); 5653 cp->cgn_lunq = cpu_to_le16(value); 5654 5655 /* Record congestion buffer info - every minute 5656 * cgn_driver_evt_cnt (Driver events) 5657 * cgn_fabric_warn_cnt (Congestion Warnings) 5658 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) 5659 * cgn_fabric_alarm_cnt (Congestion Alarms) 5660 */ 5661 index = ++cp->cgn_index_minute; 5662 if (cp->cgn_index_minute == LPFC_MIN_HOUR) { 5663 cp->cgn_index_minute = 0; 5664 index = 0; 5665 } 5666 5667 /* Get the number of driver events in this sample and reset counter */ 5668 dvalue = atomic_read(&phba->cgn_driver_evt_cnt); 5669 atomic_set(&phba->cgn_driver_evt_cnt, 0); 5670 5671 /* Get the number of warning events - FPIN and Signal for this minute */ 5672 wvalue = 0; 5673 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || 5674 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5675 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5676 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); 5677 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 5678 5679 /* Get the number of alarm events - FPIN and Signal for this minute */ 5680 avalue = 0; 5681 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || 5682 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5683 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); 5684 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 5685 5686 /* Collect the driver, warning, alarm and latency counts for this 5687 * minute into the driver congestion buffer. 
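 * The latency slot holds the per-IO average for the minute
 * (latsum / lvalue) and the bandwidth slot holds the MB/s value computed
 * above from rx_block_cnt; a minute with no IO records zero latency.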
5688 */ 5689 ptr = &cp->cgn_drvr_min[index]; 5690 value = (uint16_t)dvalue; 5691 *ptr = cpu_to_le16(value); 5692 5693 ptr = &cp->cgn_warn_min[index]; 5694 value = (uint16_t)wvalue; 5695 *ptr = cpu_to_le16(value); 5696 5697 ptr = &cp->cgn_alarm_min[index]; 5698 value = (uint16_t)avalue; 5699 *ptr = cpu_to_le16(value); 5700 5701 lptr = &cp->cgn_latency_min[index]; 5702 if (lvalue) { 5703 lvalue = (uint32_t)div_u64(latsum, lvalue); 5704 *lptr = cpu_to_le32(lvalue); 5705 } else { 5706 *lptr = 0; 5707 } 5708 5709 /* Collect the bandwidth value into the driver's congestion buffer. */ 5710 mptr = &cp->cgn_bw_min[index]; 5711 *mptr = cpu_to_le16(mvalue); 5712 5713 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5714 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n", 5715 index, dvalue, wvalue, *lptr, mvalue, avalue); 5716 5717 /* Every hour */ 5718 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) { 5719 /* Record congestion buffer info - every hour 5720 * Collapse all minutes into an hour 5721 */ 5722 index = ++cp->cgn_index_hour; 5723 if (cp->cgn_index_hour == LPFC_HOUR_DAY) { 5724 cp->cgn_index_hour = 0; 5725 index = 0; 5726 } 5727 5728 dvalue = 0; 5729 wvalue = 0; 5730 lvalue = 0; 5731 avalue = 0; 5732 mvalue = 0; 5733 mbps = 0; 5734 for (i = 0; i < LPFC_MIN_HOUR; i++) { 5735 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]); 5736 wvalue += le16_to_cpu(cp->cgn_warn_min[i]); 5737 lvalue += le32_to_cpu(cp->cgn_latency_min[i]); 5738 mbps += le16_to_cpu(cp->cgn_bw_min[i]); 5739 avalue += le16_to_cpu(cp->cgn_alarm_min[i]); 5740 } 5741 if (lvalue) /* Avg of latency averages */ 5742 lvalue /= LPFC_MIN_HOUR; 5743 if (mbps) /* Avg of Bandwidth averages */ 5744 mvalue = mbps / LPFC_MIN_HOUR; 5745 5746 lptr = &cp->cgn_drvr_hr[index]; 5747 *lptr = cpu_to_le32(dvalue); 5748 lptr = &cp->cgn_warn_hr[index]; 5749 *lptr = cpu_to_le32(wvalue); 5750 lptr = &cp->cgn_latency_hr[index]; 5751 *lptr = cpu_to_le32(lvalue); 5752 mptr = &cp->cgn_bw_hr[index]; 5753 *mptr = cpu_to_le16(mvalue); 5754 lptr = &cp->cgn_alarm_hr[index]; 5755 *lptr = cpu_to_le32(avalue); 5756 5757 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5758 "2419 Congestion Info - hour " 5759 "(%d): %d %d %d %d %d\n", 5760 index, dvalue, wvalue, lvalue, mvalue, avalue); 5761 } 5762 5763 /* Every day */ 5764 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) { 5765 /* Record congestion buffer info - every day 5766 * Collapse all hours into a day. Rotate days 5767 * after LPFC_MAX_CGN_DAYS. 5768 */ 5769 index = ++cp->cgn_index_day; 5770 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { 5771 cp->cgn_index_day = 0; 5772 index = 0; 5773 } 5774 5775 /* Anytime we overwrite daily index 0, after we wrap, 5776 * we will be overwriting the oldest day, so we must 5777 * update the congestion data start time for that day. 5778 * That start time should have previously been saved after 5779 * we wrote the last day's worth of data.
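 * In other words, once the day index has wrapped past LPFC_MAX_CGN_DAYS,
 * writing slot 0 again discards the oldest day, and the timestamp captured
 * when the last slot was filled (cgn_daily_ts) becomes the start time
 * reported in the buffer header below.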
5780 */ 5781 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { 5782 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); 5783 5784 cp->cgn_info_month = broken.tm_mon + 1; 5785 cp->cgn_info_day = broken.tm_mday; 5786 cp->cgn_info_year = broken.tm_year - 100; 5787 cp->cgn_info_hour = broken.tm_hour; 5788 cp->cgn_info_minute = broken.tm_min; 5789 cp->cgn_info_second = broken.tm_sec; 5790 5791 lpfc_printf_log 5792 (phba, KERN_INFO, LOG_CGN_MGMT, 5793 "2646 CGNInfo idx0 Start Time: " 5794 "%d/%d/%d %d:%d:%d\n", 5795 cp->cgn_info_day, cp->cgn_info_month, 5796 cp->cgn_info_year, cp->cgn_info_hour, 5797 cp->cgn_info_minute, cp->cgn_info_second); 5798 } 5799 5800 dvalue = 0; 5801 wvalue = 0; 5802 lvalue = 0; 5803 mvalue = 0; 5804 mbps = 0; 5805 avalue = 0; 5806 for (i = 0; i < LPFC_HOUR_DAY; i++) { 5807 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); 5808 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); 5809 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); 5810 mbps += le16_to_cpu(cp->cgn_bw_hr[i]); 5811 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); 5812 } 5813 if (lvalue) /* Avg of latency averages */ 5814 lvalue /= LPFC_HOUR_DAY; 5815 if (mbps) /* Avg of Bandwidth averages */ 5816 mvalue = mbps / LPFC_HOUR_DAY; 5817 5818 lptr = &cp->cgn_drvr_day[index]; 5819 *lptr = cpu_to_le32(dvalue); 5820 lptr = &cp->cgn_warn_day[index]; 5821 *lptr = cpu_to_le32(wvalue); 5822 lptr = &cp->cgn_latency_day[index]; 5823 *lptr = cpu_to_le32(lvalue); 5824 mptr = &cp->cgn_bw_day[index]; 5825 *mptr = cpu_to_le16(mvalue); 5826 lptr = &cp->cgn_alarm_day[index]; 5827 *lptr = cpu_to_le32(avalue); 5828 5829 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5830 "2420 Congestion Info - daily (%d): " 5831 "%d %d %d %d %d\n", 5832 index, dvalue, wvalue, lvalue, mvalue, avalue); 5833 5834 /* We just wrote LPFC_MAX_CGN_DAYS of data, 5835 * so we are wrapped on any data after this. 5836 * Save this as the start time for the next day. 5837 */ 5838 if (index == (LPFC_MAX_CGN_DAYS - 1)) { 5839 phba->hba_flag |= HBA_CGN_DAY_WRAP; 5840 ktime_get_real_ts64(&phba->cgn_daily_ts); 5841 } 5842 } 5843 5844 /* Use the frequency found in the last rcv'ed FPIN */ 5845 value = phba->cgn_fpin_frequency; 5846 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) 5847 cp->cgn_warn_freq = cpu_to_le16(value); 5848 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) 5849 cp->cgn_alarm_freq = cpu_to_le16(value); 5850 5851 /* Frequency (in ms) Signal Warning/Signal Congestion Notifications 5852 * are received by the HBA 5853 */ 5854 value = phba->cgn_sig_freq; 5855 5856 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5857 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5858 cp->cgn_warn_freq = cpu_to_le16(value); 5859 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5860 cp->cgn_alarm_freq = cpu_to_le16(value); 5861 5862 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5863 LPFC_CGN_CRC32_SEED); 5864 cp->cgn_info_crc = cpu_to_le32(lvalue); 5865 } 5866 5867 /** 5868 * lpfc_calc_cmf_latency - latency from start of rxate timer interval 5869 * @phba: The Hba for which this call is being executed. 5870 * 5871 * The routine calculates the latency from the beginning of the CMF timer 5872 * interval to the current point in time. It is called from IO completion 5873 * when we exceed our Bandwidth limitation for the time interval. 
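 *
 * The calculation works at millisecond granularity. As a worked example,
 * if the interval started at 10s + 900000000ns and the completion occurs
 * at 11s + 200000000ns, the seconds differ and the nanosecond part is
 * smaller, so msec = (11 - 10 - 1) * 1000 +
 * ((1000000000 - 900000000) + 200000000) / 1000000 = 300ms.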
5874 */ 5875 uint32_t 5876 lpfc_calc_cmf_latency(struct lpfc_hba *phba) 5877 { 5878 struct timespec64 cmpl_time; 5879 uint32_t msec = 0; 5880 5881 ktime_get_real_ts64(&cmpl_time); 5882 5883 /* This routine works on a ms granularity so sec and usec are 5884 * converted accordingly. 5885 */ 5886 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { 5887 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / 5888 NSEC_PER_MSEC; 5889 } else { 5890 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { 5891 msec = (cmpl_time.tv_sec - 5892 phba->cmf_latency.tv_sec) * MSEC_PER_SEC; 5893 msec += ((cmpl_time.tv_nsec - 5894 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); 5895 } else { 5896 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - 5897 1) * MSEC_PER_SEC; 5898 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + 5899 cmpl_time.tv_nsec) / NSEC_PER_MSEC); 5900 } 5901 } 5902 return msec; 5903 } 5904 5905 /** 5906 * lpfc_cmf_timer - This is the timer function for one congestion 5907 * rate interval. 5908 * @timer: Pointer to the high resolution timer that expired 5909 */ 5910 static enum hrtimer_restart 5911 lpfc_cmf_timer(struct hrtimer *timer) 5912 { 5913 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, 5914 cmf_timer); 5915 struct rxtable_entry *entry; 5916 uint32_t io_cnt; 5917 uint32_t head, tail; 5918 uint32_t busy, max_read; 5919 uint64_t total, rcv, lat, mbpi, extra, cnt; 5920 int timer_interval = LPFC_CMF_INTERVAL; 5921 uint32_t ms; 5922 struct lpfc_cgn_stat *cgs; 5923 int cpu; 5924 5925 /* Only restart the timer if congestion mgmt is on */ 5926 if (phba->cmf_active_mode == LPFC_CFG_OFF || 5927 !phba->cmf_latency.tv_sec) { 5928 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5929 "6224 CMF timer exit: %d %lld\n", 5930 phba->cmf_active_mode, 5931 (uint64_t)phba->cmf_latency.tv_sec); 5932 return HRTIMER_NORESTART; 5933 } 5934 5935 /* If pport is not ready yet, just exit and wait for 5936 * the next timer cycle to hit. 5937 */ 5938 if (!phba->pport) 5939 goto skip; 5940 5941 /* Do not block SCSI IO while in the timer routine since 5942 * total_bytes will be cleared 5943 */ 5944 atomic_set(&phba->cmf_stop_io, 1); 5945 5946 /* First we need to calculate the actual ms between 5947 * the last timer interrupt and this one. We ask for 5948 * LPFC_CMF_INTERVAL, however the actual time may 5949 * vary depending on system overhead. 5950 */ 5951 ms = lpfc_calc_cmf_latency(phba); 5952 5953 5954 /* Immediately after we calculate the time since the last 5955 * timer interrupt, set the start time for the next 5956 * interrupt 5957 */ 5958 ktime_get_real_ts64(&phba->cmf_latency); 5959 5960 phba->cmf_link_byte_count = 5961 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); 5962 5963 /* Collect all the stats from the prior timer interval */ 5964 total = 0; 5965 io_cnt = 0; 5966 lat = 0; 5967 rcv = 0; 5968 for_each_present_cpu(cpu) { 5969 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 5970 total += atomic64_xchg(&cgs->total_bytes, 0); 5971 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); 5972 lat += atomic64_xchg(&cgs->rx_latency, 0); 5973 rcv += atomic64_xchg(&cgs->rcv_bytes, 0); 5974 } 5975 5976 /* Before we issue another CMF_SYNC_WQE, retrieve the BW 5977 * returned from the last CMF_SYNC_WQE issued, from 5978 * cmf_last_sync_bw. This will be the target BW for 5979 * this next timer interval. 
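 * If the interval ran short (ms < LPFC_CMF_INTERVAL) the byte count is
 * scaled up to a full interval before the sync WQE is issued, e.g. bytes
 * moved in an interval that only lasted 97% of the nominal length are
 * scaled by roughly 1/0.97 (total / ms * LPFC_CMF_INTERVAL), and capped at
 * mbpi when the short interval was deliberately scheduled (HBA_SHORT_CMF).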
5980 */ 5981 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 5982 phba->link_state != LPFC_LINK_DOWN && 5983 phba->hba_flag & HBA_SETUP) { 5984 mbpi = phba->cmf_last_sync_bw; 5985 phba->cmf_last_sync_bw = 0; 5986 extra = 0; 5987 5988 /* Calculate any extra bytes needed to account for the 5989 * timer accuracy. If we are less than LPFC_CMF_INTERVAL 5990 * calculate the adjustment needed for total to reflect 5991 * a full LPFC_CMF_INTERVAL. 5992 */ 5993 if (ms && ms < LPFC_CMF_INTERVAL) { 5994 cnt = div_u64(total, ms); /* bytes per ms */ 5995 cnt *= LPFC_CMF_INTERVAL; /* what total should be */ 5996 5997 /* If the timeout is scheduled to be shorter, 5998 * this value may skew the data, so cap it at mbpi. 5999 */ 6000 if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi) 6001 cnt = mbpi; 6002 6003 extra = cnt - total; 6004 } 6005 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); 6006 } else { 6007 /* For Monitor mode or link down we want mbpi 6008 * to be the full link speed 6009 */ 6010 mbpi = phba->cmf_link_byte_count; 6011 extra = 0; 6012 } 6013 phba->cmf_timer_cnt++; 6014 6015 if (io_cnt) { 6016 /* Update congestion info buffer latency in us */ 6017 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); 6018 atomic64_add(lat, &phba->cgn_latency_evt); 6019 } 6020 busy = atomic_xchg(&phba->cmf_busy, 0); 6021 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); 6022 6023 /* Calculate MBPI for the next timer interval */ 6024 if (mbpi) { 6025 if (mbpi > phba->cmf_link_byte_count || 6026 phba->cmf_active_mode == LPFC_CFG_MONITOR) 6027 mbpi = phba->cmf_link_byte_count; 6028 6029 /* Change max_bytes_per_interval to what the prior 6030 * CMF_SYNC_WQE cmpl indicated. 6031 */ 6032 if (mbpi != phba->cmf_max_bytes_per_interval) 6033 phba->cmf_max_bytes_per_interval = mbpi; 6034 } 6035 6036 /* Save rxmonitor information for debug */ 6037 if (phba->rxtable) { 6038 head = atomic_xchg(&phba->rxtable_idx_head, 6039 LPFC_RXMONITOR_TABLE_IN_USE); 6040 entry = &phba->rxtable[head]; 6041 entry->total_bytes = total; 6042 entry->cmf_bytes = total + extra; 6043 entry->rcv_bytes = rcv; 6044 entry->cmf_busy = busy; 6045 entry->cmf_info = phba->cmf_active_info; 6046 if (io_cnt) { 6047 entry->avg_io_latency = div_u64(lat, io_cnt); 6048 entry->avg_io_size = div_u64(rcv, io_cnt); 6049 } else { 6050 entry->avg_io_latency = 0; 6051 entry->avg_io_size = 0; 6052 } 6053 entry->max_read_cnt = max_read; 6054 entry->io_cnt = io_cnt; 6055 entry->max_bytes_per_interval = mbpi; 6056 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 6057 entry->timer_utilization = phba->cmf_last_ts; 6058 else 6059 entry->timer_utilization = ms; 6060 entry->timer_interval = ms; 6061 phba->cmf_last_ts = 0; 6062 6063 /* Increment rxtable index */ 6064 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY; 6065 tail = atomic_read(&phba->rxtable_idx_tail); 6066 if (head == tail) { 6067 tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY; 6068 atomic_set(&phba->rxtable_idx_tail, tail); 6069 } 6070 atomic_set(&phba->rxtable_idx_head, head); 6071 } 6072 6073 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { 6074 /* If Monitor mode, check if we are oversubscribed 6075 * against the full line rate. 
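 * That is, if the bytes moved this interval exceed what the link could
 * carry in one interval (mbpi, which Monitor mode pins to
 * cmf_link_byte_count), count a driver-detected congestion event for the
 * congestion stats buffer.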
6076 */ 6077 if (mbpi && total > mbpi) 6078 atomic_inc(&phba->cgn_driver_evt_cnt); 6079 } 6080 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ 6081 6082 /* Each minute save Fabric and Driver congestion information */ 6083 lpfc_cgn_save_evt_cnt(phba); 6084 6085 phba->hba_flag &= ~HBA_SHORT_CMF; 6086 6087 /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the 6088 * minute, adjust our next timer interval, if needed, to ensure a 6089 * 1 minute granularity when we get the next timer interrupt. 6090 */ 6091 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL), 6092 phba->cgn_evt_timestamp)) { 6093 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp - 6094 jiffies); 6095 if (timer_interval <= 0) 6096 timer_interval = LPFC_CMF_INTERVAL; 6097 else 6098 phba->hba_flag |= HBA_SHORT_CMF; 6099 6100 /* If we adjust timer_interval, max_bytes_per_interval 6101 * needs to be adjusted as well. 6102 */ 6103 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 6104 timer_interval, 1000); 6105 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) 6106 phba->cmf_max_bytes_per_interval = 6107 phba->cmf_link_byte_count; 6108 } 6109 6110 /* Since total_bytes has already been zero'ed, its okay to unblock 6111 * after max_bytes_per_interval is setup. 6112 */ 6113 if (atomic_xchg(&phba->cmf_bw_wait, 0)) 6114 queue_work(phba->wq, &phba->unblock_request_work); 6115 6116 /* SCSI IO is now unblocked */ 6117 atomic_set(&phba->cmf_stop_io, 0); 6118 6119 skip: 6120 hrtimer_forward_now(timer, 6121 ktime_set(0, timer_interval * NSEC_PER_MSEC)); 6122 return HRTIMER_RESTART; 6123 } 6124 6125 #define trunk_link_status(__idx)\ 6126 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6127 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ 6128 "Link up" : "Link down") : "NA" 6129 /* Did port __idx reported an error */ 6130 #define trunk_port_fault(__idx)\ 6131 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6132 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA" 6133 6134 static void 6135 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 6136 struct lpfc_acqe_fc_la *acqe_fc) 6137 { 6138 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 6139 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 6140 6141 phba->sli4_hba.link_state.speed = 6142 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6143 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6144 6145 phba->sli4_hba.link_state.logical_speed = 6146 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6147 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 6148 phba->fc_linkspeed = 6149 lpfc_async_link_speed_to_read_top( 6150 phba, 6151 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6152 6153 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 6154 phba->trunk_link.link0.state = 6155 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 6156 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6157 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 6158 } 6159 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 6160 phba->trunk_link.link1.state = 6161 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 6162 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6163 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 6164 } 6165 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 6166 phba->trunk_link.link2.state = 6167 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 6168 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6169 phba->trunk_link.link2.fault = port_fault & 0x4 ? 
err : 0; 6170 } 6171 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 6172 phba->trunk_link.link3.state = 6173 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 6174 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6175 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 6176 } 6177 6178 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6179 "2910 Async FC Trunking Event - Speed:%d\n" 6180 "\tLogical speed:%d " 6181 "port0: %s port1: %s port2: %s port3: %s\n", 6182 phba->sli4_hba.link_state.speed, 6183 phba->sli4_hba.link_state.logical_speed, 6184 trunk_link_status(0), trunk_link_status(1), 6185 trunk_link_status(2), trunk_link_status(3)); 6186 6187 if (phba->cmf_active_mode != LPFC_CFG_OFF) 6188 lpfc_cmf_signal_init(phba); 6189 6190 if (port_fault) 6191 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6192 "3202 trunk error:0x%x (%s) seen on port0:%s " 6193 /* 6194 * SLI-4: We have only 0xA error codes 6195 * defined as of now. print an appropriate 6196 * message in case driver needs to be updated. 6197 */ 6198 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 6199 "UNDEFINED. update driver." : trunk_errmsg[err], 6200 trunk_port_fault(0), trunk_port_fault(1), 6201 trunk_port_fault(2), trunk_port_fault(3)); 6202 } 6203 6204 6205 /** 6206 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 6207 * @phba: pointer to lpfc hba data structure. 6208 * @acqe_fc: pointer to the async fc completion queue entry. 6209 * 6210 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 6211 * that the event was received and then issue a read_topology mailbox command so 6212 * that the rest of the driver will treat it the same as SLI3. 6213 **/ 6214 static void 6215 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 6216 { 6217 struct lpfc_dmabuf *mp; 6218 LPFC_MBOXQ_t *pmb; 6219 MAILBOX_t *mb; 6220 struct lpfc_mbx_read_top *la; 6221 int rc; 6222 6223 if (bf_get(lpfc_trailer_type, acqe_fc) != 6224 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 6225 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6226 "2895 Non FC link Event detected.(%d)\n", 6227 bf_get(lpfc_trailer_type, acqe_fc)); 6228 return; 6229 } 6230 6231 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6232 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 6233 lpfc_update_trunk_link_status(phba, acqe_fc); 6234 return; 6235 } 6236 6237 /* Keep the link status for extra SLI4 state machine reference */ 6238 phba->sli4_hba.link_state.speed = 6239 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6240 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6241 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 6242 phba->sli4_hba.link_state.topology = 6243 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 6244 phba->sli4_hba.link_state.status = 6245 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 6246 phba->sli4_hba.link_state.type = 6247 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 6248 phba->sli4_hba.link_state.number = 6249 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 6250 phba->sli4_hba.link_state.fault = 6251 bf_get(lpfc_acqe_link_fault, acqe_fc); 6252 6253 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6254 LPFC_FC_LA_TYPE_LINK_DOWN) 6255 phba->sli4_hba.link_state.logical_speed = 0; 6256 else if (!phba->sli4_hba.conf_trunk) 6257 phba->sli4_hba.link_state.logical_speed = 6258 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6259 6260 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6261 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 6262 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 6263 "%dMbps 
Fault:%d\n", 6264 phba->sli4_hba.link_state.speed, 6265 phba->sli4_hba.link_state.topology, 6266 phba->sli4_hba.link_state.status, 6267 phba->sli4_hba.link_state.type, 6268 phba->sli4_hba.link_state.number, 6269 phba->sli4_hba.link_state.logical_speed, 6270 phba->sli4_hba.link_state.fault); 6271 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6272 if (!pmb) { 6273 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6274 "2897 The mboxq allocation failed\n"); 6275 return; 6276 } 6277 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6278 if (!mp) { 6279 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6280 "2898 The lpfc_dmabuf allocation failed\n"); 6281 goto out_free_pmb; 6282 } 6283 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 6284 if (!mp->virt) { 6285 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6286 "2899 The mbuf allocation failed\n"); 6287 goto out_free_dmabuf; 6288 } 6289 6290 /* Cleanup any outstanding ELS commands */ 6291 lpfc_els_flush_all_cmd(phba); 6292 6293 /* Block ELS IOCBs until we have done process link event */ 6294 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 6295 6296 /* Update link event statistics */ 6297 phba->sli.slistat.link_event++; 6298 6299 /* Create lpfc_handle_latt mailbox command from link ACQE */ 6300 lpfc_read_topology(phba, pmb, mp); 6301 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 6302 pmb->vport = phba->pport; 6303 6304 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 6305 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 6306 6307 switch (phba->sli4_hba.link_state.status) { 6308 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 6309 phba->link_flag |= LS_MDS_LINK_DOWN; 6310 break; 6311 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 6312 phba->link_flag |= LS_MDS_LOOPBACK; 6313 break; 6314 default: 6315 break; 6316 } 6317 6318 /* Initialize completion status */ 6319 mb = &pmb->u.mb; 6320 mb->mbxStatus = MBX_SUCCESS; 6321 6322 /* Parse port fault information field */ 6323 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 6324 6325 /* Parse and translate link attention fields */ 6326 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 6327 la->eventTag = acqe_fc->event_tag; 6328 6329 if (phba->sli4_hba.link_state.status == 6330 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 6331 bf_set(lpfc_mbx_read_top_att_type, la, 6332 LPFC_FC_LA_TYPE_UNEXP_WWPN); 6333 } else { 6334 bf_set(lpfc_mbx_read_top_att_type, la, 6335 LPFC_FC_LA_TYPE_LINK_DOWN); 6336 } 6337 /* Invoke the mailbox command callback function */ 6338 lpfc_mbx_cmpl_read_topology(phba, pmb); 6339 6340 return; 6341 } 6342 6343 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 6344 if (rc == MBX_NOT_FINISHED) { 6345 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6346 goto out_free_dmabuf; 6347 } 6348 return; 6349 6350 out_free_dmabuf: 6351 kfree(mp); 6352 out_free_pmb: 6353 mempool_free(pmb, phba->mbox_mem_pool); 6354 } 6355 6356 /** 6357 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 6358 * @phba: pointer to lpfc hba data structure. 6359 * @acqe_sli: pointer to the async SLI completion queue entry. 6360 * 6361 * This routine is to handle the SLI4 asynchronous SLI events. 
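 *
 * Event types handled here include over/normal temperature (forwarded as
 * FC transport vendor events), misconfigured or unqualified optics (logged
 * and supported speeds refreshed), remote D_Port test initiation, FW
 * congestion parameter changes, misconfigured FA-WWN, EEPROM failure and
 * congestion signal counts; unrecognized types are simply logged.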
6362 **/ 6363 static void 6364 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 6365 { 6366 char port_name; 6367 char message[128]; 6368 uint8_t status; 6369 uint8_t evt_type; 6370 uint8_t operational = 0; 6371 struct temp_event temp_event_data; 6372 struct lpfc_acqe_misconfigured_event *misconfigured; 6373 struct lpfc_acqe_cgn_signal *cgn_signal; 6374 struct Scsi_Host *shost; 6375 struct lpfc_vport **vports; 6376 int rc, i, cnt; 6377 6378 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 6379 6380 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6381 "2901 Async SLI event - Type:%d, Event Data: x%08x " 6382 "x%08x x%08x x%08x\n", evt_type, 6383 acqe_sli->event_data1, acqe_sli->event_data2, 6384 acqe_sli->reserved, acqe_sli->trailer); 6385 6386 port_name = phba->Port[0]; 6387 if (port_name == 0x00) 6388 port_name = '?'; /* get port name is empty */ 6389 6390 switch (evt_type) { 6391 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 6392 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6393 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 6394 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6395 6396 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6397 "3190 Over Temperature:%d Celsius- Port Name %c\n", 6398 acqe_sli->event_data1, port_name); 6399 6400 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 6401 shost = lpfc_shost_from_vport(phba->pport); 6402 fc_host_post_vendor_event(shost, fc_get_event_number(), 6403 sizeof(temp_event_data), 6404 (char *)&temp_event_data, 6405 SCSI_NL_VID_TYPE_PCI 6406 | PCI_VENDOR_ID_EMULEX); 6407 break; 6408 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 6409 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6410 temp_event_data.event_code = LPFC_NORMAL_TEMP; 6411 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6412 6413 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6414 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 6415 acqe_sli->event_data1, port_name); 6416 6417 shost = lpfc_shost_from_vport(phba->pport); 6418 fc_host_post_vendor_event(shost, fc_get_event_number(), 6419 sizeof(temp_event_data), 6420 (char *)&temp_event_data, 6421 SCSI_NL_VID_TYPE_PCI 6422 | PCI_VENDOR_ID_EMULEX); 6423 break; 6424 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 6425 misconfigured = (struct lpfc_acqe_misconfigured_event *) 6426 &acqe_sli->event_data1; 6427 6428 /* fetch the status for this port */ 6429 switch (phba->sli4_hba.lnk_info.lnk_no) { 6430 case LPFC_LINK_NUMBER_0: 6431 status = bf_get(lpfc_sli_misconfigured_port0_state, 6432 &misconfigured->theEvent); 6433 operational = bf_get(lpfc_sli_misconfigured_port0_op, 6434 &misconfigured->theEvent); 6435 break; 6436 case LPFC_LINK_NUMBER_1: 6437 status = bf_get(lpfc_sli_misconfigured_port1_state, 6438 &misconfigured->theEvent); 6439 operational = bf_get(lpfc_sli_misconfigured_port1_op, 6440 &misconfigured->theEvent); 6441 break; 6442 case LPFC_LINK_NUMBER_2: 6443 status = bf_get(lpfc_sli_misconfigured_port2_state, 6444 &misconfigured->theEvent); 6445 operational = bf_get(lpfc_sli_misconfigured_port2_op, 6446 &misconfigured->theEvent); 6447 break; 6448 case LPFC_LINK_NUMBER_3: 6449 status = bf_get(lpfc_sli_misconfigured_port3_state, 6450 &misconfigured->theEvent); 6451 operational = bf_get(lpfc_sli_misconfigured_port3_op, 6452 &misconfigured->theEvent); 6453 break; 6454 default: 6455 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6456 "3296 " 6457 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 6458 "event: Invalid link %d", 6459 phba->sli4_hba.lnk_info.lnk_no); 6460 return; 6461 } 6462 6463 /* Skip if optic 
state unchanged */ 6464 if (phba->sli4_hba.lnk_info.optic_state == status) 6465 return; 6466 6467 switch (status) { 6468 case LPFC_SLI_EVENT_STATUS_VALID: 6469 sprintf(message, "Physical Link is functional"); 6470 break; 6471 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 6472 sprintf(message, "Optics faulted/incorrectly " 6473 "installed/not installed - Reseat optics, " 6474 "if issue not resolved, replace."); 6475 break; 6476 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 6477 sprintf(message, 6478 "Optics of two types installed - Remove one " 6479 "optic or install matching pair of optics."); 6480 break; 6481 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 6482 sprintf(message, "Incompatible optics - Replace with " 6483 "compatible optics for card to function."); 6484 break; 6485 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 6486 sprintf(message, "Unqualified optics - Replace with " 6487 "Avago optics for Warranty and Technical " 6488 "Support - Link is%s operational", 6489 (operational) ? " not" : ""); 6490 break; 6491 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 6492 sprintf(message, "Uncertified optics - Replace with " 6493 "Avago-certified optics to enable link " 6494 "operation - Link is%s operational", 6495 (operational) ? " not" : ""); 6496 break; 6497 default: 6498 /* firmware is reporting a status we don't know about */ 6499 sprintf(message, "Unknown event status x%02x", status); 6500 break; 6501 } 6502 6503 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 6504 rc = lpfc_sli4_read_config(phba); 6505 if (rc) { 6506 phba->lmt = 0; 6507 lpfc_printf_log(phba, KERN_ERR, 6508 LOG_TRACE_EVENT, 6509 "3194 Unable to retrieve supported " 6510 "speeds, rc = 0x%x\n", rc); 6511 } 6512 rc = lpfc_sli4_refresh_params(phba); 6513 if (rc) { 6514 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6515 "3174 Unable to update pls support, " 6516 "rc x%x\n", rc); 6517 } 6518 vports = lpfc_create_vport_work_array(phba); 6519 if (vports != NULL) { 6520 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6521 i++) { 6522 shost = lpfc_shost_from_vport(vports[i]); 6523 lpfc_host_supported_speeds_set(shost); 6524 } 6525 } 6526 lpfc_destroy_vport_work_array(phba, vports); 6527 6528 phba->sli4_hba.lnk_info.optic_state = status; 6529 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6530 "3176 Port Name %c %s\n", port_name, message); 6531 break; 6532 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 6533 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6534 "3192 Remote DPort Test Initiated - " 6535 "Event Data1:x%08x Event Data2: x%08x\n", 6536 acqe_sli->event_data1, acqe_sli->event_data2); 6537 break; 6538 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: 6539 /* Call FW to obtain active parms */ 6540 lpfc_sli4_cgn_parm_chg_evt(phba); 6541 break; 6542 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 6543 /* Misconfigured WWN. Reports that the SLI Port is configured 6544 * to use FA-WWN, but the attached device doesn’t support it. 6545 * No driver action is required. 6546 * Event Data1 - N.A, Event Data2 - N.A 6547 */ 6548 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, 6549 "2699 Misconfigured FA-WWN - Attached device does " 6550 "not support FA-WWN\n"); 6551 break; 6552 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 6553 /* EEPROM failure. 
No driver action is required */ 6554 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6555 "2518 EEPROM failure - " 6556 "Event Data1: x%08x Event Data2: x%08x\n", 6557 acqe_sli->event_data1, acqe_sli->event_data2); 6558 break; 6559 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: 6560 if (phba->cmf_active_mode == LPFC_CFG_OFF) 6561 break; 6562 cgn_signal = (struct lpfc_acqe_cgn_signal *) 6563 &acqe_sli->event_data1; 6564 phba->cgn_acqe_cnt++; 6565 6566 cnt = bf_get(lpfc_warn_acqe, cgn_signal); 6567 atomic64_add(cnt, &phba->cgn_acqe_stat.warn); 6568 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); 6569 6570 /* no threshold for CMF, even 1 signal will trigger an event */ 6571 6572 /* Alarm overrides warning, so check that first */ 6573 if (cgn_signal->alarm_cnt) { 6574 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6575 /* Keep track of alarm cnt for cgn_info */ 6576 atomic_add(cgn_signal->alarm_cnt, 6577 &phba->cgn_fabric_alarm_cnt); 6578 /* Keep track of alarm cnt for CMF_SYNC_WQE */ 6579 atomic_add(cgn_signal->alarm_cnt, 6580 &phba->cgn_sync_alarm_cnt); 6581 } 6582 } else if (cnt) { 6583 /* signal action needs to be taken */ 6584 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 6585 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6586 /* Keep track of warning cnt for cgn_info */ 6587 atomic_add(cnt, &phba->cgn_fabric_warn_cnt); 6588 /* Keep track of warning cnt for CMF_SYNC_WQE */ 6589 atomic_add(cnt, &phba->cgn_sync_warn_cnt); 6590 } 6591 } 6592 break; 6593 default: 6594 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6595 "3193 Unrecognized SLI event, type: 0x%x", 6596 evt_type); 6597 break; 6598 } 6599 } 6600 6601 /** 6602 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 6603 * @vport: pointer to vport data structure. 6604 * 6605 * This routine is to perform Clear Virtual Link (CVL) on a vport in 6606 * response to a CVL event. 6607 * 6608 * Return the pointer to the ndlp with the vport if successful, otherwise 6609 * return NULL. 6610 **/ 6611 static struct lpfc_nodelist * 6612 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 6613 { 6614 struct lpfc_nodelist *ndlp; 6615 struct Scsi_Host *shost; 6616 struct lpfc_hba *phba; 6617 6618 if (!vport) 6619 return NULL; 6620 phba = vport->phba; 6621 if (!phba) 6622 return NULL; 6623 ndlp = lpfc_findnode_did(vport, Fabric_DID); 6624 if (!ndlp) { 6625 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6626 ndlp = lpfc_nlp_init(vport, Fabric_DID); 6627 if (!ndlp) 6628 return NULL; 6629 /* Set the node type */ 6630 ndlp->nlp_type |= NLP_FABRIC; 6631 /* Put ndlp onto node list */ 6632 lpfc_enqueue_node(vport, ndlp); 6633 } 6634 if ((phba->pport->port_state < LPFC_FLOGI) && 6635 (phba->pport->port_state != LPFC_VPORT_FAILED)) 6636 return NULL; 6637 /* If virtual link is not yet instantiated ignore CVL */ 6638 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 6639 && (vport->port_state != LPFC_VPORT_FAILED)) 6640 return NULL; 6641 shost = lpfc_shost_from_vport(vport); 6642 if (!shost) 6643 return NULL; 6644 lpfc_linkdown_port(vport); 6645 lpfc_cleanup_pending_mbox(vport); 6646 spin_lock_irq(shost->host_lock); 6647 vport->fc_flag |= FC_VPORT_CVL_RCVD; 6648 spin_unlock_irq(shost->host_lock); 6649 6650 return ndlp; 6651 } 6652 6653 /** 6654 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 6655 * @phba: pointer to lpfc hba data structure. 6656 * 6657 * This routine is to perform Clear Virtual Link (CVL) on all vports in 6658 * response to a FCF dead event. 
6659 **/ 6660 static void 6661 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 6662 { 6663 struct lpfc_vport **vports; 6664 int i; 6665 6666 vports = lpfc_create_vport_work_array(phba); 6667 if (vports) 6668 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 6669 lpfc_sli4_perform_vport_cvl(vports[i]); 6670 lpfc_destroy_vport_work_array(phba, vports); 6671 } 6672 6673 /** 6674 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 6675 * @phba: pointer to lpfc hba data structure. 6676 * @acqe_fip: pointer to the async fcoe completion queue entry. 6677 * 6678 * This routine is to handle the SLI4 asynchronous fcoe event. 6679 **/ 6680 static void 6681 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 6682 struct lpfc_acqe_fip *acqe_fip) 6683 { 6684 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 6685 int rc; 6686 struct lpfc_vport *vport; 6687 struct lpfc_nodelist *ndlp; 6688 int active_vlink_present; 6689 struct lpfc_vport **vports; 6690 int i; 6691 6692 phba->fc_eventTag = acqe_fip->event_tag; 6693 phba->fcoe_eventtag = acqe_fip->event_tag; 6694 switch (event_type) { 6695 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 6696 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 6697 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 6698 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6699 "2546 New FCF event, evt_tag:x%x, " 6700 "index:x%x\n", 6701 acqe_fip->event_tag, 6702 acqe_fip->index); 6703 else 6704 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 6705 LOG_DISCOVERY, 6706 "2788 FCF param modified event, " 6707 "evt_tag:x%x, index:x%x\n", 6708 acqe_fip->event_tag, 6709 acqe_fip->index); 6710 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6711 /* 6712 * During period of FCF discovery, read the FCF 6713 * table record indexed by the event to update 6714 * FCF roundrobin failover eligible FCF bmask. 6715 */ 6716 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6717 LOG_DISCOVERY, 6718 "2779 Read FCF (x%x) for updating " 6719 "roundrobin FCF failover bmask\n", 6720 acqe_fip->index); 6721 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 6722 } 6723 6724 /* If the FCF discovery is in progress, do nothing. */ 6725 spin_lock_irq(&phba->hbalock); 6726 if (phba->hba_flag & FCF_TS_INPROG) { 6727 spin_unlock_irq(&phba->hbalock); 6728 break; 6729 } 6730 /* If fast FCF failover rescan event is pending, do nothing */ 6731 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 6732 spin_unlock_irq(&phba->hbalock); 6733 break; 6734 } 6735 6736 /* If the FCF has been in discovered state, do nothing. 
*/ 6737 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 6738 spin_unlock_irq(&phba->hbalock); 6739 break; 6740 } 6741 spin_unlock_irq(&phba->hbalock); 6742 6743 /* Otherwise, scan the entire FCF table and re-discover SAN */ 6744 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6745 "2770 Start FCF table scan per async FCF " 6746 "event, evt_tag:x%x, index:x%x\n", 6747 acqe_fip->event_tag, acqe_fip->index); 6748 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 6749 LPFC_FCOE_FCF_GET_FIRST); 6750 if (rc) 6751 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6752 "2547 Issue FCF scan read FCF mailbox " 6753 "command failed (x%x)\n", rc); 6754 break; 6755 6756 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 6757 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6758 "2548 FCF Table full count 0x%x tag 0x%x\n", 6759 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 6760 acqe_fip->event_tag); 6761 break; 6762 6763 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 6764 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6765 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6766 "2549 FCF (x%x) disconnected from network, " 6767 "tag:x%x\n", acqe_fip->index, 6768 acqe_fip->event_tag); 6769 /* 6770 * If we are in the middle of FCF failover process, clear 6771 * the corresponding FCF bit in the roundrobin bitmap. 6772 */ 6773 spin_lock_irq(&phba->hbalock); 6774 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 6775 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 6776 spin_unlock_irq(&phba->hbalock); 6777 /* Update FLOGI FCF failover eligible FCF bmask */ 6778 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 6779 break; 6780 } 6781 spin_unlock_irq(&phba->hbalock); 6782 6783 /* If the event is not for currently used fcf do nothing */ 6784 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 6785 break; 6786 6787 /* 6788 * Otherwise, request the port to rediscover the entire FCF 6789 * table for a fast recovery from case that the current FCF 6790 * is no longer valid as we are not in the middle of FCF 6791 * failover process already. 6792 */ 6793 spin_lock_irq(&phba->hbalock); 6794 /* Mark the fast failover process in progress */ 6795 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 6796 spin_unlock_irq(&phba->hbalock); 6797 6798 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6799 "2771 Start FCF fast failover process due to " 6800 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 6801 "\n", acqe_fip->event_tag, acqe_fip->index); 6802 rc = lpfc_sli4_redisc_fcf_table(phba); 6803 if (rc) { 6804 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6805 LOG_TRACE_EVENT, 6806 "2772 Issue FCF rediscover mailbox " 6807 "command failed, fail through to FCF " 6808 "dead event\n"); 6809 spin_lock_irq(&phba->hbalock); 6810 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 6811 spin_unlock_irq(&phba->hbalock); 6812 /* 6813 * Last resort will fail over by treating this 6814 * as a link down to FCF registration. 6815 */ 6816 lpfc_sli4_fcf_dead_failthrough(phba); 6817 } else { 6818 /* Reset FCF roundrobin bmask for new discovery */ 6819 lpfc_sli4_clear_fcf_rr_bmask(phba); 6820 /* 6821 * Handling fast FCF failover to a DEAD FCF event is 6822 * considered equalivant to receiving CVL to all vports. 
6823 */ 6824 lpfc_sli4_perform_all_vport_cvl(phba); 6825 } 6826 break; 6827 case LPFC_FIP_EVENT_TYPE_CVL: 6828 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6829 lpfc_printf_log(phba, KERN_ERR, 6830 LOG_TRACE_EVENT, 6831 "2718 Clear Virtual Link Received for VPI 0x%x" 6832 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 6833 6834 vport = lpfc_find_vport_by_vpid(phba, 6835 acqe_fip->index); 6836 ndlp = lpfc_sli4_perform_vport_cvl(vport); 6837 if (!ndlp) 6838 break; 6839 active_vlink_present = 0; 6840 6841 vports = lpfc_create_vport_work_array(phba); 6842 if (vports) { 6843 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6844 i++) { 6845 if ((!(vports[i]->fc_flag & 6846 FC_VPORT_CVL_RCVD)) && 6847 (vports[i]->port_state > LPFC_FDISC)) { 6848 active_vlink_present = 1; 6849 break; 6850 } 6851 } 6852 lpfc_destroy_vport_work_array(phba, vports); 6853 } 6854 6855 /* 6856 * Don't re-instantiate if vport is marked for deletion. 6857 * If we are here first then vport_delete is going to wait 6858 * for discovery to complete. 6859 */ 6860 if (!(vport->load_flag & FC_UNLOADING) && 6861 active_vlink_present) { 6862 /* 6863 * If there are other active VLinks present, 6864 * re-instantiate the Vlink using FDISC. 6865 */ 6866 mod_timer(&ndlp->nlp_delayfunc, 6867 jiffies + msecs_to_jiffies(1000)); 6868 spin_lock_irq(&ndlp->lock); 6869 ndlp->nlp_flag |= NLP_DELAY_TMO; 6870 spin_unlock_irq(&ndlp->lock); 6871 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 6872 vport->port_state = LPFC_FDISC; 6873 } else { 6874 /* 6875 * Otherwise, we request port to rediscover 6876 * the entire FCF table for a fast recovery 6877 * from possible case that the current FCF 6878 * is no longer valid if we are not already 6879 * in the FCF failover process. 6880 */ 6881 spin_lock_irq(&phba->hbalock); 6882 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6883 spin_unlock_irq(&phba->hbalock); 6884 break; 6885 } 6886 /* Mark the fast failover process in progress */ 6887 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 6888 spin_unlock_irq(&phba->hbalock); 6889 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6890 LOG_DISCOVERY, 6891 "2773 Start FCF failover per CVL, " 6892 "evt_tag:x%x\n", acqe_fip->event_tag); 6893 rc = lpfc_sli4_redisc_fcf_table(phba); 6894 if (rc) { 6895 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6896 LOG_TRACE_EVENT, 6897 "2774 Issue FCF rediscover " 6898 "mailbox command failed, " 6899 "through to CVL event\n"); 6900 spin_lock_irq(&phba->hbalock); 6901 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 6902 spin_unlock_irq(&phba->hbalock); 6903 /* 6904 * Last resort will be re-try on the 6905 * the current registered FCF entry. 6906 */ 6907 lpfc_retry_pport_discovery(phba); 6908 } else 6909 /* 6910 * Reset FCF roundrobin bmask for new 6911 * discovery. 6912 */ 6913 lpfc_sli4_clear_fcf_rr_bmask(phba); 6914 } 6915 break; 6916 default: 6917 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6918 "0288 Unknown FCoE event type 0x%x event tag " 6919 "0x%x\n", event_type, acqe_fip->event_tag); 6920 break; 6921 } 6922 } 6923 6924 /** 6925 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 6926 * @phba: pointer to lpfc hba data structure. 6927 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 6928 * 6929 * This routine is to handle the SLI4 asynchronous dcbx event. 
6930 **/ 6931 static void 6932 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 6933 struct lpfc_acqe_dcbx *acqe_dcbx) 6934 { 6935 phba->fc_eventTag = acqe_dcbx->event_tag; 6936 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6937 "0290 The SLI4 DCBX asynchronous event is not " 6938 "handled yet\n"); 6939 } 6940 6941 /** 6942 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 6943 * @phba: pointer to lpfc hba data structure. 6944 * @acqe_grp5: pointer to the async grp5 completion queue entry. 6945 * 6946 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 6947 * is an asynchronous notified of a logical link speed change. The Port 6948 * reports the logical link speed in units of 10Mbps. 6949 **/ 6950 static void 6951 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 6952 struct lpfc_acqe_grp5 *acqe_grp5) 6953 { 6954 uint16_t prev_ll_spd; 6955 6956 phba->fc_eventTag = acqe_grp5->event_tag; 6957 phba->fcoe_eventtag = acqe_grp5->event_tag; 6958 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 6959 phba->sli4_hba.link_state.logical_speed = 6960 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 6961 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6962 "2789 GRP5 Async Event: Updating logical link speed " 6963 "from %dMbps to %dMbps\n", prev_ll_spd, 6964 phba->sli4_hba.link_state.logical_speed); 6965 } 6966 6967 /** 6968 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event 6969 * @phba: pointer to lpfc hba data structure. 6970 * 6971 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event 6972 * is an asynchronous notification of a request to reset CM stats. 6973 **/ 6974 static void 6975 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) 6976 { 6977 if (!phba->cgn_i) 6978 return; 6979 lpfc_init_congestion_stat(phba); 6980 } 6981 6982 /** 6983 * lpfc_cgn_params_val - Validate FW congestion parameters. 6984 * @phba: pointer to lpfc hba data structure. 6985 * @p_cfg_param: pointer to FW provided congestion parameters. 6986 * 6987 * This routine validates the congestion parameters passed 6988 * by the FW to the driver via an ACQE event. 6989 **/ 6990 static void 6991 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) 6992 { 6993 spin_lock_irq(&phba->hbalock); 6994 6995 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, 6996 LPFC_CFG_MONITOR)) { 6997 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 6998 "6225 CMF mode param out of range: %d\n", 6999 p_cfg_param->cgn_param_mode); 7000 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; 7001 } 7002 7003 spin_unlock_irq(&phba->hbalock); 7004 } 7005 7006 /** 7007 * lpfc_cgn_params_parse - Process a FW cong parm change event 7008 * @phba: pointer to lpfc hba data structure. 7009 * @p_cgn_param: pointer to a data buffer with the FW cong params. 7010 * @len: the size of pdata in bytes. 7011 * 7012 * This routine validates the congestion management buffer signature 7013 * from the FW, validates the contents and makes corrections for 7014 * valid, in-range values. If the signature magic is correct and 7015 * after parameter validation, the contents are copied to the driver's 7016 * @phba structure. If the magic is incorrect, an error message is 7017 * logged. 
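 *
 * A mode change also drives the congestion management state machine:
 * leaving LPFC_CFG_OFF starts CMF and, if the link is up, issues an EDC
 * ELS; switching to LPFC_CFG_OFF stops CMF; MANAGED to MONITOR lifts the
 * bandwidth cap to the link byte count and unblocks any waiting IO; and
 * MONITOR to MANAGED re-initializes the congestion signals.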
7018 **/ 7019 static void 7020 lpfc_cgn_params_parse(struct lpfc_hba *phba, 7021 struct lpfc_cgn_param *p_cgn_param, uint32_t len) 7022 { 7023 struct lpfc_cgn_info *cp; 7024 uint32_t crc, oldmode; 7025 7026 /* Make sure the FW has encoded the correct magic number to 7027 * validate the congestion parameter in FW memory. 7028 */ 7029 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { 7030 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7031 "4668 FW cgn parm buffer data: " 7032 "magic 0x%x version %d mode %d " 7033 "level0 %d level1 %d " 7034 "level2 %d byte13 %d " 7035 "byte14 %d byte15 %d " 7036 "byte11 %d byte12 %d activeMode %d\n", 7037 p_cgn_param->cgn_param_magic, 7038 p_cgn_param->cgn_param_version, 7039 p_cgn_param->cgn_param_mode, 7040 p_cgn_param->cgn_param_level0, 7041 p_cgn_param->cgn_param_level1, 7042 p_cgn_param->cgn_param_level2, 7043 p_cgn_param->byte13, 7044 p_cgn_param->byte14, 7045 p_cgn_param->byte15, 7046 p_cgn_param->byte11, 7047 p_cgn_param->byte12, 7048 phba->cmf_active_mode); 7049 7050 oldmode = phba->cmf_active_mode; 7051 7052 /* Any parameters out of range are corrected to defaults 7053 * by this routine. No need to fail. 7054 */ 7055 lpfc_cgn_params_val(phba, p_cgn_param); 7056 7057 /* Parameters are verified, move them into driver storage */ 7058 spin_lock_irq(&phba->hbalock); 7059 memcpy(&phba->cgn_p, p_cgn_param, 7060 sizeof(struct lpfc_cgn_param)); 7061 7062 /* Update parameters in congestion info buffer now */ 7063 if (phba->cgn_i) { 7064 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 7065 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 7066 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 7067 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 7068 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 7069 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 7070 LPFC_CGN_CRC32_SEED); 7071 cp->cgn_info_crc = cpu_to_le32(crc); 7072 } 7073 spin_unlock_irq(&phba->hbalock); 7074 7075 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; 7076 7077 switch (oldmode) { 7078 case LPFC_CFG_OFF: 7079 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { 7080 /* Turning CMF on */ 7081 lpfc_cmf_start(phba); 7082 7083 if (phba->link_state >= LPFC_LINK_UP) { 7084 phba->cgn_reg_fpin = 7085 phba->cgn_init_reg_fpin; 7086 phba->cgn_reg_signal = 7087 phba->cgn_init_reg_signal; 7088 lpfc_issue_els_edc(phba->pport, 0); 7089 } 7090 } 7091 break; 7092 case LPFC_CFG_MANAGED: 7093 switch (phba->cgn_p.cgn_param_mode) { 7094 case LPFC_CFG_OFF: 7095 /* Turning CMF off */ 7096 lpfc_cmf_stop(phba); 7097 if (phba->link_state >= LPFC_LINK_UP) 7098 lpfc_issue_els_edc(phba->pport, 0); 7099 break; 7100 case LPFC_CFG_MONITOR: 7101 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 7102 "4661 Switch from MANAGED to " 7103 "`MONITOR mode\n"); 7104 phba->cmf_max_bytes_per_interval = 7105 phba->cmf_link_byte_count; 7106 7107 /* Resume blocked IO - unblock on workqueue */ 7108 queue_work(phba->wq, 7109 &phba->unblock_request_work); 7110 break; 7111 } 7112 break; 7113 case LPFC_CFG_MONITOR: 7114 switch (phba->cgn_p.cgn_param_mode) { 7115 case LPFC_CFG_OFF: 7116 /* Turning CMF off */ 7117 lpfc_cmf_stop(phba); 7118 if (phba->link_state >= LPFC_LINK_UP) 7119 lpfc_issue_els_edc(phba->pport, 0); 7120 break; 7121 case LPFC_CFG_MANAGED: 7122 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 7123 "4662 Switch from MONITOR to " 7124 "MANAGED mode\n"); 7125 lpfc_cmf_signal_init(phba); 7126 break; 7127 } 7128 break; 7129 } 7130 } else { 7131 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 
7132 "4669 FW cgn parm buf wrong magic 0x%x " 7133 "version %d\n", p_cgn_param->cgn_param_magic, 7134 p_cgn_param->cgn_param_version); 7135 } 7136 } 7137 7138 /** 7139 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters. 7140 * @phba: pointer to lpfc hba data structure. 7141 * 7142 * This routine issues a read_object mailbox command to 7143 * get the congestion management parameters from the FW 7144 * parses it and updates the driver maintained values. 7145 * 7146 * Returns 7147 * 0 if the object was empty 7148 * -Eval if an error was encountered 7149 * Count if bytes were read from object 7150 **/ 7151 int 7152 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba) 7153 { 7154 int ret = 0; 7155 struct lpfc_cgn_param *p_cgn_param = NULL; 7156 u32 *pdata = NULL; 7157 u32 len = 0; 7158 7159 /* Find out if the FW has a new set of congestion parameters. */ 7160 len = sizeof(struct lpfc_cgn_param); 7161 pdata = kzalloc(len, GFP_KERNEL); 7162 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME, 7163 pdata, len); 7164 7165 /* 0 means no data. A negative means error. A positive means 7166 * bytes were copied. 7167 */ 7168 if (!ret) { 7169 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7170 "4670 CGN RD OBJ returns no data\n"); 7171 goto rd_obj_err; 7172 } else if (ret < 0) { 7173 /* Some error. Just exit and return it to the caller.*/ 7174 goto rd_obj_err; 7175 } 7176 7177 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7178 "6234 READ CGN PARAMS Successful %d\n", len); 7179 7180 /* Parse data pointer over len and update the phba congestion 7181 * parameters with values passed back. The receive rate values 7182 * may have been altered in FW, but take no action here. 7183 */ 7184 p_cgn_param = (struct lpfc_cgn_param *)pdata; 7185 lpfc_cgn_params_parse(phba, p_cgn_param, len); 7186 7187 rd_obj_err: 7188 kfree(pdata); 7189 return ret; 7190 } 7191 7192 /** 7193 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event 7194 * @phba: pointer to lpfc hba data structure. 7195 * 7196 * The FW generated Async ACQE SLI event calls this routine when 7197 * the event type is an SLI Internal Port Event and the Event Code 7198 * indicates a change to the FW maintained congestion parameters. 7199 * 7200 * This routine executes a Read_Object mailbox call to obtain the 7201 * current congestion parameters maintained in FW and corrects 7202 * the driver's active congestion parameters. 7203 * 7204 * The acqe event is not passed because there is no further data 7205 * required. 7206 * 7207 * Returns nonzero error if event processing encountered an error. 7208 * Zero otherwise for success. 7209 **/ 7210 static int 7211 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba) 7212 { 7213 int ret = 0; 7214 7215 if (!phba->sli4_hba.pc_sli4_params.cmf) { 7216 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7217 "4664 Cgn Evt when E2E off. Drop event\n"); 7218 return -EACCES; 7219 } 7220 7221 /* If the event is claiming an empty object, it's ok. A write 7222 * could have cleared it. Only error is a negative return 7223 * status. 
7224 */ 7225 ret = lpfc_sli4_cgn_params_read(phba); 7226 if (ret < 0) { 7227 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7228 "4667 Error reading Cgn Params (%d)\n", 7229 ret); 7230 } else if (!ret) { 7231 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7232 "4673 CGN Event empty object.\n"); 7233 } 7234 return ret; 7235 } 7236 7237 /** 7238 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 7239 * @phba: pointer to lpfc hba data structure. 7240 * 7241 * This routine is invoked by the worker thread to process all the pending 7242 * SLI4 asynchronous events. 7243 **/ 7244 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 7245 { 7246 struct lpfc_cq_event *cq_event; 7247 unsigned long iflags; 7248 7249 /* First, declare the async event has been handled */ 7250 spin_lock_irqsave(&phba->hbalock, iflags); 7251 phba->hba_flag &= ~ASYNC_EVENT; 7252 spin_unlock_irqrestore(&phba->hbalock, iflags); 7253 7254 /* Now, handle all the async events */ 7255 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7256 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 7257 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 7258 cq_event, struct lpfc_cq_event, list); 7259 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, 7260 iflags); 7261 7262 /* Process the asynchronous event */ 7263 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 7264 case LPFC_TRAILER_CODE_LINK: 7265 lpfc_sli4_async_link_evt(phba, 7266 &cq_event->cqe.acqe_link); 7267 break; 7268 case LPFC_TRAILER_CODE_FCOE: 7269 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 7270 break; 7271 case LPFC_TRAILER_CODE_DCBX: 7272 lpfc_sli4_async_dcbx_evt(phba, 7273 &cq_event->cqe.acqe_dcbx); 7274 break; 7275 case LPFC_TRAILER_CODE_GRP5: 7276 lpfc_sli4_async_grp5_evt(phba, 7277 &cq_event->cqe.acqe_grp5); 7278 break; 7279 case LPFC_TRAILER_CODE_FC: 7280 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 7281 break; 7282 case LPFC_TRAILER_CODE_SLI: 7283 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 7284 break; 7285 case LPFC_TRAILER_CODE_CMSTAT: 7286 lpfc_sli4_async_cmstat_evt(phba); 7287 break; 7288 default: 7289 lpfc_printf_log(phba, KERN_ERR, 7290 LOG_TRACE_EVENT, 7291 "1804 Invalid asynchronous event code: " 7292 "x%x\n", bf_get(lpfc_trailer_code, 7293 &cq_event->cqe.mcqe_cmpl)); 7294 break; 7295 } 7296 7297 /* Free the completion event processed to the free pool */ 7298 lpfc_sli4_cq_event_release(phba, cq_event); 7299 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7300 } 7301 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 7302 } 7303 7304 /** 7305 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 7306 * @phba: pointer to lpfc hba data structure. 7307 * 7308 * This routine is invoked by the worker thread to process FCF table 7309 * rediscovery pending completion event. 
7310 **/ 7311 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 7312 { 7313 int rc; 7314 7315 spin_lock_irq(&phba->hbalock); 7316 /* Clear FCF rediscovery timeout event */ 7317 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 7318 /* Clear driver fast failover FCF record flag */ 7319 phba->fcf.failover_rec.flag = 0; 7320 /* Set state for FCF fast failover */ 7321 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 7322 spin_unlock_irq(&phba->hbalock); 7323 7324 /* Scan FCF table from the first entry to re-discover SAN */ 7325 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 7326 "2777 Start post-quiescent FCF table scan\n"); 7327 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 7328 if (rc) 7329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7330 "2747 Issue FCF scan read FCF mailbox " 7331 "command failed 0x%x\n", rc); 7332 } 7333 7334 /** 7335 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 7336 * @phba: pointer to lpfc hba data structure. 7337 * @dev_grp: The HBA PCI-Device group number. 7338 * 7339 * This routine is invoked to set up the per HBA PCI-Device group function 7340 * API jump table entries. 7341 * 7342 * Return: 0 if success, otherwise -ENODEV 7343 **/ 7344 int 7345 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7346 { 7347 int rc; 7348 7349 /* Set up lpfc PCI-device group */ 7350 phba->pci_dev_grp = dev_grp; 7351 7352 /* The LPFC_PCI_DEV_OC uses SLI4 */ 7353 if (dev_grp == LPFC_PCI_DEV_OC) 7354 phba->sli_rev = LPFC_SLI_REV4; 7355 7356 /* Set up device INIT API function jump table */ 7357 rc = lpfc_init_api_table_setup(phba, dev_grp); 7358 if (rc) 7359 return -ENODEV; 7360 /* Set up SCSI API function jump table */ 7361 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 7362 if (rc) 7363 return -ENODEV; 7364 /* Set up SLI API function jump table */ 7365 rc = lpfc_sli_api_table_setup(phba, dev_grp); 7366 if (rc) 7367 return -ENODEV; 7368 /* Set up MBOX API function jump table */ 7369 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 7370 if (rc) 7371 return -ENODEV; 7372 7373 return 0; 7374 } 7375 7376 /** 7377 * lpfc_log_intr_mode - Log the active interrupt mode 7378 * @phba: pointer to lpfc hba data structure. 7379 * @intr_mode: active interrupt mode adopted. 7380 * 7381 * This routine it invoked to log the currently used active interrupt mode 7382 * to the device. 7383 **/ 7384 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 7385 { 7386 switch (intr_mode) { 7387 case 0: 7388 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7389 "0470 Enable INTx interrupt mode.\n"); 7390 break; 7391 case 1: 7392 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7393 "0481 Enabled MSI interrupt mode.\n"); 7394 break; 7395 case 2: 7396 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7397 "0480 Enabled MSI-X interrupt mode.\n"); 7398 break; 7399 default: 7400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7401 "0482 Illegal interrupt mode.\n"); 7402 break; 7403 } 7404 return; 7405 } 7406 7407 /** 7408 * lpfc_enable_pci_dev - Enable a generic PCI device. 7409 * @phba: pointer to lpfc hba data structure. 7410 * 7411 * This routine is invoked to enable the PCI device that is common to all 7412 * PCI devices. 
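 *
 * A minimal caller sketch (the error label is hypothetical; the actual
 * probe routines perform additional setup around this call):
 *
 *   if (lpfc_enable_pci_dev(phba))
 *           goto out_free_phba;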
7413 *
7414 * Return codes
7415 * 0 - successful
7416 * other values - error
7417 **/
7418 static int
7419 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7420 {
7421 struct pci_dev *pdev;
7422
7423 /* Obtain PCI device reference */
7424 if (!phba->pcidev)
7425 goto out_error;
7426 else
7427 pdev = phba->pcidev;
7428 /* Enable PCI device */
7429 if (pci_enable_device_mem(pdev))
7430 goto out_error;
7431 /* Request PCI resource for the device */
7432 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7433 goto out_disable_device;
7434 /* Set up device as PCI master and save state for EEH */
7435 pci_set_master(pdev);
7436 pci_try_set_mwi(pdev);
7437 pci_save_state(pdev);
7438
7439 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7440 if (pci_is_pcie(pdev))
7441 pdev->needs_freset = 1;
7442
7443 return 0;
7444
7445 out_disable_device:
7446 pci_disable_device(pdev);
7447 out_error:
7448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7449 "1401 Failed to enable pci device\n");
7450 return -ENODEV;
7451 }
7452
7453 /**
7454 * lpfc_disable_pci_dev - Disable a generic PCI device.
7455 * @phba: pointer to lpfc hba data structure.
7456 *
7457 * This routine is invoked to disable the PCI device that is common to all
7458 * PCI devices.
7459 **/
7460 static void
7461 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7462 {
7463 struct pci_dev *pdev;
7464
7465 /* Obtain PCI device reference */
7466 if (!phba->pcidev)
7467 return;
7468 else
7469 pdev = phba->pcidev;
7470 /* Release PCI resource and disable PCI device */
7471 pci_release_mem_regions(pdev);
7472 pci_disable_device(pdev);
7473
7474 return;
7475 }
7476
7477 /**
7478 * lpfc_reset_hba - Reset a hba
7479 * @phba: pointer to lpfc hba data structure.
7480 *
7481 * This routine is invoked to reset a hba device. It brings the HBA
7482 * offline, performs a board restart, and then brings the board back
7483 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7484 * outstanding mailbox commands.
7485 **/
7486 void
7487 lpfc_reset_hba(struct lpfc_hba *phba)
7488 {
7489 /* If resets are disabled then set error state and return. */
7490 if (!phba->cfg_enable_hba_reset) {
7491 phba->link_state = LPFC_HBA_ERROR;
7492 return;
7493 }
7494
7495 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7496 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7497 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7498 } else {
7499 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7500 lpfc_sli_flush_io_rings(phba);
7501 }
7502 lpfc_offline(phba);
7503 lpfc_sli_brdrestart(phba);
7504 lpfc_online(phba);
7505 lpfc_unblock_mgmt_io(phba);
7506 }
7507
7508 /**
7509 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7510 * @phba: pointer to lpfc hba data structure.
7511 *
7512 * This function locates the PCI SR-IOV extended capability of the physical
7513 * function and reads the TotalVFs field from PCI configuration space.
7514 *
7515 * Return: the total number of SR-IOV virtual functions the device supports,
7516 * or 0 if the device does not support SR-IOV.
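 *
 * Worked example (numbers hypothetical): if the SR-IOV capability reports
 * TotalVFs = 8 and the lpfc_sriov_nr_virtfn module parameter requests 16,
 * lpfc_sli_probe_sriov_nr_virtfn() below rejects the request with -EINVAL
 * rather than calling pci_enable_sriov().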
7517 **/
7518 uint16_t
7519 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7520 {
7521 struct pci_dev *pdev = phba->pcidev;
7522 uint16_t nr_virtfn;
7523 int pos;
7524
7525 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7526 if (pos == 0)
7527 return 0;
7528
7529 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7530 return nr_virtfn;
7531 }
7532
7533 /**
7534 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7535 * @phba: pointer to lpfc hba data structure.
7536 * @nr_vfn: number of virtual functions to be enabled.
7537 *
7538 * This function enables the PCI SR-IOV virtual functions for a physical
7539 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7540 * enable the number of virtual functions on the physical function. As
7541 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7542 * API call is not considered an error condition for most devices.
7543 **/
7544 int
7545 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7546 {
7547 struct pci_dev *pdev = phba->pcidev;
7548 uint16_t max_nr_vfn;
7549 int rc;
7550
7551 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7552 if (nr_vfn > max_nr_vfn) {
7553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7554 "3057 Requested vfs (%d) greater than "
7555 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7556 return -EINVAL;
7557 }
7558
7559 rc = pci_enable_sriov(pdev, nr_vfn);
7560 if (rc) {
7561 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7562 "2806 Failed to enable sriov on this device "
7563 "with vfn number nr_vf:%d, rc:%d\n",
7564 nr_vfn, rc);
7565 } else
7566 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7567 "2807 Successful enable sriov on this device "
7568 "with vfn number nr_vf:%d\n", nr_vfn);
7569 return rc;
7570 }
7571
7572 static void
7573 lpfc_unblock_requests_work(struct work_struct *work)
7574 {
7575 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7576 unblock_request_work);
7577
7578 lpfc_unblock_requests(phba);
7579 }
7580
7581 /**
7582 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
7583 * @phba: pointer to lpfc hba data structure.
7584 *
7585 * This routine is invoked to set up the driver internal resources before the
7586 * device specific resource setup to support the HBA device it is attached to.
7587 *
7588 * Return codes
7589 * 0 - successful
7590 * other values - error
7591 **/
7592 static int
7593 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7594 {
7595 struct lpfc_sli *psli = &phba->sli;
7596
7597 /*
7598 * Driver resources common to all SLI revisions
7599 */
7600 atomic_set(&phba->fast_event_count, 0);
7601 atomic_set(&phba->dbg_log_idx, 0);
7602 atomic_set(&phba->dbg_log_cnt, 0);
7603 atomic_set(&phba->dbg_log_dmping, 0);
7604 spin_lock_init(&phba->hbalock);
7605
7606 /* Initialize port_list spinlock */
7607 spin_lock_init(&phba->port_list_lock);
7608 INIT_LIST_HEAD(&phba->port_list);
7609
7610 INIT_LIST_HEAD(&phba->work_list);
7611 init_waitqueue_head(&phba->wait_4_mlo_m_q);
7612
7613 /* Initialize the wait queue head for the kernel thread */
7614 init_waitqueue_head(&phba->work_waitq);
7615
7616 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7617 "1403 Protocols supported %s %s %s\n",
7618 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7619 "SCSI" : " "),
7620 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7621 "NVME" : " "),
7622 (phba->nvmet_support ?
"NVMET" : " ")); 7623 7624 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 7625 spin_lock_init(&phba->scsi_buf_list_get_lock); 7626 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 7627 spin_lock_init(&phba->scsi_buf_list_put_lock); 7628 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 7629 7630 /* Initialize the fabric iocb list */ 7631 INIT_LIST_HEAD(&phba->fabric_iocb_list); 7632 7633 /* Initialize list to save ELS buffers */ 7634 INIT_LIST_HEAD(&phba->elsbuf); 7635 7636 /* Initialize FCF connection rec list */ 7637 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 7638 7639 /* Initialize OAS configuration list */ 7640 spin_lock_init(&phba->devicelock); 7641 INIT_LIST_HEAD(&phba->luns); 7642 7643 /* MBOX heartbeat timer */ 7644 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 7645 /* Fabric block timer */ 7646 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 7647 /* EA polling mode timer */ 7648 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 7649 /* Heartbeat timer */ 7650 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 7651 7652 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 7653 7654 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, 7655 lpfc_idle_stat_delay_work); 7656 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); 7657 return 0; 7658 } 7659 7660 /** 7661 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 7662 * @phba: pointer to lpfc hba data structure. 7663 * 7664 * This routine is invoked to set up the driver internal resources specific to 7665 * support the SLI-3 HBA device it attached to. 7666 * 7667 * Return codes 7668 * 0 - successful 7669 * other values - error 7670 **/ 7671 static int 7672 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 7673 { 7674 int rc, entry_sz; 7675 7676 /* 7677 * Initialize timers used by driver 7678 */ 7679 7680 /* FCP polling mode timer */ 7681 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 7682 7683 /* Host attention work mask setup */ 7684 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 7685 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 7686 7687 /* Get all the module params for configuring this host */ 7688 lpfc_get_cfgparam(phba); 7689 /* Set up phase-1 common device driver resources */ 7690 7691 rc = lpfc_setup_driver_resource_phase1(phba); 7692 if (rc) 7693 return -ENODEV; 7694 7695 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 7696 phba->menlo_flag |= HBA_MENLO_SUPPORT; 7697 /* check for menlo minimum sg count */ 7698 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 7699 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 7700 } 7701 7702 if (!phba->sli.sli3_ring) 7703 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 7704 sizeof(struct lpfc_sli_ring), 7705 GFP_KERNEL); 7706 if (!phba->sli.sli3_ring) 7707 return -ENOMEM; 7708 7709 /* 7710 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 7711 * used to create the sg_dma_buf_pool must be dynamically calculated. 7712 */ 7713 7714 if (phba->sli_rev == LPFC_SLI_REV4) 7715 entry_sz = sizeof(struct sli4_sge); 7716 else 7717 entry_sz = sizeof(struct ulp_bde64); 7718 7719 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 7720 if (phba->cfg_enable_bg) { 7721 /* 7722 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 7723 * the FCP rsp, and a BDE for each. 
Since we have no control
7724 * over how many protection data segments the SCSI Layer
7725 * will hand us (i.e. there could be one for every block
7726 * in the IO), we just allocate enough BDEs to accommodate
7727 * our max amount and we need to limit lpfc_sg_seg_cnt to
7728 * minimize the risk of running out.
7729 */
7730 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7731 sizeof(struct fcp_rsp) +
7732 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7733
7734 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7735 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7736
7737 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7738 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7739 } else {
7740 /*
7741 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7742 * the FCP rsp, a BDE for each, and a BDE for up to
7743 * cfg_sg_seg_cnt data segments.
7744 */
7745 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7746 sizeof(struct fcp_rsp) +
7747 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7748
7749 /* Total BDEs in BPL for scsi_sg_list */
7750 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7751 }
7752
7753 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7754 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7755 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7756 phba->cfg_total_seg_cnt);
7757
7758 phba->max_vpi = LPFC_MAX_VPI;
7759 /* This will be set to correct value after config_port mbox */
7760 phba->max_vports = 0;
7761
7762 /*
7763 * Initialize the SLI Layer to run with lpfc HBAs.
7764 */
7765 lpfc_sli_setup(phba);
7766 lpfc_sli_queue_init(phba);
7767
7768 /* Allocate device driver memory */
7769 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7770 return -ENOMEM;
7771
7772 phba->lpfc_sg_dma_buf_pool =
7773 dma_pool_create("lpfc_sg_dma_buf_pool",
7774 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7775 BPL_ALIGN_SZ, 0);
7776
7777 if (!phba->lpfc_sg_dma_buf_pool)
7778 goto fail_free_mem;
7779
7780 phba->lpfc_cmd_rsp_buf_pool =
7781 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7782 &phba->pcidev->dev,
7783 sizeof(struct fcp_cmnd) +
7784 sizeof(struct fcp_rsp),
7785 BPL_ALIGN_SZ, 0);
7786
7787 if (!phba->lpfc_cmd_rsp_buf_pool)
7788 goto fail_free_dma_buf_pool;
7789
7790 /*
7791 * Enable sr-iov virtual functions if supported and configured
7792 * through the module parameter.
7793 */
7794 if (phba->cfg_sriov_nr_virtfn > 0) {
7795 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7796 phba->cfg_sriov_nr_virtfn);
7797 if (rc) {
7798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7799 "2808 Requested number of SR-IOV "
7800 "virtual functions (%d) is not "
7801 "supported\n",
7802 phba->cfg_sriov_nr_virtfn);
7803 phba->cfg_sriov_nr_virtfn = 0;
7804 }
7805 }
7806
7807 return 0;
7808
7809 fail_free_dma_buf_pool:
7810 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7811 phba->lpfc_sg_dma_buf_pool = NULL;
7812 fail_free_mem:
7813 lpfc_mem_free(phba);
7814 return -ENOMEM;
7815 }
7816
7817 /**
7818 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7819 * @phba: pointer to lpfc hba data structure.
7820 *
7821 * This routine is invoked to unset the driver internal resources set up
7822 * specifically for supporting the SLI-3 HBA device it is attached to.
7823 **/ 7824 static void 7825 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 7826 { 7827 /* Free device driver memory allocated */ 7828 lpfc_mem_free_all(phba); 7829 7830 return; 7831 } 7832 7833 /** 7834 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 7835 * @phba: pointer to lpfc hba data structure. 7836 * 7837 * This routine is invoked to set up the driver internal resources specific to 7838 * support the SLI-4 HBA device it attached to. 7839 * 7840 * Return codes 7841 * 0 - successful 7842 * other values - error 7843 **/ 7844 static int 7845 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 7846 { 7847 LPFC_MBOXQ_t *mboxq; 7848 MAILBOX_t *mb; 7849 int rc, i, max_buf_size; 7850 int longs; 7851 int extra; 7852 uint64_t wwn; 7853 u32 if_type; 7854 u32 if_fam; 7855 7856 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 7857 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 7858 phba->sli4_hba.curr_disp_cpu = 0; 7859 7860 /* Get all the module params for configuring this host */ 7861 lpfc_get_cfgparam(phba); 7862 7863 /* Set up phase-1 common device driver resources */ 7864 rc = lpfc_setup_driver_resource_phase1(phba); 7865 if (rc) 7866 return -ENODEV; 7867 7868 /* Before proceed, wait for POST done and device ready */ 7869 rc = lpfc_sli4_post_status_check(phba); 7870 if (rc) 7871 return -ENODEV; 7872 7873 /* Allocate all driver workqueues here */ 7874 7875 /* The lpfc_wq workqueue for deferred irq use */ 7876 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 7877 7878 /* 7879 * Initialize timers used by driver 7880 */ 7881 7882 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 7883 7884 /* FCF rediscover timer */ 7885 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 7886 7887 /* CMF congestion timer */ 7888 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7889 phba->cmf_timer.function = lpfc_cmf_timer; 7890 7891 /* 7892 * Control structure for handling external multi-buffer mailbox 7893 * command pass-through. 7894 */ 7895 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 7896 sizeof(struct lpfc_mbox_ext_buf_ctx)); 7897 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 7898 7899 phba->max_vpi = LPFC_MAX_VPI; 7900 7901 /* This will be set to correct value after the read_config mbox */ 7902 phba->max_vports = 0; 7903 7904 /* Program the default value of vlan_id and fc_map */ 7905 phba->valid_vlan = 0; 7906 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 7907 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 7908 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 7909 7910 /* 7911 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 7912 * we will associate a new ring, for each EQ/CQ/WQ tuple. 7913 * The WQ create will allocate the ring. 7914 */ 7915 7916 /* Initialize buffer queue management fields */ 7917 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 7918 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 7919 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 7920 7921 /* for VMID idle timeout if VMID is enabled */ 7922 if (lpfc_is_vmid_enabled(phba)) 7923 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); 7924 7925 /* 7926 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
7927 */ 7928 /* Initialize the Abort buffer list used by driver */ 7929 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 7930 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 7931 7932 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 7933 /* Initialize the Abort nvme buffer list used by driver */ 7934 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 7935 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7936 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 7937 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 7938 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 7939 } 7940 7941 /* This abort list used by worker thread */ 7942 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 7943 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 7944 spin_lock_init(&phba->sli4_hba.asynce_list_lock); 7945 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); 7946 7947 /* 7948 * Initialize driver internal slow-path work queues 7949 */ 7950 7951 /* Driver internel slow-path CQ Event pool */ 7952 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 7953 /* Response IOCB work queue list */ 7954 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 7955 /* Asynchronous event CQ Event work queue list */ 7956 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 7957 /* Slow-path XRI aborted CQ Event work queue list */ 7958 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 7959 /* Receive queue CQ Event work queue list */ 7960 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 7961 7962 /* Initialize extent block lists. */ 7963 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 7964 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 7965 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 7966 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 7967 7968 /* Initialize mboxq lists. If the early init routines fail 7969 * these lists need to be correctly initialized. 7970 */ 7971 INIT_LIST_HEAD(&phba->sli.mboxq); 7972 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 7973 7974 /* initialize optic_state to 0xFF */ 7975 phba->sli4_hba.lnk_info.optic_state = 0xff; 7976 7977 /* Allocate device driver memory */ 7978 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 7979 if (rc) 7980 return -ENOMEM; 7981 7982 /* IF Type 2 ports get initialized now. */ 7983 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 7984 LPFC_SLI_INTF_IF_TYPE_2) { 7985 rc = lpfc_pci_function_reset(phba); 7986 if (unlikely(rc)) { 7987 rc = -ENODEV; 7988 goto out_free_mem; 7989 } 7990 phba->temp_sensor_support = 1; 7991 } 7992 7993 /* Create the bootstrap mailbox command */ 7994 rc = lpfc_create_bootstrap_mbox(phba); 7995 if (unlikely(rc)) 7996 goto out_free_mem; 7997 7998 /* Set up the host's endian order with the device. */ 7999 rc = lpfc_setup_endian_order(phba); 8000 if (unlikely(rc)) 8001 goto out_free_bsmbx; 8002 8003 /* Set up the hba's configuration parameters. */ 8004 rc = lpfc_sli4_read_config(phba); 8005 if (unlikely(rc)) 8006 goto out_free_bsmbx; 8007 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 8008 if (unlikely(rc)) 8009 goto out_free_bsmbx; 8010 8011 /* IF Type 0 ports get initialized now. 
*/ 8012 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 8013 LPFC_SLI_INTF_IF_TYPE_0) { 8014 rc = lpfc_pci_function_reset(phba); 8015 if (unlikely(rc)) 8016 goto out_free_bsmbx; 8017 } 8018 8019 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8020 GFP_KERNEL); 8021 if (!mboxq) { 8022 rc = -ENOMEM; 8023 goto out_free_bsmbx; 8024 } 8025 8026 /* Check for NVMET being configured */ 8027 phba->nvmet_support = 0; 8028 if (lpfc_enable_nvmet_cnt) { 8029 8030 /* First get WWN of HBA instance */ 8031 lpfc_read_nv(phba, mboxq); 8032 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8033 if (rc != MBX_SUCCESS) { 8034 lpfc_printf_log(phba, KERN_ERR, 8035 LOG_TRACE_EVENT, 8036 "6016 Mailbox failed , mbxCmd x%x " 8037 "READ_NV, mbxStatus x%x\n", 8038 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8039 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 8040 mempool_free(mboxq, phba->mbox_mem_pool); 8041 rc = -EIO; 8042 goto out_free_bsmbx; 8043 } 8044 mb = &mboxq->u.mb; 8045 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 8046 sizeof(uint64_t)); 8047 wwn = cpu_to_be64(wwn); 8048 phba->sli4_hba.wwnn.u.name = wwn; 8049 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 8050 sizeof(uint64_t)); 8051 /* wwn is WWPN of HBA instance */ 8052 wwn = cpu_to_be64(wwn); 8053 phba->sli4_hba.wwpn.u.name = wwn; 8054 8055 /* Check to see if it matches any module parameter */ 8056 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 8057 if (wwn == lpfc_enable_nvmet[i]) { 8058 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 8059 if (lpfc_nvmet_mem_alloc(phba)) 8060 break; 8061 8062 phba->nvmet_support = 1; /* a match */ 8063 8064 lpfc_printf_log(phba, KERN_ERR, 8065 LOG_TRACE_EVENT, 8066 "6017 NVME Target %016llx\n", 8067 wwn); 8068 #else 8069 lpfc_printf_log(phba, KERN_ERR, 8070 LOG_TRACE_EVENT, 8071 "6021 Can't enable NVME Target." 8072 " NVME_TARGET_FC infrastructure" 8073 " is not in kernel\n"); 8074 #endif 8075 /* Not supported for NVMET */ 8076 phba->cfg_xri_rebalancing = 0; 8077 if (phba->irq_chann_mode == NHT_MODE) { 8078 phba->cfg_irq_chann = 8079 phba->sli4_hba.num_present_cpu; 8080 phba->cfg_hdw_queue = 8081 phba->sli4_hba.num_present_cpu; 8082 phba->irq_chann_mode = NORMAL_MODE; 8083 } 8084 break; 8085 } 8086 } 8087 } 8088 8089 lpfc_nvme_mod_param_dep(phba); 8090 8091 /* 8092 * Get sli4 parameters that override parameters from Port capabilities. 8093 * If this call fails, it isn't critical unless the SLI4 parameters come 8094 * back in conflict. 8095 */ 8096 rc = lpfc_get_sli4_parameters(phba, mboxq); 8097 if (rc) { 8098 if_type = bf_get(lpfc_sli_intf_if_type, 8099 &phba->sli4_hba.sli_intf); 8100 if_fam = bf_get(lpfc_sli_intf_sli_family, 8101 &phba->sli4_hba.sli_intf); 8102 if (phba->sli4_hba.extents_in_use && 8103 phba->sli4_hba.rpi_hdrs_in_use) { 8104 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8105 "2999 Unsupported SLI4 Parameters " 8106 "Extents and RPI headers enabled.\n"); 8107 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8108 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 8109 mempool_free(mboxq, phba->mbox_mem_pool); 8110 rc = -EIO; 8111 goto out_free_bsmbx; 8112 } 8113 } 8114 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8115 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 8116 mempool_free(mboxq, phba->mbox_mem_pool); 8117 rc = -EIO; 8118 goto out_free_bsmbx; 8119 } 8120 } 8121 8122 /* 8123 * 1 for cmd, 1 for rsp, NVME adds an extra one 8124 * for boundary conditions in its max_sgl_segment template. 
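 *
 * Worked example (the cfg_sg_seg_cnt value is assumed for illustration):
 * with cfg_sg_seg_cnt = 64 and NVME enabled, extra = 3, so the non-DIF
 * path below sizes each DMA buffer as
 *   sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
 *   (64 + 3) * sizeof(struct sli4_sge)
 * before the result is adjusted against LPFC_MIN_SG_SLI4_BUF_SZ and
 * SLI4_PAGE_ALIGN() further down.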
8125 */
8126 extra = 2;
8127 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8128 extra++;
8129
8130 /*
8131 * No matter what family our adapter is in, we are
8132 * limited to 2 pages (512 SGEs) for our SGL.
8133 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8134 */
8135 max_buf_size = (2 * SLI4_PAGE_SIZE);
8136
8137 /*
8138 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8139 * used to create the sg_dma_buf_pool must be calculated.
8140 */
8141 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8142 /* Both cfg_enable_bg and cfg_external_dif code paths */
8143
8144 /*
8145 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8146 * the FCP rsp, and an SGE. Since we have no control
8147 * over how many protection segments the SCSI Layer
8148 * will hand us (i.e. there could be one for every block
8149 * in the IO), just allocate enough SGEs to accommodate
8150 * our max amount and we need to limit lpfc_sg_seg_cnt
8151 * to minimize the risk of running out.
8152 */
8153 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8154 sizeof(struct fcp_rsp) + max_buf_size;
8155
8156 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8157 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8158
8159 /*
8160 * If supporting DIF, reduce the seg count for scsi to
8161 * allow room for the DIF sges.
8162 */
8163 if (phba->cfg_enable_bg &&
8164 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8165 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8166 else
8167 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8168
8169 } else {
8170 /*
8171 * The scsi_buf for a regular I/O holds the FCP cmnd,
8172 * the FCP rsp, an SGE for each, and an SGE for up to
8173 * cfg_sg_seg_cnt data segments.
8174 */
8175 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8176 sizeof(struct fcp_rsp) +
8177 ((phba->cfg_sg_seg_cnt + extra) *
8178 sizeof(struct sli4_sge));
8179
8180 /* Total SGEs for scsi_sg_list */
8181 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8182 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8183
8184 /*
8185 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8186 * need to post 1 page for the SGL.
8187 */
8188 }
8189
8190 if (phba->cfg_xpsgl && !phba->nvmet_support)
8191 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8192 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8193 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8194 else
8195 phba->cfg_sg_dma_buf_size =
8196 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8197
8198 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8199 sizeof(struct sli4_sge);
8200
8201 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME.
*/ 8202 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8203 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 8204 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 8205 "6300 Reducing NVME sg segment " 8206 "cnt to %d\n", 8207 LPFC_MAX_NVME_SEG_CNT); 8208 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 8209 } else 8210 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 8211 } 8212 8213 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 8214 "9087 sg_seg_cnt:%d dmabuf_size:%d " 8215 "total:%d scsi:%d nvme:%d\n", 8216 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 8217 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 8218 phba->cfg_nvme_seg_cnt); 8219 8220 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 8221 i = phba->cfg_sg_dma_buf_size; 8222 else 8223 i = SLI4_PAGE_SIZE; 8224 8225 phba->lpfc_sg_dma_buf_pool = 8226 dma_pool_create("lpfc_sg_dma_buf_pool", 8227 &phba->pcidev->dev, 8228 phba->cfg_sg_dma_buf_size, 8229 i, 0); 8230 if (!phba->lpfc_sg_dma_buf_pool) 8231 goto out_free_bsmbx; 8232 8233 phba->lpfc_cmd_rsp_buf_pool = 8234 dma_pool_create("lpfc_cmd_rsp_buf_pool", 8235 &phba->pcidev->dev, 8236 sizeof(struct fcp_cmnd) + 8237 sizeof(struct fcp_rsp), 8238 i, 0); 8239 if (!phba->lpfc_cmd_rsp_buf_pool) 8240 goto out_free_sg_dma_buf; 8241 8242 mempool_free(mboxq, phba->mbox_mem_pool); 8243 8244 /* Verify OAS is supported */ 8245 lpfc_sli4_oas_verify(phba); 8246 8247 /* Verify RAS support on adapter */ 8248 lpfc_sli4_ras_init(phba); 8249 8250 /* Verify all the SLI4 queues */ 8251 rc = lpfc_sli4_queue_verify(phba); 8252 if (rc) 8253 goto out_free_cmd_rsp_buf; 8254 8255 /* Create driver internal CQE event pool */ 8256 rc = lpfc_sli4_cq_event_pool_create(phba); 8257 if (rc) 8258 goto out_free_cmd_rsp_buf; 8259 8260 /* Initialize sgl lists per host */ 8261 lpfc_init_sgl_list(phba); 8262 8263 /* Allocate and initialize active sgl array */ 8264 rc = lpfc_init_active_sgl_array(phba); 8265 if (rc) { 8266 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8267 "1430 Failed to initialize sgl list.\n"); 8268 goto out_destroy_cq_event_pool; 8269 } 8270 rc = lpfc_sli4_init_rpi_hdrs(phba); 8271 if (rc) { 8272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8273 "1432 Failed to initialize rpi headers.\n"); 8274 goto out_free_active_sgl; 8275 } 8276 8277 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 8278 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 8279 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 8280 GFP_KERNEL); 8281 if (!phba->fcf.fcf_rr_bmask) { 8282 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8283 "2759 Failed allocate memory for FCF round " 8284 "robin failover bmask\n"); 8285 rc = -ENOMEM; 8286 goto out_remove_rpi_hdrs; 8287 } 8288 8289 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 8290 sizeof(struct lpfc_hba_eq_hdl), 8291 GFP_KERNEL); 8292 if (!phba->sli4_hba.hba_eq_hdl) { 8293 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8294 "2572 Failed allocate memory for " 8295 "fast-path per-EQ handle array\n"); 8296 rc = -ENOMEM; 8297 goto out_free_fcf_rr_bmask; 8298 } 8299 8300 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 8301 sizeof(struct lpfc_vector_map_info), 8302 GFP_KERNEL); 8303 if (!phba->sli4_hba.cpu_map) { 8304 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8305 "3327 Failed allocate memory for msi-x " 8306 "interrupt vector mapping\n"); 8307 rc = -ENOMEM; 8308 goto out_free_hba_eq_hdl; 8309 } 8310 8311 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 8312 if 
(!phba->sli4_hba.eq_info) { 8313 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8314 "3321 Failed allocation for per_cpu stats\n"); 8315 rc = -ENOMEM; 8316 goto out_free_hba_cpu_map; 8317 } 8318 8319 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, 8320 sizeof(*phba->sli4_hba.idle_stat), 8321 GFP_KERNEL); 8322 if (!phba->sli4_hba.idle_stat) { 8323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8324 "3390 Failed allocation for idle_stat\n"); 8325 rc = -ENOMEM; 8326 goto out_free_hba_eq_info; 8327 } 8328 8329 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8330 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 8331 if (!phba->sli4_hba.c_stat) { 8332 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8333 "3332 Failed allocating per cpu hdwq stats\n"); 8334 rc = -ENOMEM; 8335 goto out_free_hba_idle_stat; 8336 } 8337 #endif 8338 8339 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); 8340 if (!phba->cmf_stat) { 8341 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8342 "3331 Failed allocating per cpu cgn stats\n"); 8343 rc = -ENOMEM; 8344 goto out_free_hba_hdwq_info; 8345 } 8346 8347 /* 8348 * Enable sr-iov virtual functions if supported and configured 8349 * through the module parameter. 8350 */ 8351 if (phba->cfg_sriov_nr_virtfn > 0) { 8352 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 8353 phba->cfg_sriov_nr_virtfn); 8354 if (rc) { 8355 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8356 "3020 Requested number of SR-IOV " 8357 "virtual functions (%d) is not " 8358 "supported\n", 8359 phba->cfg_sriov_nr_virtfn); 8360 phba->cfg_sriov_nr_virtfn = 0; 8361 } 8362 } 8363 8364 return 0; 8365 8366 out_free_hba_hdwq_info: 8367 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8368 free_percpu(phba->sli4_hba.c_stat); 8369 out_free_hba_idle_stat: 8370 #endif 8371 kfree(phba->sli4_hba.idle_stat); 8372 out_free_hba_eq_info: 8373 free_percpu(phba->sli4_hba.eq_info); 8374 out_free_hba_cpu_map: 8375 kfree(phba->sli4_hba.cpu_map); 8376 out_free_hba_eq_hdl: 8377 kfree(phba->sli4_hba.hba_eq_hdl); 8378 out_free_fcf_rr_bmask: 8379 kfree(phba->fcf.fcf_rr_bmask); 8380 out_remove_rpi_hdrs: 8381 lpfc_sli4_remove_rpi_hdrs(phba); 8382 out_free_active_sgl: 8383 lpfc_free_active_sgl(phba); 8384 out_destroy_cq_event_pool: 8385 lpfc_sli4_cq_event_pool_destroy(phba); 8386 out_free_cmd_rsp_buf: 8387 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 8388 phba->lpfc_cmd_rsp_buf_pool = NULL; 8389 out_free_sg_dma_buf: 8390 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 8391 phba->lpfc_sg_dma_buf_pool = NULL; 8392 out_free_bsmbx: 8393 lpfc_destroy_bootstrap_mbox(phba); 8394 out_free_mem: 8395 lpfc_mem_free(phba); 8396 return rc; 8397 } 8398 8399 /** 8400 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 8401 * @phba: pointer to lpfc hba data structure. 8402 * 8403 * This routine is invoked to unset the driver internal resources set up 8404 * specific for supporting the SLI-4 HBA device it attached to. 
8405 **/ 8406 static void 8407 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 8408 { 8409 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 8410 8411 free_percpu(phba->sli4_hba.eq_info); 8412 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8413 free_percpu(phba->sli4_hba.c_stat); 8414 #endif 8415 free_percpu(phba->cmf_stat); 8416 kfree(phba->sli4_hba.idle_stat); 8417 8418 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 8419 kfree(phba->sli4_hba.cpu_map); 8420 phba->sli4_hba.num_possible_cpu = 0; 8421 phba->sli4_hba.num_present_cpu = 0; 8422 phba->sli4_hba.curr_disp_cpu = 0; 8423 cpumask_clear(&phba->sli4_hba.irq_aff_mask); 8424 8425 /* Free memory allocated for fast-path work queue handles */ 8426 kfree(phba->sli4_hba.hba_eq_hdl); 8427 8428 /* Free the allocated rpi headers. */ 8429 lpfc_sli4_remove_rpi_hdrs(phba); 8430 lpfc_sli4_remove_rpis(phba); 8431 8432 /* Free eligible FCF index bmask */ 8433 kfree(phba->fcf.fcf_rr_bmask); 8434 8435 /* Free the ELS sgl list */ 8436 lpfc_free_active_sgl(phba); 8437 lpfc_free_els_sgl_list(phba); 8438 lpfc_free_nvmet_sgl_list(phba); 8439 8440 /* Free the completion queue EQ event pool */ 8441 lpfc_sli4_cq_event_release_all(phba); 8442 lpfc_sli4_cq_event_pool_destroy(phba); 8443 8444 /* Release resource identifiers. */ 8445 lpfc_sli4_dealloc_resource_identifiers(phba); 8446 8447 /* Free the bsmbx region. */ 8448 lpfc_destroy_bootstrap_mbox(phba); 8449 8450 /* Free the SLI Layer memory with SLI4 HBAs */ 8451 lpfc_mem_free_all(phba); 8452 8453 /* Free the current connect table */ 8454 list_for_each_entry_safe(conn_entry, next_conn_entry, 8455 &phba->fcf_conn_rec_list, list) { 8456 list_del_init(&conn_entry->list); 8457 kfree(conn_entry); 8458 } 8459 8460 return; 8461 } 8462 8463 /** 8464 * lpfc_init_api_table_setup - Set up init api function jump table 8465 * @phba: The hba struct for which this call is being executed. 8466 * @dev_grp: The HBA PCI-Device group number. 8467 * 8468 * This routine sets up the device INIT interface API function jump table 8469 * in @phba struct. 8470 * 8471 * Returns: 0 - success, -ENODEV - failure. 8472 **/ 8473 int 8474 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8475 { 8476 phba->lpfc_hba_init_link = lpfc_hba_init_link; 8477 phba->lpfc_hba_down_link = lpfc_hba_down_link; 8478 phba->lpfc_selective_reset = lpfc_selective_reset; 8479 switch (dev_grp) { 8480 case LPFC_PCI_DEV_LP: 8481 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 8482 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 8483 phba->lpfc_stop_port = lpfc_stop_port_s3; 8484 break; 8485 case LPFC_PCI_DEV_OC: 8486 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 8487 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 8488 phba->lpfc_stop_port = lpfc_stop_port_s4; 8489 break; 8490 default: 8491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8492 "1431 Invalid HBA PCI-device group: 0x%x\n", 8493 dev_grp); 8494 return -ENODEV; 8495 } 8496 return 0; 8497 } 8498 8499 /** 8500 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 8501 * @phba: pointer to lpfc hba data structure. 8502 * 8503 * This routine is invoked to set up the driver internal resources after the 8504 * device specific resource setup to support the HBA device it attached to. 8505 * 8506 * Return codes 8507 * 0 - successful 8508 * other values - error 8509 **/ 8510 static int 8511 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 8512 { 8513 int error; 8514 8515 /* Startup the kernel thread for this host adapter. 
*/ 8516 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8517 "lpfc_worker_%d", phba->brd_no); 8518 if (IS_ERR(phba->worker_thread)) { 8519 error = PTR_ERR(phba->worker_thread); 8520 return error; 8521 } 8522 8523 return 0; 8524 } 8525 8526 /** 8527 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 8528 * @phba: pointer to lpfc hba data structure. 8529 * 8530 * This routine is invoked to unset the driver internal resources set up after 8531 * the device specific resource setup for supporting the HBA device it 8532 * attached to. 8533 **/ 8534 static void 8535 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 8536 { 8537 if (phba->wq) { 8538 destroy_workqueue(phba->wq); 8539 phba->wq = NULL; 8540 } 8541 8542 /* Stop kernel worker thread */ 8543 if (phba->worker_thread) 8544 kthread_stop(phba->worker_thread); 8545 } 8546 8547 /** 8548 * lpfc_free_iocb_list - Free iocb list. 8549 * @phba: pointer to lpfc hba data structure. 8550 * 8551 * This routine is invoked to free the driver's IOCB list and memory. 8552 **/ 8553 void 8554 lpfc_free_iocb_list(struct lpfc_hba *phba) 8555 { 8556 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 8557 8558 spin_lock_irq(&phba->hbalock); 8559 list_for_each_entry_safe(iocbq_entry, iocbq_next, 8560 &phba->lpfc_iocb_list, list) { 8561 list_del(&iocbq_entry->list); 8562 kfree(iocbq_entry); 8563 phba->total_iocbq_bufs--; 8564 } 8565 spin_unlock_irq(&phba->hbalock); 8566 8567 return; 8568 } 8569 8570 /** 8571 * lpfc_init_iocb_list - Allocate and initialize iocb list. 8572 * @phba: pointer to lpfc hba data structure. 8573 * @iocb_count: number of requested iocbs 8574 * 8575 * This routine is invoked to allocate and initizlize the driver's IOCB 8576 * list and set up the IOCB tag array accordingly. 8577 * 8578 * Return codes 8579 * 0 - successful 8580 * other values - error 8581 **/ 8582 int 8583 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 8584 { 8585 struct lpfc_iocbq *iocbq_entry = NULL; 8586 uint16_t iotag; 8587 int i; 8588 8589 /* Initialize and populate the iocb list per host. */ 8590 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 8591 for (i = 0; i < iocb_count; i++) { 8592 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 8593 if (iocbq_entry == NULL) { 8594 printk(KERN_ERR "%s: only allocated %d iocbs of " 8595 "expected %d count. Unloading driver.\n", 8596 __func__, i, iocb_count); 8597 goto out_free_iocbq; 8598 } 8599 8600 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 8601 if (iotag == 0) { 8602 kfree(iocbq_entry); 8603 printk(KERN_ERR "%s: failed to allocate IOTAG. " 8604 "Unloading driver.\n", __func__); 8605 goto out_free_iocbq; 8606 } 8607 iocbq_entry->sli4_lxritag = NO_XRI; 8608 iocbq_entry->sli4_xritag = NO_XRI; 8609 8610 spin_lock_irq(&phba->hbalock); 8611 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 8612 phba->total_iocbq_bufs++; 8613 spin_unlock_irq(&phba->hbalock); 8614 } 8615 8616 return 0; 8617 8618 out_free_iocbq: 8619 lpfc_free_iocb_list(phba); 8620 8621 return -ENOMEM; 8622 } 8623 8624 /** 8625 * lpfc_free_sgl_list - Free a given sgl list. 8626 * @phba: pointer to lpfc hba data structure. 8627 * @sglq_list: pointer to the head of sgl list. 8628 * 8629 * This routine is invoked to free a give sgl list and memory. 
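 *
 * Typical usage mirrors lpfc_free_els_sgl_list() below: splice the driver
 * list onto a local list head under sgl_list_lock, drop the lock, then
 * pass the local list to this routine:
 *
 *   LIST_HEAD(sglq_list);
 *   spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 *   list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
 *   spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 *   lpfc_free_sgl_list(phba, &sglq_list);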
8630 **/ 8631 void 8632 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 8633 { 8634 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8635 8636 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 8637 list_del(&sglq_entry->list); 8638 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 8639 kfree(sglq_entry); 8640 } 8641 } 8642 8643 /** 8644 * lpfc_free_els_sgl_list - Free els sgl list. 8645 * @phba: pointer to lpfc hba data structure. 8646 * 8647 * This routine is invoked to free the driver's els sgl list and memory. 8648 **/ 8649 static void 8650 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 8651 { 8652 LIST_HEAD(sglq_list); 8653 8654 /* Retrieve all els sgls from driver list */ 8655 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 8656 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 8657 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 8658 8659 /* Now free the sgl list */ 8660 lpfc_free_sgl_list(phba, &sglq_list); 8661 } 8662 8663 /** 8664 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 8665 * @phba: pointer to lpfc hba data structure. 8666 * 8667 * This routine is invoked to free the driver's nvmet sgl list and memory. 8668 **/ 8669 static void 8670 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 8671 { 8672 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8673 LIST_HEAD(sglq_list); 8674 8675 /* Retrieve all nvmet sgls from driver list */ 8676 spin_lock_irq(&phba->hbalock); 8677 spin_lock(&phba->sli4_hba.sgl_list_lock); 8678 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 8679 spin_unlock(&phba->sli4_hba.sgl_list_lock); 8680 spin_unlock_irq(&phba->hbalock); 8681 8682 /* Now free the sgl list */ 8683 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 8684 list_del(&sglq_entry->list); 8685 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 8686 kfree(sglq_entry); 8687 } 8688 8689 /* Update the nvmet_xri_cnt to reflect no current sgls. 8690 * The next initialization cycle sets the count and allocates 8691 * the sgls over again. 8692 */ 8693 phba->sli4_hba.nvmet_xri_cnt = 0; 8694 } 8695 8696 /** 8697 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 8698 * @phba: pointer to lpfc hba data structure. 8699 * 8700 * This routine is invoked to allocate the driver's active sgl memory. 8701 * This array will hold the sglq_entry's for active IOs. 8702 **/ 8703 static int 8704 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 8705 { 8706 int size; 8707 size = sizeof(struct lpfc_sglq *); 8708 size *= phba->sli4_hba.max_cfg_param.max_xri; 8709 8710 phba->sli4_hba.lpfc_sglq_active_list = 8711 kzalloc(size, GFP_KERNEL); 8712 if (!phba->sli4_hba.lpfc_sglq_active_list) 8713 return -ENOMEM; 8714 return 0; 8715 } 8716 8717 /** 8718 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 8719 * @phba: pointer to lpfc hba data structure. 8720 * 8721 * This routine is invoked to walk through the array of active sglq entries 8722 * and free all of the resources. 8723 * This is just a place holder for now. 8724 **/ 8725 static void 8726 lpfc_free_active_sgl(struct lpfc_hba *phba) 8727 { 8728 kfree(phba->sli4_hba.lpfc_sglq_active_list); 8729 } 8730 8731 /** 8732 * lpfc_init_sgl_list - Allocate and initialize sgl list. 8733 * @phba: pointer to lpfc hba data structure. 8734 * 8735 * This routine is invoked to allocate and initizlize the driver's sgl 8736 * list and set up the sgl xritag tag array accordingly. 
8737 * 8738 **/ 8739 static void 8740 lpfc_init_sgl_list(struct lpfc_hba *phba) 8741 { 8742 /* Initialize and populate the sglq list per host/VF. */ 8743 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 8744 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 8745 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 8746 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 8747 8748 /* els xri-sgl book keeping */ 8749 phba->sli4_hba.els_xri_cnt = 0; 8750 8751 /* nvme xri-buffer book keeping */ 8752 phba->sli4_hba.io_xri_cnt = 0; 8753 } 8754 8755 /** 8756 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 8757 * @phba: pointer to lpfc hba data structure. 8758 * 8759 * This routine is invoked to post rpi header templates to the 8760 * port for those SLI4 ports that do not support extents. This routine 8761 * posts a PAGE_SIZE memory region to the port to hold up to 8762 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 8763 * and should be called only when interrupts are disabled. 8764 * 8765 * Return codes 8766 * 0 - successful 8767 * -ERROR - otherwise. 8768 **/ 8769 int 8770 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 8771 { 8772 int rc = 0; 8773 struct lpfc_rpi_hdr *rpi_hdr; 8774 8775 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 8776 if (!phba->sli4_hba.rpi_hdrs_in_use) 8777 return rc; 8778 if (phba->sli4_hba.extents_in_use) 8779 return -EIO; 8780 8781 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 8782 if (!rpi_hdr) { 8783 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8784 "0391 Error during rpi post operation\n"); 8785 lpfc_sli4_remove_rpis(phba); 8786 rc = -ENODEV; 8787 } 8788 8789 return rc; 8790 } 8791 8792 /** 8793 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 8794 * @phba: pointer to lpfc hba data structure. 8795 * 8796 * This routine is invoked to allocate a single 4KB memory region to 8797 * support rpis and stores them in the phba. This single region 8798 * provides support for up to 64 rpis. The region is used globally 8799 * by the device. 8800 * 8801 * Returns: 8802 * A valid rpi hdr on success. 8803 * A NULL pointer on any failure. 8804 **/ 8805 struct lpfc_rpi_hdr * 8806 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 8807 { 8808 uint16_t rpi_limit, curr_rpi_range; 8809 struct lpfc_dmabuf *dmabuf; 8810 struct lpfc_rpi_hdr *rpi_hdr; 8811 8812 /* 8813 * If the SLI4 port supports extents, posting the rpi header isn't 8814 * required. Set the expected maximum count and let the actual value 8815 * get set when extents are fully allocated. 8816 */ 8817 if (!phba->sli4_hba.rpi_hdrs_in_use) 8818 return NULL; 8819 if (phba->sli4_hba.extents_in_use) 8820 return NULL; 8821 8822 /* The limit on the logical index is just the max_rpi count. */ 8823 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 8824 8825 spin_lock_irq(&phba->hbalock); 8826 /* 8827 * Establish the starting RPI in this header block. The starting 8828 * rpi is normalized to a zero base because the physical rpi is 8829 * port based. 8830 */ 8831 curr_rpi_range = phba->sli4_hba.next_rpi; 8832 spin_unlock_irq(&phba->hbalock); 8833 8834 /* Reached full RPI range */ 8835 if (curr_rpi_range == rpi_limit) 8836 return NULL; 8837 8838 /* 8839 * First allocate the protocol header region for the port. The 8840 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
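 * A dma_alloc_coherent() allocation of at least PAGE_SIZE is normally page
 * aligned already, but the IS_ALIGNED() check below still verifies the
 * LPFC_HDR_TEMPLATE_SIZE alignment the port requires and fails the header
 * creation if it does not hold.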
8841 */ 8842 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8843 if (!dmabuf) 8844 return NULL; 8845 8846 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 8847 LPFC_HDR_TEMPLATE_SIZE, 8848 &dmabuf->phys, GFP_KERNEL); 8849 if (!dmabuf->virt) { 8850 rpi_hdr = NULL; 8851 goto err_free_dmabuf; 8852 } 8853 8854 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 8855 rpi_hdr = NULL; 8856 goto err_free_coherent; 8857 } 8858 8859 /* Save the rpi header data for cleanup later. */ 8860 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 8861 if (!rpi_hdr) 8862 goto err_free_coherent; 8863 8864 rpi_hdr->dmabuf = dmabuf; 8865 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 8866 rpi_hdr->page_count = 1; 8867 spin_lock_irq(&phba->hbalock); 8868 8869 /* The rpi_hdr stores the logical index only. */ 8870 rpi_hdr->start_rpi = curr_rpi_range; 8871 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 8872 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 8873 8874 spin_unlock_irq(&phba->hbalock); 8875 return rpi_hdr; 8876 8877 err_free_coherent: 8878 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 8879 dmabuf->virt, dmabuf->phys); 8880 err_free_dmabuf: 8881 kfree(dmabuf); 8882 return NULL; 8883 } 8884 8885 /** 8886 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 8887 * @phba: pointer to lpfc hba data structure. 8888 * 8889 * This routine is invoked to remove all memory resources allocated 8890 * to support rpis for SLI4 ports not supporting extents. This routine 8891 * presumes the caller has released all rpis consumed by fabric or port 8892 * logins and is prepared to have the header pages removed. 8893 **/ 8894 void 8895 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 8896 { 8897 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 8898 8899 if (!phba->sli4_hba.rpi_hdrs_in_use) 8900 goto exit; 8901 8902 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 8903 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 8904 list_del(&rpi_hdr->list); 8905 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 8906 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 8907 kfree(rpi_hdr->dmabuf); 8908 kfree(rpi_hdr); 8909 } 8910 exit: 8911 /* There are no rpis available to the port now. */ 8912 phba->sli4_hba.next_rpi = 0; 8913 } 8914 8915 /** 8916 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 8917 * @pdev: pointer to pci device data structure. 8918 * 8919 * This routine is invoked to allocate the driver hba data structure for an 8920 * HBA device. If the allocation is successful, the phba reference to the 8921 * PCI device data structure is set. 
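 *
 * Probe-time usage sketch (illustrative only; the real probe path also
 * enables the PCI device and sets up SLI resources before registering
 * the SCSI host):
 *
 *   phba = lpfc_hba_alloc(pdev);
 *   if (!phba)
 *           return -ENOMEM;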
8922 * 8923 * Return codes 8924 * pointer to @phba - successful 8925 * NULL - error 8926 **/ 8927 static struct lpfc_hba * 8928 lpfc_hba_alloc(struct pci_dev *pdev) 8929 { 8930 struct lpfc_hba *phba; 8931 8932 /* Allocate memory for HBA structure */ 8933 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 8934 if (!phba) { 8935 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 8936 return NULL; 8937 } 8938 8939 /* Set reference to PCI device in HBA structure */ 8940 phba->pcidev = pdev; 8941 8942 /* Assign an unused board number */ 8943 phba->brd_no = lpfc_get_instance(); 8944 if (phba->brd_no < 0) { 8945 kfree(phba); 8946 return NULL; 8947 } 8948 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 8949 8950 spin_lock_init(&phba->ct_ev_lock); 8951 INIT_LIST_HEAD(&phba->ct_ev_waiters); 8952 8953 return phba; 8954 } 8955 8956 /** 8957 * lpfc_hba_free - Free driver hba data structure with a device. 8958 * @phba: pointer to lpfc hba data structure. 8959 * 8960 * This routine is invoked to free the driver hba data structure with an 8961 * HBA device. 8962 **/ 8963 static void 8964 lpfc_hba_free(struct lpfc_hba *phba) 8965 { 8966 if (phba->sli_rev == LPFC_SLI_REV4) 8967 kfree(phba->sli4_hba.hdwq); 8968 8969 /* Release the driver assigned board number */ 8970 idr_remove(&lpfc_hba_index, phba->brd_no); 8971 8972 /* Free memory allocated with sli3 rings */ 8973 kfree(phba->sli.sli3_ring); 8974 phba->sli.sli3_ring = NULL; 8975 8976 kfree(phba); 8977 return; 8978 } 8979 8980 /** 8981 * lpfc_create_shost - Create hba physical port with associated scsi host. 8982 * @phba: pointer to lpfc hba data structure. 8983 * 8984 * This routine is invoked to create HBA physical port and associate a SCSI 8985 * host with it. 8986 * 8987 * Return codes 8988 * 0 - successful 8989 * other values - error 8990 **/ 8991 static int 8992 lpfc_create_shost(struct lpfc_hba *phba) 8993 { 8994 struct lpfc_vport *vport; 8995 struct Scsi_Host *shost; 8996 8997 /* Initialize HBA FC structure */ 8998 phba->fc_edtov = FF_DEF_EDTOV; 8999 phba->fc_ratov = FF_DEF_RATOV; 9000 phba->fc_altov = FF_DEF_ALTOV; 9001 phba->fc_arbtov = FF_DEF_ARBTOV; 9002 9003 atomic_set(&phba->sdev_cnt, 0); 9004 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 9005 if (!vport) 9006 return -ENODEV; 9007 9008 shost = lpfc_shost_from_vport(vport); 9009 phba->pport = vport; 9010 9011 if (phba->nvmet_support) { 9012 /* Only 1 vport (pport) will support NVME target */ 9013 phba->targetport = NULL; 9014 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 9015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 9016 "6076 NVME Target Found\n"); 9017 } 9018 9019 lpfc_debugfs_initialize(vport); 9020 /* Put reference to SCSI host to driver's device private data */ 9021 pci_set_drvdata(phba->pcidev, shost); 9022 9023 /* 9024 * At this point we are fully registered with PSA. In addition, 9025 * any initial discovery should be completed. 9026 */ 9027 vport->load_flag |= FC_ALLOW_FDMI; 9028 if (phba->cfg_enable_SmartSAN || 9029 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 9030 9031 /* Setup appropriate attribute masks */ 9032 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 9033 if (phba->cfg_enable_SmartSAN) 9034 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 9035 else 9036 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 9037 } 9038 return 0; 9039 } 9040 9041 /** 9042 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 9043 * @phba: pointer to lpfc hba data structure. 
9044 * 9045 * This routine is invoked to destroy HBA physical port and the associated 9046 * SCSI host. 9047 **/ 9048 static void 9049 lpfc_destroy_shost(struct lpfc_hba *phba) 9050 { 9051 struct lpfc_vport *vport = phba->pport; 9052 9053 /* Destroy physical port that associated with the SCSI host */ 9054 destroy_port(vport); 9055 9056 return; 9057 } 9058 9059 /** 9060 * lpfc_setup_bg - Setup Block guard structures and debug areas. 9061 * @phba: pointer to lpfc hba data structure. 9062 * @shost: the shost to be used to detect Block guard settings. 9063 * 9064 * This routine sets up the local Block guard protocol settings for @shost. 9065 * This routine also allocates memory for debugging bg buffers. 9066 **/ 9067 static void 9068 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 9069 { 9070 uint32_t old_mask; 9071 uint32_t old_guard; 9072 9073 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9074 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9075 "1478 Registering BlockGuard with the " 9076 "SCSI layer\n"); 9077 9078 old_mask = phba->cfg_prot_mask; 9079 old_guard = phba->cfg_prot_guard; 9080 9081 /* Only allow supported values */ 9082 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 9083 SHOST_DIX_TYPE0_PROTECTION | 9084 SHOST_DIX_TYPE1_PROTECTION); 9085 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 9086 SHOST_DIX_GUARD_CRC); 9087 9088 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 9089 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 9090 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 9091 9092 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9093 if ((old_mask != phba->cfg_prot_mask) || 9094 (old_guard != phba->cfg_prot_guard)) 9095 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9096 "1475 Registering BlockGuard with the " 9097 "SCSI layer: mask %d guard %d\n", 9098 phba->cfg_prot_mask, 9099 phba->cfg_prot_guard); 9100 9101 scsi_host_set_prot(shost, phba->cfg_prot_mask); 9102 scsi_host_set_guard(shost, phba->cfg_prot_guard); 9103 } else 9104 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9105 "1479 Not Registering BlockGuard with the SCSI " 9106 "layer, Bad protection parameters: %d %d\n", 9107 old_mask, old_guard); 9108 } 9109 } 9110 9111 /** 9112 * lpfc_post_init_setup - Perform necessary device post initialization setup. 9113 * @phba: pointer to lpfc hba data structure. 9114 * 9115 * This routine is invoked to perform all the necessary post initialization 9116 * setup for the device. 9117 **/ 9118 static void 9119 lpfc_post_init_setup(struct lpfc_hba *phba) 9120 { 9121 struct Scsi_Host *shost; 9122 struct lpfc_adapter_event_header adapter_event; 9123 9124 /* Get the default values for Model Name and Description */ 9125 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9126 9127 /* 9128 * hba setup may have changed the hba_queue_depth so we need to 9129 * adjust the value of can_queue. 
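* Note that can_queue is set below to cfg_hba_queue_depth minus a small fixed reserve of 10 commands, presumably to keep headroom for driver-internal I/O.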
9130 */ 9131 shost = pci_get_drvdata(phba->pcidev); 9132 shost->can_queue = phba->cfg_hba_queue_depth - 10; 9133 9134 lpfc_host_attrib_init(shost); 9135 9136 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9137 spin_lock_irq(shost->host_lock); 9138 lpfc_poll_start_timer(phba); 9139 spin_unlock_irq(shost->host_lock); 9140 } 9141 9142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9143 "0428 Perform SCSI scan\n"); 9144 /* Send board arrival event to upper layer */ 9145 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 9146 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 9147 fc_host_post_vendor_event(shost, fc_get_event_number(), 9148 sizeof(adapter_event), 9149 (char *) &adapter_event, 9150 LPFC_NL_VENDOR_ID); 9151 return; 9152 } 9153 9154 /** 9155 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 9156 * @phba: pointer to lpfc hba data structure. 9157 * 9158 * This routine is invoked to set up the PCI device memory space for device 9159 * with SLI-3 interface spec. 9160 * 9161 * Return codes 9162 * 0 - successful 9163 * other values - error 9164 **/ 9165 static int 9166 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 9167 { 9168 struct pci_dev *pdev = phba->pcidev; 9169 unsigned long bar0map_len, bar2map_len; 9170 int i, hbq_count; 9171 void *ptr; 9172 int error; 9173 9174 if (!pdev) 9175 return -ENODEV; 9176 9177 /* Set the device DMA mask size */ 9178 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9179 if (error) 9180 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 9181 if (error) 9182 return error; 9183 error = -ENODEV; 9184 9185 /* Get the bus address of Bar0 and Bar2 and the number of bytes 9186 * required by each mapping. 9187 */ 9188 phba->pci_bar0_map = pci_resource_start(pdev, 0); 9189 bar0map_len = pci_resource_len(pdev, 0); 9190 9191 phba->pci_bar2_map = pci_resource_start(pdev, 2); 9192 bar2map_len = pci_resource_len(pdev, 2); 9193 9194 /* Map HBA SLIM to a kernel virtual address. */ 9195 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 9196 if (!phba->slim_memmap_p) { 9197 dev_printk(KERN_ERR, &pdev->dev, 9198 "ioremap failed for SLIM memory.\n"); 9199 goto out; 9200 } 9201 9202 /* Map HBA Control Registers to a kernel virtual address. 
*/ 9203 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 9204 if (!phba->ctrl_regs_memmap_p) { 9205 dev_printk(KERN_ERR, &pdev->dev, 9206 "ioremap failed for HBA control registers.\n"); 9207 goto out_iounmap_slim; 9208 } 9209 9210 /* Allocate memory for SLI-2 structures */ 9211 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9212 &phba->slim2p.phys, GFP_KERNEL); 9213 if (!phba->slim2p.virt) 9214 goto out_iounmap; 9215 9216 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 9217 phba->mbox_ext = (phba->slim2p.virt + 9218 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 9219 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 9220 phba->IOCBs = (phba->slim2p.virt + 9221 offsetof(struct lpfc_sli2_slim, IOCBs)); 9222 9223 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 9224 lpfc_sli_hbq_size(), 9225 &phba->hbqslimp.phys, 9226 GFP_KERNEL); 9227 if (!phba->hbqslimp.virt) 9228 goto out_free_slim; 9229 9230 hbq_count = lpfc_sli_hbq_count(); 9231 ptr = phba->hbqslimp.virt; 9232 for (i = 0; i < hbq_count; ++i) { 9233 phba->hbqs[i].hbq_virt = ptr; 9234 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 9235 ptr += (lpfc_hbq_defs[i]->entry_count * 9236 sizeof(struct lpfc_hbq_entry)); 9237 } 9238 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 9239 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 9240 9241 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 9242 9243 phba->MBslimaddr = phba->slim_memmap_p; 9244 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 9245 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 9246 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 9247 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 9248 9249 return 0; 9250 9251 out_free_slim: 9252 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9253 phba->slim2p.virt, phba->slim2p.phys); 9254 out_iounmap: 9255 iounmap(phba->ctrl_regs_memmap_p); 9256 out_iounmap_slim: 9257 iounmap(phba->slim_memmap_p); 9258 out: 9259 return error; 9260 } 9261 9262 /** 9263 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 9264 * @phba: pointer to lpfc hba data structure. 9265 * 9266 * This routine is invoked to unset the PCI device memory space for device 9267 * with SLI-3 interface spec. 9268 **/ 9269 static void 9270 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 9271 { 9272 struct pci_dev *pdev; 9273 9274 /* Obtain PCI device reference */ 9275 if (!phba->pcidev) 9276 return; 9277 else 9278 pdev = phba->pcidev; 9279 9280 /* Free coherent DMA memory allocated */ 9281 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 9282 phba->hbqslimp.virt, phba->hbqslimp.phys); 9283 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9284 phba->slim2p.virt, phba->slim2p.phys); 9285 9286 /* I/O memory unmap */ 9287 iounmap(phba->ctrl_regs_memmap_p); 9288 iounmap(phba->slim_memmap_p); 9289 9290 return; 9291 } 9292 9293 /** 9294 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 9295 * @phba: pointer to lpfc hba data structure. 9296 * 9297 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 9298 * done and check status. 9299 * 9300 * Return 0 if successful, otherwise -ENODEV. 
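* The wait is implemented as up to 3000 polls of the port semaphore register at 10 ms intervals (roughly 30 seconds).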
9301 **/ 9302 int 9303 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 9304 { 9305 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 9306 struct lpfc_register reg_data; 9307 int i, port_error = 0; 9308 uint32_t if_type; 9309 9310 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 9311 memset(&reg_data, 0, sizeof(reg_data)); 9312 if (!phba->sli4_hba.PSMPHRregaddr) 9313 return -ENODEV; 9314 9315 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 9316 for (i = 0; i < 3000; i++) { 9317 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 9318 &portsmphr_reg.word0) || 9319 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 9320 /* Port has a fatal POST error, break out */ 9321 port_error = -ENODEV; 9322 break; 9323 } 9324 if (LPFC_POST_STAGE_PORT_READY == 9325 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 9326 break; 9327 msleep(10); 9328 } 9329 9330 /* 9331 * If there was a port error during POST, then don't proceed with 9332 * other register reads as the data may not be valid. Just exit. 9333 */ 9334 if (port_error) { 9335 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9336 "1408 Port Failed POST - portsmphr=0x%x, " 9337 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 9338 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 9339 portsmphr_reg.word0, 9340 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 9341 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 9342 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 9343 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 9344 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 9345 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 9346 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 9347 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 9348 } else { 9349 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9350 "2534 Device Info: SLIFamily=0x%x, " 9351 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 9352 "SLIHint_2=0x%x, FT=0x%x\n", 9353 bf_get(lpfc_sli_intf_sli_family, 9354 &phba->sli4_hba.sli_intf), 9355 bf_get(lpfc_sli_intf_slirev, 9356 &phba->sli4_hba.sli_intf), 9357 bf_get(lpfc_sli_intf_if_type, 9358 &phba->sli4_hba.sli_intf), 9359 bf_get(lpfc_sli_intf_sli_hint1, 9360 &phba->sli4_hba.sli_intf), 9361 bf_get(lpfc_sli_intf_sli_hint2, 9362 &phba->sli4_hba.sli_intf), 9363 bf_get(lpfc_sli_intf_func_type, 9364 &phba->sli4_hba.sli_intf)); 9365 /* 9366 * Check for other Port errors during the initialization 9367 * process. Fail the load if the port did not come up 9368 * correctly. 9369 */ 9370 if_type = bf_get(lpfc_sli_intf_if_type, 9371 &phba->sli4_hba.sli_intf); 9372 switch (if_type) { 9373 case LPFC_SLI_INTF_IF_TYPE_0: 9374 phba->sli4_hba.ue_mask_lo = 9375 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 9376 phba->sli4_hba.ue_mask_hi = 9377 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 9378 uerrlo_reg.word0 = 9379 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 9380 uerrhi_reg.word0 = 9381 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 9382 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 9383 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 9384 lpfc_printf_log(phba, KERN_ERR, 9385 LOG_TRACE_EVENT, 9386 "1422 Unrecoverable Error " 9387 "Detected during POST " 9388 "uerr_lo_reg=0x%x, " 9389 "uerr_hi_reg=0x%x, " 9390 "ue_mask_lo_reg=0x%x, " 9391 "ue_mask_hi_reg=0x%x\n", 9392 uerrlo_reg.word0, 9393 uerrhi_reg.word0, 9394 phba->sli4_hba.ue_mask_lo, 9395 phba->sli4_hba.ue_mask_hi); 9396 port_error = -ENODEV; 9397 } 9398 break; 9399 case LPFC_SLI_INTF_IF_TYPE_2: 9400 case LPFC_SLI_INTF_IF_TYPE_6: 9401 /* Final checks. The port status should be clean.
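* An error reported in the port status register (lpfc_sliport_status_err) without the lpfc_sliport_status_rn bit set is treated as unrecoverable; ERR1/ERR2 are then captured and logged.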
*/ 9402 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 9403 &reg_data.word0) || 9404 (bf_get(lpfc_sliport_status_err, &reg_data) && 9405 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 9406 phba->work_status[0] = 9407 readl(phba->sli4_hba.u.if_type2. 9408 ERR1regaddr); 9409 phba->work_status[1] = 9410 readl(phba->sli4_hba.u.if_type2. 9411 ERR2regaddr); 9412 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9413 "2888 Unrecoverable port error " 9414 "following POST: port status reg " 9415 "0x%x, port_smphr reg 0x%x, " 9416 "error 1=0x%x, error 2=0x%x\n", 9417 reg_data.word0, 9418 portsmphr_reg.word0, 9419 phba->work_status[0], 9420 phba->work_status[1]); 9421 port_error = -ENODEV; 9422 break; 9423 } 9424 9425 if (lpfc_pldv_detect && 9426 bf_get(lpfc_sli_intf_sli_family, 9427 &phba->sli4_hba.sli_intf) == 9428 LPFC_SLI_INTF_FAMILY_G6) 9429 pci_write_config_byte(phba->pcidev, 9430 LPFC_SLI_INTF, CFG_PLD); 9431 break; 9432 case LPFC_SLI_INTF_IF_TYPE_1: 9433 default: 9434 break; 9435 } 9436 } 9437 return port_error; 9438 } 9439 9440 /** 9441 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 9442 * @phba: pointer to lpfc hba data structure. 9443 * @if_type: The SLI4 interface type getting configured. 9444 * 9445 * This routine is invoked to set up SLI4 BAR0 PCI config space register 9446 * memory map. 9447 **/ 9448 static void 9449 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 9450 { 9451 switch (if_type) { 9452 case LPFC_SLI_INTF_IF_TYPE_0: 9453 phba->sli4_hba.u.if_type0.UERRLOregaddr = 9454 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 9455 phba->sli4_hba.u.if_type0.UERRHIregaddr = 9456 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 9457 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 9458 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 9459 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 9460 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 9461 phba->sli4_hba.SLIINTFregaddr = 9462 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 9463 break; 9464 case LPFC_SLI_INTF_IF_TYPE_2: 9465 phba->sli4_hba.u.if_type2.EQDregaddr = 9466 phba->sli4_hba.conf_regs_memmap_p + 9467 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9468 phba->sli4_hba.u.if_type2.ERR1regaddr = 9469 phba->sli4_hba.conf_regs_memmap_p + 9470 LPFC_CTL_PORT_ER1_OFFSET; 9471 phba->sli4_hba.u.if_type2.ERR2regaddr = 9472 phba->sli4_hba.conf_regs_memmap_p + 9473 LPFC_CTL_PORT_ER2_OFFSET; 9474 phba->sli4_hba.u.if_type2.CTRLregaddr = 9475 phba->sli4_hba.conf_regs_memmap_p + 9476 LPFC_CTL_PORT_CTL_OFFSET; 9477 phba->sli4_hba.u.if_type2.STATUSregaddr = 9478 phba->sli4_hba.conf_regs_memmap_p + 9479 LPFC_CTL_PORT_STA_OFFSET; 9480 phba->sli4_hba.SLIINTFregaddr = 9481 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 9482 phba->sli4_hba.PSMPHRregaddr = 9483 phba->sli4_hba.conf_regs_memmap_p + 9484 LPFC_CTL_PORT_SEM_OFFSET; 9485 phba->sli4_hba.RQDBregaddr = 9486 phba->sli4_hba.conf_regs_memmap_p + 9487 LPFC_ULP0_RQ_DOORBELL; 9488 phba->sli4_hba.WQDBregaddr = 9489 phba->sli4_hba.conf_regs_memmap_p + 9490 LPFC_ULP0_WQ_DOORBELL; 9491 phba->sli4_hba.CQDBregaddr = 9492 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 9493 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 9494 phba->sli4_hba.MQDBregaddr = 9495 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 9496 phba->sli4_hba.BMBXregaddr = 9497 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9498 break; 9499 case LPFC_SLI_INTF_IF_TYPE_6: 9500 phba->sli4_hba.u.if_type2.EQDregaddr = 9501 phba->sli4_hba.conf_regs_memmap_p + 9502
LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9503 phba->sli4_hba.u.if_type2.ERR1regaddr = 9504 phba->sli4_hba.conf_regs_memmap_p + 9505 LPFC_CTL_PORT_ER1_OFFSET; 9506 phba->sli4_hba.u.if_type2.ERR2regaddr = 9507 phba->sli4_hba.conf_regs_memmap_p + 9508 LPFC_CTL_PORT_ER2_OFFSET; 9509 phba->sli4_hba.u.if_type2.CTRLregaddr = 9510 phba->sli4_hba.conf_regs_memmap_p + 9511 LPFC_CTL_PORT_CTL_OFFSET; 9512 phba->sli4_hba.u.if_type2.STATUSregaddr = 9513 phba->sli4_hba.conf_regs_memmap_p + 9514 LPFC_CTL_PORT_STA_OFFSET; 9515 phba->sli4_hba.PSMPHRregaddr = 9516 phba->sli4_hba.conf_regs_memmap_p + 9517 LPFC_CTL_PORT_SEM_OFFSET; 9518 phba->sli4_hba.BMBXregaddr = 9519 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9520 break; 9521 case LPFC_SLI_INTF_IF_TYPE_1: 9522 default: 9523 dev_printk(KERN_ERR, &phba->pcidev->dev, 9524 "FATAL - unsupported SLI4 interface type - %d\n", 9525 if_type); 9526 break; 9527 } 9528 } 9529 9530 /** 9531 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 9532 * @phba: pointer to lpfc hba data structure. 9533 * @if_type: sli if type to operate on. 9534 * 9535 * This routine is invoked to set up SLI4 BAR1 register memory map. 9536 **/ 9537 static void 9538 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 9539 { 9540 switch (if_type) { 9541 case LPFC_SLI_INTF_IF_TYPE_0: 9542 phba->sli4_hba.PSMPHRregaddr = 9543 phba->sli4_hba.ctrl_regs_memmap_p + 9544 LPFC_SLIPORT_IF0_SMPHR; 9545 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9546 LPFC_HST_ISR0; 9547 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9548 LPFC_HST_IMR0; 9549 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9550 LPFC_HST_ISCR0; 9551 break; 9552 case LPFC_SLI_INTF_IF_TYPE_6: 9553 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9554 LPFC_IF6_RQ_DOORBELL; 9555 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9556 LPFC_IF6_WQ_DOORBELL; 9557 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9558 LPFC_IF6_CQ_DOORBELL; 9559 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9560 LPFC_IF6_EQ_DOORBELL; 9561 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9562 LPFC_IF6_MQ_DOORBELL; 9563 break; 9564 case LPFC_SLI_INTF_IF_TYPE_2: 9565 case LPFC_SLI_INTF_IF_TYPE_1: 9566 default: 9567 dev_err(&phba->pcidev->dev, 9568 "FATAL - unsupported SLI4 interface type - %d\n", 9569 if_type); 9570 break; 9571 } 9572 } 9573 9574 /** 9575 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 9576 * @phba: pointer to lpfc hba data structure. 9577 * @vf: virtual function number 9578 * 9579 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 9580 * based on the given virtual function number, @vf. 9581 * 9582 * Return 0 if successful, otherwise -ENODEV.
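* Each virtual function gets its own LPFC_VFR_PAGE_SIZE window of doorbell registers in BAR2, so every doorbell address below is computed as the BAR2 base plus (vf * LPFC_VFR_PAGE_SIZE) plus the per-register offset.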
9583 **/ 9584 static int 9585 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 9586 { 9587 if (vf > LPFC_VIR_FUNC_MAX) 9588 return -ENODEV; 9589 9590 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9591 vf * LPFC_VFR_PAGE_SIZE + 9592 LPFC_ULP0_RQ_DOORBELL); 9593 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9594 vf * LPFC_VFR_PAGE_SIZE + 9595 LPFC_ULP0_WQ_DOORBELL); 9596 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9597 vf * LPFC_VFR_PAGE_SIZE + 9598 LPFC_EQCQ_DOORBELL); 9599 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 9600 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9601 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 9602 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9603 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 9604 return 0; 9605 } 9606 9607 /** 9608 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 9609 * @phba: pointer to lpfc hba data structure. 9610 * 9611 * This routine is invoked to create the bootstrap mailbox 9612 * region consistent with the SLI-4 interface spec. This 9613 * routine allocates all memory necessary to communicate 9614 * mailbox commands to the port and sets up all alignment 9615 * needs. No locks are expected to be held when calling 9616 * this routine. 9617 * 9618 * Return codes 9619 * 0 - successful 9620 * -ENOMEM - could not allocate memory. 9621 **/ 9622 static int 9623 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 9624 { 9625 uint32_t bmbx_size; 9626 struct lpfc_dmabuf *dmabuf; 9627 struct dma_address *dma_address; 9628 uint32_t pa_addr; 9629 uint64_t phys_addr; 9630 9631 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 9632 if (!dmabuf) 9633 return -ENOMEM; 9634 9635 /* 9636 * The bootstrap mailbox region consists of 2 parts 9637 * plus an alignment restriction of 16 bytes. 9638 */ 9639 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 9640 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 9641 &dmabuf->phys, GFP_KERNEL); 9642 if (!dmabuf->virt) { 9643 kfree(dmabuf); 9644 return -ENOMEM; 9645 } 9646 9647 /* 9648 * Initialize the bootstrap mailbox pointers now so that the register 9649 * operations are simple later. The mailbox dma address is required 9650 * to be 16-byte aligned. Also align the virtual memory as each 9651 * mailbox is copied into the bmbx mailbox region before issuing the 9652 * command to the port. 9653 */ 9654 phba->sli4_hba.bmbx.dmabuf = dmabuf; 9655 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 9656 9657 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 9658 LPFC_ALIGN_16_BYTE); 9659 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 9660 LPFC_ALIGN_16_BYTE); 9661 9662 /* 9663 * Set the high and low physical addresses now. The SLI4 alignment 9664 * requirement is 16 bytes and the mailbox is posted to the port 9665 * as two 30-bit addresses. The other data is a bit marking whether 9666 * the 30-bit address is the high or low address. 9667 * Upcast bmbx aphys to 64 bits so the shift instruction compiles 9668 * cleanly on 32-bit machines.
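* Concretely, addr_hi carries physical address bits 63:34 and addr_lo carries bits 33:4; each 30-bit field is shifted left by 2 and tagged with its BIT1 marker, and bits 3:0 of the address are guaranteed zero by the 16-byte alignment, so no information is lost.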
9669 */ 9670 dma_address = &phba->sli4_hba.bmbx.dma_address; 9671 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 9672 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 9673 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 9674 LPFC_BMBX_BIT1_ADDR_HI); 9675 9676 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 9677 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 9678 LPFC_BMBX_BIT1_ADDR_LO); 9679 return 0; 9680 } 9681 9682 /** 9683 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 9684 * @phba: pointer to lpfc hba data structure. 9685 * 9686 * This routine is invoked to teardown the bootstrap mailbox 9687 * region and release all host resources. This routine requires 9688 * the caller to ensure all outstanding mailbox commands have been recovered, 9689 * that no additional mailbox commands are sent, and that interrupts are 9690 * disabled before calling this routine. 9691 * 9692 **/ 9693 static void 9694 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 9695 { 9696 dma_free_coherent(&phba->pcidev->dev, 9697 phba->sli4_hba.bmbx.bmbx_size, 9698 phba->sli4_hba.bmbx.dmabuf->virt, 9699 phba->sli4_hba.bmbx.dmabuf->phys); 9700 9701 kfree(phba->sli4_hba.bmbx.dmabuf); 9702 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 9703 } 9704 9705 static const char * const lpfc_topo_to_str[] = { 9706 "Loop then P2P", 9707 "Loopback", 9708 "P2P Only", 9709 "Unsupported", 9710 "Loop Only", 9711 "Unsupported", 9712 "P2P then Loop", 9713 }; 9714 9715 #define LINK_FLAGS_DEF 0x0 9716 #define LINK_FLAGS_P2P 0x1 9717 #define LINK_FLAGS_LOOP 0x2 9718 /** 9719 * lpfc_map_topology - Map the topology read from READ_CONFIG 9720 * @phba: pointer to lpfc hba data structure. 9721 * @rd_config: pointer to read config data 9722 * 9723 * This routine is invoked to map the topology values as read 9724 * from the read config mailbox command. If the persistent 9725 * topology feature is supported, the firmware will provide the 9726 * saved topology information to be used in INIT_LINK. 9727 **/ 9728 static void 9729 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 9730 { 9731 u8 ptv, tf, pt; 9732 9733 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 9734 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 9735 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 9736 9737 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9738 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 9739 ptv, tf, pt); 9740 if (!ptv) { 9741 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9742 "2019 FW does not support persistent topology " 9743 "Using driver parameter defined value [%s]", 9744 lpfc_topo_to_str[phba->cfg_topology]); 9745 return; 9746 } 9747 /* FW supports persistent topology - override module parameter value */ 9748 phba->hba_flag |= HBA_PERSISTENT_TOPO; 9749 9750 /* if ASIC_GEN_NUM >= 0xC */ 9751 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9752 LPFC_SLI_INTF_IF_TYPE_6) || 9753 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 9754 LPFC_SLI_INTF_FAMILY_G6)) { 9755 if (!tf) { 9756 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 9757 ? FLAGS_TOPOLOGY_MODE_LOOP 9758 : FLAGS_TOPOLOGY_MODE_PT_PT); 9759 } else { 9760 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 9761 } 9762 } else { /* G5 */ 9763 if (tf) { 9764 /* If topology failover set - pt is '0' or '1' */ 9765 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 9766 FLAGS_TOPOLOGY_MODE_LOOP_PT); 9767 } else { 9768 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 9769 ?
FLAGS_TOPOLOGY_MODE_PT_PT 9770 : FLAGS_TOPOLOGY_MODE_LOOP); 9771 } 9772 } 9773 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 9774 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9775 "2020 Using persistent topology value [%s]", 9776 lpfc_topo_to_str[phba->cfg_topology]); 9777 } else { 9778 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9779 "2021 Invalid topology values from FW " 9780 "Using driver parameter defined value [%s]", 9781 lpfc_topo_to_str[phba->cfg_topology]); 9782 } 9783 } 9784 9785 /** 9786 * lpfc_sli4_read_config - Get the config parameters. 9787 * @phba: pointer to lpfc hba data structure. 9788 * 9789 * This routine is invoked to read the configuration parameters from the HBA. 9790 * The configuration parameters are used to set the base and maximum values 9791 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 9792 * allocation for the port. 9793 * 9794 * Return codes 9795 * 0 - successful 9796 * -ENOMEM - No available memory 9797 * -EIO - The mailbox failed to complete successfully. 9798 **/ 9799 int 9800 lpfc_sli4_read_config(struct lpfc_hba *phba) 9801 { 9802 LPFC_MBOXQ_t *pmb; 9803 struct lpfc_mbx_read_config *rd_config; 9804 union lpfc_sli4_cfg_shdr *shdr; 9805 uint32_t shdr_status, shdr_add_status; 9806 struct lpfc_mbx_get_func_cfg *get_func_cfg; 9807 struct lpfc_rsrc_desc_fcfcoe *desc; 9808 char *pdesc_0; 9809 uint16_t forced_link_speed; 9810 uint32_t if_type, qmin; 9811 int length, i, rc = 0, rc2; 9812 9813 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9814 if (!pmb) { 9815 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9816 "2011 Unable to allocate memory for issuing " 9817 "SLI_CONFIG_SPECIAL mailbox command\n"); 9818 return -ENOMEM; 9819 } 9820 9821 lpfc_read_config(phba, pmb); 9822 9823 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 9824 if (rc != MBX_SUCCESS) { 9825 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9826 "2012 Mailbox failed , mbxCmd x%x " 9827 "READ_CONFIG, mbxStatus x%x\n", 9828 bf_get(lpfc_mqe_command, &pmb->u.mqe), 9829 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 9830 rc = -EIO; 9831 } else { 9832 rd_config = &pmb->u.mqe.un.rd_config; 9833 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 9834 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 9835 phba->sli4_hba.lnk_info.lnk_tp = 9836 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 9837 phba->sli4_hba.lnk_info.lnk_no = 9838 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 9839 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9840 "3081 lnk_type:%d, lnk_numb:%d\n", 9841 phba->sli4_hba.lnk_info.lnk_tp, 9842 phba->sli4_hba.lnk_info.lnk_no); 9843 } else 9844 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9845 "3082 Mailbox (x%x) returned ldv:x0\n", 9846 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 9847 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 9848 phba->bbcredit_support = 1; 9849 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 9850 } 9851 9852 phba->sli4_hba.conf_trunk = 9853 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 9854 phba->sli4_hba.extents_in_use = 9855 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 9856 phba->sli4_hba.max_cfg_param.max_xri = 9857 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 9858 /* Reduce resource usage in kdump environment */ 9859 if (is_kdump_kernel() && 9860 phba->sli4_hba.max_cfg_param.max_xri > 512) 9861 phba->sli4_hba.max_cfg_param.max_xri = 512; 9862 phba->sli4_hba.max_cfg_param.xri_base = 9863 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 9864 phba->sli4_hba.max_cfg_param.max_vpi = 9865 
bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 9866 /* Limit the max we support */ 9867 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 9868 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 9869 phba->sli4_hba.max_cfg_param.vpi_base = 9870 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 9871 phba->sli4_hba.max_cfg_param.max_rpi = 9872 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 9873 phba->sli4_hba.max_cfg_param.rpi_base = 9874 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 9875 phba->sli4_hba.max_cfg_param.max_vfi = 9876 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 9877 phba->sli4_hba.max_cfg_param.vfi_base = 9878 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 9879 phba->sli4_hba.max_cfg_param.max_fcfi = 9880 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 9881 phba->sli4_hba.max_cfg_param.max_eq = 9882 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 9883 phba->sli4_hba.max_cfg_param.max_rq = 9884 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 9885 phba->sli4_hba.max_cfg_param.max_wq = 9886 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 9887 phba->sli4_hba.max_cfg_param.max_cq = 9888 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 9889 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 9890 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 9891 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 9892 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 9893 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 9894 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 9895 phba->max_vports = phba->max_vpi; 9896 9897 /* Next decide on FPIN or Signal E2E CGN support 9898 * For congestion alarms and warnings valid combination are: 9899 * 1. FPIN alarms / FPIN warnings 9900 * 2. Signal alarms / Signal warnings 9901 * 3. FPIN alarms / Signal warnings 9902 * 4. Signal alarms / FPIN warnings 9903 * 9904 * Initialize the adapter frequency to 100 mSecs 9905 */ 9906 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 9907 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9908 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9909 9910 if (lpfc_use_cgn_signal) { 9911 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { 9912 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 9913 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 9914 } 9915 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { 9916 /* MUST support both alarm and warning 9917 * because EDC does not support alarm alone. 9918 */ 9919 if (phba->cgn_reg_signal != 9920 EDC_CG_SIG_WARN_ONLY) { 9921 /* Must support both or none */ 9922 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 9923 phba->cgn_reg_signal = 9924 EDC_CG_SIG_NOTSUPPORTED; 9925 } else { 9926 phba->cgn_reg_signal = 9927 EDC_CG_SIG_WARN_ALARM; 9928 phba->cgn_reg_fpin = 9929 LPFC_CGN_FPIN_NONE; 9930 } 9931 } 9932 } 9933 9934 /* Set the congestion initial signal and fpin values. */ 9935 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; 9936 phba->cgn_init_reg_signal = phba->cgn_reg_signal; 9937 9938 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 9939 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", 9940 phba->cgn_reg_signal, phba->cgn_reg_fpin); 9941 9942 lpfc_map_topology(phba, rd_config); 9943 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9944 "2003 cfg params Extents? 
%d " 9945 "XRI(B:%d M:%d), " 9946 "VPI(B:%d M:%d) " 9947 "VFI(B:%d M:%d) " 9948 "RPI(B:%d M:%d) " 9949 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", 9950 phba->sli4_hba.extents_in_use, 9951 phba->sli4_hba.max_cfg_param.xri_base, 9952 phba->sli4_hba.max_cfg_param.max_xri, 9953 phba->sli4_hba.max_cfg_param.vpi_base, 9954 phba->sli4_hba.max_cfg_param.max_vpi, 9955 phba->sli4_hba.max_cfg_param.vfi_base, 9956 phba->sli4_hba.max_cfg_param.max_vfi, 9957 phba->sli4_hba.max_cfg_param.rpi_base, 9958 phba->sli4_hba.max_cfg_param.max_rpi, 9959 phba->sli4_hba.max_cfg_param.max_fcfi, 9960 phba->sli4_hba.max_cfg_param.max_eq, 9961 phba->sli4_hba.max_cfg_param.max_cq, 9962 phba->sli4_hba.max_cfg_param.max_wq, 9963 phba->sli4_hba.max_cfg_param.max_rq, 9964 phba->lmt); 9965 9966 /* 9967 * Calculate queue resources based on how 9968 * many WQ/CQ/EQs are available. 9969 */ 9970 qmin = phba->sli4_hba.max_cfg_param.max_wq; 9971 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 9972 qmin = phba->sli4_hba.max_cfg_param.max_cq; 9973 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 9974 qmin = phba->sli4_hba.max_cfg_param.max_eq; 9975 /* 9976 * Whats left after this can go toward NVME / FCP. 9977 * The minus 4 accounts for ELS, NVME LS, MBOX 9978 * plus one extra. When configured for 9979 * NVMET, FCP io channel WQs are not created. 9980 */ 9981 qmin -= 4; 9982 9983 /* Check to see if there is enough for NVME */ 9984 if ((phba->cfg_irq_chann > qmin) || 9985 (phba->cfg_hdw_queue > qmin)) { 9986 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9987 "2005 Reducing Queues - " 9988 "FW resource limitation: " 9989 "WQ %d CQ %d EQ %d: min %d: " 9990 "IRQ %d HDWQ %d\n", 9991 phba->sli4_hba.max_cfg_param.max_wq, 9992 phba->sli4_hba.max_cfg_param.max_cq, 9993 phba->sli4_hba.max_cfg_param.max_eq, 9994 qmin, phba->cfg_irq_chann, 9995 phba->cfg_hdw_queue); 9996 9997 if (phba->cfg_irq_chann > qmin) 9998 phba->cfg_irq_chann = qmin; 9999 if (phba->cfg_hdw_queue > qmin) 10000 phba->cfg_hdw_queue = qmin; 10001 } 10002 } 10003 10004 if (rc) 10005 goto read_cfg_out; 10006 10007 /* Update link speed if forced link speed is supported */ 10008 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10009 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10010 forced_link_speed = 10011 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 10012 if (forced_link_speed) { 10013 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 10014 10015 switch (forced_link_speed) { 10016 case LINK_SPEED_1G: 10017 phba->cfg_link_speed = 10018 LPFC_USER_LINK_SPEED_1G; 10019 break; 10020 case LINK_SPEED_2G: 10021 phba->cfg_link_speed = 10022 LPFC_USER_LINK_SPEED_2G; 10023 break; 10024 case LINK_SPEED_4G: 10025 phba->cfg_link_speed = 10026 LPFC_USER_LINK_SPEED_4G; 10027 break; 10028 case LINK_SPEED_8G: 10029 phba->cfg_link_speed = 10030 LPFC_USER_LINK_SPEED_8G; 10031 break; 10032 case LINK_SPEED_10G: 10033 phba->cfg_link_speed = 10034 LPFC_USER_LINK_SPEED_10G; 10035 break; 10036 case LINK_SPEED_16G: 10037 phba->cfg_link_speed = 10038 LPFC_USER_LINK_SPEED_16G; 10039 break; 10040 case LINK_SPEED_32G: 10041 phba->cfg_link_speed = 10042 LPFC_USER_LINK_SPEED_32G; 10043 break; 10044 case LINK_SPEED_64G: 10045 phba->cfg_link_speed = 10046 LPFC_USER_LINK_SPEED_64G; 10047 break; 10048 case 0xffff: 10049 phba->cfg_link_speed = 10050 LPFC_USER_LINK_SPEED_AUTO; 10051 break; 10052 default: 10053 lpfc_printf_log(phba, KERN_ERR, 10054 LOG_TRACE_EVENT, 10055 "0047 Unrecognized link " 10056 "speed : %d\n", 10057 forced_link_speed); 10058 phba->cfg_link_speed = 10059 LPFC_USER_LINK_SPEED_AUTO; 
10060 } 10061 } 10062 } 10063 10064 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 10065 length = phba->sli4_hba.max_cfg_param.max_xri - 10066 lpfc_sli4_get_els_iocb_cnt(phba); 10067 if (phba->cfg_hba_queue_depth > length) { 10068 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10069 "3361 HBA queue depth changed from %d to %d\n", 10070 phba->cfg_hba_queue_depth, length); 10071 phba->cfg_hba_queue_depth = length; 10072 } 10073 10074 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 10075 LPFC_SLI_INTF_IF_TYPE_2) 10076 goto read_cfg_out; 10077 10078 /* get the pf# and vf# for SLI4 if_type 2 port */ 10079 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 10080 sizeof(struct lpfc_sli4_cfg_mhdr)); 10081 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 10082 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 10083 length, LPFC_SLI4_MBX_EMBED); 10084 10085 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10086 shdr = (union lpfc_sli4_cfg_shdr *) 10087 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 10088 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10089 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10090 if (rc2 || shdr_status || shdr_add_status) { 10091 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10092 "3026 Mailbox failed , mbxCmd x%x " 10093 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 10094 bf_get(lpfc_mqe_command, &pmb->u.mqe), 10095 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 10096 goto read_cfg_out; 10097 } 10098 10099 /* search for fc_fcoe resrouce descriptor */ 10100 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 10101 10102 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 10103 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 10104 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 10105 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 10106 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 10107 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 10108 goto read_cfg_out; 10109 10110 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 10111 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 10112 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 10113 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 10114 phba->sli4_hba.iov.pf_number = 10115 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 10116 phba->sli4_hba.iov.vf_number = 10117 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 10118 break; 10119 } 10120 } 10121 10122 if (i < LPFC_RSRC_DESC_MAX_NUM) 10123 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10124 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 10125 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 10126 phba->sli4_hba.iov.vf_number); 10127 else 10128 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10129 "3028 GET_FUNCTION_CONFIG: failed to find " 10130 "Resource Descriptor:x%x\n", 10131 LPFC_RSRC_DESC_TYPE_FCFCOE); 10132 10133 read_cfg_out: 10134 mempool_free(pmb, phba->mbox_mem_pool); 10135 return rc; 10136 } 10137 10138 /** 10139 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 10140 * @phba: pointer to lpfc hba data structure. 10141 * 10142 * This routine is invoked to setup the port-side endian order when 10143 * the port if_type is 0. This routine has no function for other 10144 * if_types. 10145 * 10146 * Return codes 10147 * 0 - successful 10148 * -ENOMEM - No available memory 10149 * -EIO - The mailbox failed to complete successfully. 
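* The mailbox payload is simply the two well-known words HOST_ENDIAN_LOW_WORD0 and HOST_ENDIAN_HIGH_WORD1; the port presumably compares them against the expected pattern to learn the host's byte order (inferred from this routine's purpose rather than stated explicitly here).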
10150 **/ 10151 static int 10152 lpfc_setup_endian_order(struct lpfc_hba *phba) 10153 { 10154 LPFC_MBOXQ_t *mboxq; 10155 uint32_t if_type, rc = 0; 10156 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 10157 HOST_ENDIAN_HIGH_WORD1}; 10158 10159 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10160 switch (if_type) { 10161 case LPFC_SLI_INTF_IF_TYPE_0: 10162 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10163 GFP_KERNEL); 10164 if (!mboxq) { 10165 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10166 "0492 Unable to allocate memory for " 10167 "issuing SLI_CONFIG_SPECIAL mailbox " 10168 "command\n"); 10169 return -ENOMEM; 10170 } 10171 10172 /* 10173 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 10174 * two words to contain special data values and no other data. 10175 */ 10176 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 10177 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 10178 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10179 if (rc != MBX_SUCCESS) { 10180 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10181 "0493 SLI_CONFIG_SPECIAL mailbox " 10182 "failed with status x%x\n", 10183 rc); 10184 rc = -EIO; 10185 } 10186 mempool_free(mboxq, phba->mbox_mem_pool); 10187 break; 10188 case LPFC_SLI_INTF_IF_TYPE_6: 10189 case LPFC_SLI_INTF_IF_TYPE_2: 10190 case LPFC_SLI_INTF_IF_TYPE_1: 10191 default: 10192 break; 10193 } 10194 return rc; 10195 } 10196 10197 /** 10198 * lpfc_sli4_queue_verify - Verify and update EQ counts 10199 * @phba: pointer to lpfc hba data structure. 10200 * 10201 * This routine is invoked to check the user settable queue counts for EQs. 10202 * After this routine is called the counts will be set to valid values that 10203 * adhere to the constraints of the system's interrupt vectors and the port's 10204 * queue resources. 
10205 * 10206 * Return codes 10207 * 0 - successful 10208 * -ENOMEM - No available memory 10209 **/ 10210 static int 10211 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 10212 { 10213 /* 10214 * Sanity check for configured queue parameters against the run-time 10215 * device parameters 10216 */ 10217 10218 if (phba->nvmet_support) { 10219 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 10220 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 10221 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 10222 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 10223 } 10224 10225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10226 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 10227 phba->cfg_hdw_queue, phba->cfg_irq_chann, 10228 phba->cfg_nvmet_mrq); 10229 10230 /* Get EQ depth from module parameter, fake the default for now */ 10231 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10232 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10233 10234 /* Get CQ depth from module parameter, fake the default for now */ 10235 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10236 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10237 return 0; 10238 } 10239 10240 static int 10241 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 10242 { 10243 struct lpfc_queue *qdesc; 10244 u32 wqesize; 10245 int cpu; 10246 10247 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 10248 /* Create Fast Path IO CQs */ 10249 if (phba->enab_exp_wqcq_pages) 10250 /* Increase the CQ size when WQEs contain an embedded cdb */ 10251 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10252 phba->sli4_hba.cq_esize, 10253 LPFC_CQE_EXP_COUNT, cpu); 10254 10255 else 10256 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10257 phba->sli4_hba.cq_esize, 10258 phba->sli4_hba.cq_ecount, cpu); 10259 if (!qdesc) { 10260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10261 "0499 Failed allocate fast-path IO CQ (%d)\n", 10262 idx); 10263 return 1; 10264 } 10265 qdesc->qe_valid = 1; 10266 qdesc->hdwq = idx; 10267 qdesc->chann = cpu; 10268 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 10269 10270 /* Create Fast Path IO WQs */ 10271 if (phba->enab_exp_wqcq_pages) { 10272 /* Increase the WQ size when WQEs contain an embedded cdb */ 10273 wqesize = (phba->fcp_embed_io) ? 10274 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 10275 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10276 wqesize, 10277 LPFC_WQE_EXP_COUNT, cpu); 10278 } else 10279 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10280 phba->sli4_hba.wq_esize, 10281 phba->sli4_hba.wq_ecount, cpu); 10282 10283 if (!qdesc) { 10284 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10285 "0503 Failed allocate fast-path IO WQ (%d)\n", 10286 idx); 10287 return 1; 10288 } 10289 qdesc->hdwq = idx; 10290 qdesc->chann = cpu; 10291 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 10292 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10293 return 0; 10294 } 10295 10296 /** 10297 * lpfc_sli4_queue_create - Create all the SLI4 queues 10298 * @phba: pointer to lpfc hba data structure. 10299 * 10300 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 10301 * operation. For each SLI4 queue type, the parameters such as queue entry 10302 * count (queue depth) shall be taken from the module parameter. For now, 10303 * we just use some constant number as place holder. 10304 * 10305 * Return codes 10306 * 0 - successful 10307 * -ENOMEM - No availble memory 10308 * -EIO - The mailbox failed to complete successfully. 
10309 **/ 10310 int 10311 lpfc_sli4_queue_create(struct lpfc_hba *phba) 10312 { 10313 struct lpfc_queue *qdesc; 10314 int idx, cpu, eqcpu; 10315 struct lpfc_sli4_hdw_queue *qp; 10316 struct lpfc_vector_map_info *cpup; 10317 struct lpfc_vector_map_info *eqcpup; 10318 struct lpfc_eq_intr_info *eqi; 10319 10320 /* 10321 * Create HBA Record arrays. 10322 * Both NVME and FCP will share the same vectors / EQs 10323 */ 10324 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 10325 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 10326 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 10327 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 10328 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 10329 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 10330 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10331 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10332 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10333 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10334 10335 if (!phba->sli4_hba.hdwq) { 10336 phba->sli4_hba.hdwq = kcalloc( 10337 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 10338 GFP_KERNEL); 10339 if (!phba->sli4_hba.hdwq) { 10340 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10341 "6427 Failed allocate memory for " 10342 "fast-path Hardware Queue array\n"); 10343 goto out_error; 10344 } 10345 /* Prepare hardware queues to take IO buffers */ 10346 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10347 qp = &phba->sli4_hba.hdwq[idx]; 10348 spin_lock_init(&qp->io_buf_list_get_lock); 10349 spin_lock_init(&qp->io_buf_list_put_lock); 10350 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 10351 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 10352 qp->get_io_bufs = 0; 10353 qp->put_io_bufs = 0; 10354 qp->total_io_bufs = 0; 10355 spin_lock_init(&qp->abts_io_buf_list_lock); 10356 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 10357 qp->abts_scsi_io_bufs = 0; 10358 qp->abts_nvme_io_bufs = 0; 10359 INIT_LIST_HEAD(&qp->sgl_list); 10360 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 10361 spin_lock_init(&qp->hdwq_lock); 10362 } 10363 } 10364 10365 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10366 if (phba->nvmet_support) { 10367 phba->sli4_hba.nvmet_cqset = kcalloc( 10368 phba->cfg_nvmet_mrq, 10369 sizeof(struct lpfc_queue *), 10370 GFP_KERNEL); 10371 if (!phba->sli4_hba.nvmet_cqset) { 10372 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10373 "3121 Fail allocate memory for " 10374 "fast-path CQ set array\n"); 10375 goto out_error; 10376 } 10377 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 10378 phba->cfg_nvmet_mrq, 10379 sizeof(struct lpfc_queue *), 10380 GFP_KERNEL); 10381 if (!phba->sli4_hba.nvmet_mrq_hdr) { 10382 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10383 "3122 Fail allocate memory for " 10384 "fast-path RQ set hdr array\n"); 10385 goto out_error; 10386 } 10387 phba->sli4_hba.nvmet_mrq_data = kcalloc( 10388 phba->cfg_nvmet_mrq, 10389 sizeof(struct lpfc_queue *), 10390 GFP_KERNEL); 10391 if (!phba->sli4_hba.nvmet_mrq_data) { 10392 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10393 "3124 Fail allocate memory for " 10394 "fast-path RQ set data array\n"); 10395 goto out_error; 10396 } 10397 } 10398 } 10399 10400 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10401 10402 /* Create HBA Event Queues (EQs) */ 10403 for_each_present_cpu(cpu) { 10404 /* We only want to create 1 EQ per vector, even though 10405 * multiple CPUs might be using that vector, so only the 10406 * CPUs flagged LPFC_CPU_FIRST_IRQ are selected here.
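* A second pass below then points every other hardware queue that shares an IRQ vector at the EQ allocated here for that vector.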
10407 */ 10408 cpup = &phba->sli4_hba.cpu_map[cpu]; 10409 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10410 continue; 10411 10412 /* Get a ptr to the Hardware Queue associated with this CPU */ 10413 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10414 10415 /* Allocate an EQ */ 10416 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10417 phba->sli4_hba.eq_esize, 10418 phba->sli4_hba.eq_ecount, cpu); 10419 if (!qdesc) { 10420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10421 "0497 Failed allocate EQ (%d)\n", 10422 cpup->hdwq); 10423 goto out_error; 10424 } 10425 qdesc->qe_valid = 1; 10426 qdesc->hdwq = cpup->hdwq; 10427 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 10428 qdesc->last_cpu = qdesc->chann; 10429 10430 /* Save the allocated EQ in the Hardware Queue */ 10431 qp->hba_eq = qdesc; 10432 10433 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 10434 list_add(&qdesc->cpu_list, &eqi->list); 10435 } 10436 10437 /* Now we need to populate the other Hardware Queues, that share 10438 * an IRQ vector, with the associated EQ ptr. 10439 */ 10440 for_each_present_cpu(cpu) { 10441 cpup = &phba->sli4_hba.cpu_map[cpu]; 10442 10443 /* Check for EQ already allocated in previous loop */ 10444 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 10445 continue; 10446 10447 /* Check for multiple CPUs per hdwq */ 10448 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10449 if (qp->hba_eq) 10450 continue; 10451 10452 /* We need to share an EQ for this hdwq */ 10453 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 10454 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 10455 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 10456 } 10457 10458 /* Allocate IO Path SLI4 CQ/WQs */ 10459 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10460 if (lpfc_alloc_io_wq_cq(phba, idx)) 10461 goto out_error; 10462 } 10463 10464 if (phba->nvmet_support) { 10465 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10466 cpu = lpfc_find_cpu_handle(phba, idx, 10467 LPFC_FIND_BY_HDWQ); 10468 qdesc = lpfc_sli4_queue_alloc(phba, 10469 LPFC_DEFAULT_PAGE_SIZE, 10470 phba->sli4_hba.cq_esize, 10471 phba->sli4_hba.cq_ecount, 10472 cpu); 10473 if (!qdesc) { 10474 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10475 "3142 Failed allocate NVME " 10476 "CQ Set (%d)\n", idx); 10477 goto out_error; 10478 } 10479 qdesc->qe_valid = 1; 10480 qdesc->hdwq = idx; 10481 qdesc->chann = cpu; 10482 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 10483 } 10484 } 10485 10486 /* 10487 * Create Slow Path Completion Queues (CQs) 10488 */ 10489 10490 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 10491 /* Create slow-path Mailbox Command Complete Queue */ 10492 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10493 phba->sli4_hba.cq_esize, 10494 phba->sli4_hba.cq_ecount, cpu); 10495 if (!qdesc) { 10496 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10497 "0500 Failed allocate slow-path mailbox CQ\n"); 10498 goto out_error; 10499 } 10500 qdesc->qe_valid = 1; 10501 phba->sli4_hba.mbx_cq = qdesc; 10502 10503 /* Create slow-path ELS Complete Queue */ 10504 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10505 phba->sli4_hba.cq_esize, 10506 phba->sli4_hba.cq_ecount, cpu); 10507 if (!qdesc) { 10508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10509 "0501 Failed allocate slow-path ELS CQ\n"); 10510 goto out_error; 10511 } 10512 qdesc->qe_valid = 1; 10513 qdesc->chann = cpu; 10514 phba->sli4_hba.els_cq = qdesc; 10515 10516 10517 /* 10518 * Create Slow Path Work Queues (WQs) 10519 */ 10520 10521 /* Create Mailbox 
Command Queue */ 10522 10523 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10524 phba->sli4_hba.mq_esize, 10525 phba->sli4_hba.mq_ecount, cpu); 10526 if (!qdesc) { 10527 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10528 "0505 Failed allocate slow-path MQ\n"); 10529 goto out_error; 10530 } 10531 qdesc->chann = cpu; 10532 phba->sli4_hba.mbx_wq = qdesc; 10533 10534 /* 10535 * Create ELS Work Queues 10536 */ 10537 10538 /* Create slow-path ELS Work Queue */ 10539 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10540 phba->sli4_hba.wq_esize, 10541 phba->sli4_hba.wq_ecount, cpu); 10542 if (!qdesc) { 10543 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10544 "0504 Failed allocate slow-path ELS WQ\n"); 10545 goto out_error; 10546 } 10547 qdesc->chann = cpu; 10548 phba->sli4_hba.els_wq = qdesc; 10549 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10550 10551 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10552 /* Create NVME LS Complete Queue */ 10553 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10554 phba->sli4_hba.cq_esize, 10555 phba->sli4_hba.cq_ecount, cpu); 10556 if (!qdesc) { 10557 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10558 "6079 Failed allocate NVME LS CQ\n"); 10559 goto out_error; 10560 } 10561 qdesc->chann = cpu; 10562 qdesc->qe_valid = 1; 10563 phba->sli4_hba.nvmels_cq = qdesc; 10564 10565 /* Create NVME LS Work Queue */ 10566 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10567 phba->sli4_hba.wq_esize, 10568 phba->sli4_hba.wq_ecount, cpu); 10569 if (!qdesc) { 10570 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10571 "6080 Failed allocate NVME LS WQ\n"); 10572 goto out_error; 10573 } 10574 qdesc->chann = cpu; 10575 phba->sli4_hba.nvmels_wq = qdesc; 10576 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10577 } 10578 10579 /* 10580 * Create Receive Queue (RQ) 10581 */ 10582 10583 /* Create Receive Queue for header */ 10584 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10585 phba->sli4_hba.rq_esize, 10586 phba->sli4_hba.rq_ecount, cpu); 10587 if (!qdesc) { 10588 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10589 "0506 Failed allocate receive HRQ\n"); 10590 goto out_error; 10591 } 10592 phba->sli4_hba.hdr_rq = qdesc; 10593 10594 /* Create Receive Queue for data */ 10595 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10596 phba->sli4_hba.rq_esize, 10597 phba->sli4_hba.rq_ecount, cpu); 10598 if (!qdesc) { 10599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10600 "0507 Failed allocate receive DRQ\n"); 10601 goto out_error; 10602 } 10603 phba->sli4_hba.dat_rq = qdesc; 10604 10605 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 10606 phba->nvmet_support) { 10607 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10608 cpu = lpfc_find_cpu_handle(phba, idx, 10609 LPFC_FIND_BY_HDWQ); 10610 /* Create NVMET Receive Queue for header */ 10611 qdesc = lpfc_sli4_queue_alloc(phba, 10612 LPFC_DEFAULT_PAGE_SIZE, 10613 phba->sli4_hba.rq_esize, 10614 LPFC_NVMET_RQE_DEF_COUNT, 10615 cpu); 10616 if (!qdesc) { 10617 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10618 "3146 Failed allocate " 10619 "receive HRQ\n"); 10620 goto out_error; 10621 } 10622 qdesc->hdwq = idx; 10623 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 10624 10625 /* Only needed for header of RQ pair */ 10626 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 10627 GFP_KERNEL, 10628 cpu_to_node(cpu)); 10629 if (qdesc->rqbp == NULL) { 10630 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10631 "6131 
Failed allocate " 10632 "Header RQBP\n"); 10633 goto out_error; 10634 } 10635 10636 /* Put list in known state in case driver load fails. */ 10637 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 10638 10639 /* Create NVMET Receive Queue for data */ 10640 qdesc = lpfc_sli4_queue_alloc(phba, 10641 LPFC_DEFAULT_PAGE_SIZE, 10642 phba->sli4_hba.rq_esize, 10643 LPFC_NVMET_RQE_DEF_COUNT, 10644 cpu); 10645 if (!qdesc) { 10646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10647 "3156 Failed allocate " 10648 "receive DRQ\n"); 10649 goto out_error; 10650 } 10651 qdesc->hdwq = idx; 10652 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 10653 } 10654 } 10655 10656 /* Clear NVME stats */ 10657 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10658 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10659 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 10660 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 10661 } 10662 } 10663 10664 /* Clear SCSI stats */ 10665 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 10666 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10667 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 10668 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 10669 } 10670 } 10671 10672 return 0; 10673 10674 out_error: 10675 lpfc_sli4_queue_destroy(phba); 10676 return -ENOMEM; 10677 } 10678 10679 static inline void 10680 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 10681 { 10682 if (*qp != NULL) { 10683 lpfc_sli4_queue_free(*qp); 10684 *qp = NULL; 10685 } 10686 } 10687 10688 static inline void 10689 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 10690 { 10691 int idx; 10692 10693 if (*qs == NULL) 10694 return; 10695 10696 for (idx = 0; idx < max; idx++) 10697 __lpfc_sli4_release_queue(&(*qs)[idx]); 10698 10699 kfree(*qs); 10700 *qs = NULL; 10701 } 10702 10703 static inline void 10704 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 10705 { 10706 struct lpfc_sli4_hdw_queue *hdwq; 10707 struct lpfc_queue *eq; 10708 uint32_t idx; 10709 10710 hdwq = phba->sli4_hba.hdwq; 10711 10712 /* Loop thru all Hardware Queues */ 10713 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10714 /* Free the CQ/WQ corresponding to the Hardware Queue */ 10715 lpfc_sli4_queue_free(hdwq[idx].io_cq); 10716 lpfc_sli4_queue_free(hdwq[idx].io_wq); 10717 hdwq[idx].hba_eq = NULL; 10718 hdwq[idx].io_cq = NULL; 10719 hdwq[idx].io_wq = NULL; 10720 if (phba->cfg_xpsgl && !phba->nvmet_support) 10721 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 10722 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 10723 } 10724 /* Loop thru all IRQ vectors */ 10725 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10726 /* Free the EQ corresponding to the IRQ vector */ 10727 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 10728 lpfc_sli4_queue_free(eq); 10729 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 10730 } 10731 } 10732 10733 /** 10734 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 10735 * @phba: pointer to lpfc hba data structure. 10736 * 10737 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 10738 * operation. 10739 * 10740 * Return codes 10741 * 0 - successful 10742 * -ENOMEM - No available memory 10743 * -EIO - The mailbox failed to complete successfully. 10744 **/ 10745 void 10746 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 10747 { 10748 /* 10749 * Set FREE_INIT before beginning to free the queues. 10750 * Wait until the users of queues to acknowledge to 10751 * release queues by clearing FREE_WAIT. 
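 *
 * A rough sketch of the handshake this implies (an assumption drawn
 * from the flags used below, not a verbatim caller from this file):
 * any code that walks the queues first advertises itself under
 * hbalock,
 *
 *	spin_lock_irq(&phba->hbalock);
 *	phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);
 *	... use the queues ...
 *	spin_lock_irq(&phba->hbalock);
 *	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);
 *
 * so that this routine can set LPFC_QUEUE_FREE_INIT and then spin,
 * sleeping 20 ms at a time, until LPFC_QUEUE_FREE_WAIT clears.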
10752 */ 10753 spin_lock_irq(&phba->hbalock); 10754 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 10755 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 10756 spin_unlock_irq(&phba->hbalock); 10757 msleep(20); 10758 spin_lock_irq(&phba->hbalock); 10759 } 10760 spin_unlock_irq(&phba->hbalock); 10761 10762 lpfc_sli4_cleanup_poll_list(phba); 10763 10764 /* Release HBA eqs */ 10765 if (phba->sli4_hba.hdwq) 10766 lpfc_sli4_release_hdwq(phba); 10767 10768 if (phba->nvmet_support) { 10769 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 10770 phba->cfg_nvmet_mrq); 10771 10772 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 10773 phba->cfg_nvmet_mrq); 10774 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 10775 phba->cfg_nvmet_mrq); 10776 } 10777 10778 /* Release mailbox command work queue */ 10779 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 10780 10781 /* Release ELS work queue */ 10782 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 10783 10784 /* Release NVME LS work queue */ 10785 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 10786 10787 /* Release unsolicited receive queue */ 10788 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 10789 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 10790 10791 /* Release ELS complete queue */ 10792 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 10793 10794 /* Release NVME LS complete queue */ 10795 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 10796 10797 /* Release mailbox command complete queue */ 10798 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 10799 10800 /* Everything on this list has been freed */ 10801 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10802 10803 /* Done with freeing the queues */ 10804 spin_lock_irq(&phba->hbalock); 10805 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 10806 spin_unlock_irq(&phba->hbalock); 10807 } 10808 10809 int 10810 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 10811 { 10812 struct lpfc_rqb *rqbp; 10813 struct lpfc_dmabuf *h_buf; 10814 struct rqb_dmabuf *rqb_buffer; 10815 10816 rqbp = rq->rqbp; 10817 while (!list_empty(&rqbp->rqb_buffer_list)) { 10818 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 10819 struct lpfc_dmabuf, list); 10820 10821 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 10822 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 10823 rqbp->buffer_count--; 10824 } 10825 return 1; 10826 } 10827 10828 static int 10829 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 10830 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 10831 int qidx, uint32_t qtype) 10832 { 10833 struct lpfc_sli_ring *pring; 10834 int rc; 10835 10836 if (!eq || !cq || !wq) { 10837 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10838 "6085 Fast-path %s (%d) not allocated\n", 10839 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 10840 return -ENOMEM; 10841 } 10842 10843 /* Create the CQ first */ 10844 rc = lpfc_cq_create(phba, cq, eq, 10845 (qtype == LPFC_MBOX) ?
LPFC_MCQ : LPFC_WCQ, qtype); 10846 if (rc) { 10847 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10848 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 10849 qidx, (uint32_t)rc); 10850 return rc; 10851 } 10852 10853 if (qtype != LPFC_MBOX) { 10854 /* Setup cq_map for fast lookup */ 10855 if (cq_map) 10856 *cq_map = cq->queue_id; 10857 10858 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10859 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 10860 qidx, cq->queue_id, qidx, eq->queue_id); 10861 10862 /* create the wq */ 10863 rc = lpfc_wq_create(phba, wq, cq, qtype); 10864 if (rc) { 10865 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10866 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 10867 qidx, (uint32_t)rc); 10868 /* no need to tear down cq - caller will do so */ 10869 return rc; 10870 } 10871 10872 /* Bind this CQ/WQ to the NVME ring */ 10873 pring = wq->pring; 10874 pring->sli.sli4.wqp = (void *)wq; 10875 cq->pring = pring; 10876 10877 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10878 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 10879 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 10880 } else { 10881 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 10882 if (rc) { 10883 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10884 "0539 Failed setup of slow-path MQ: " 10885 "rc = 0x%x\n", rc); 10886 /* no need to tear down cq - caller will do so */ 10887 return rc; 10888 } 10889 10890 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10891 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 10892 phba->sli4_hba.mbx_wq->queue_id, 10893 phba->sli4_hba.mbx_cq->queue_id); 10894 } 10895 10896 return 0; 10897 } 10898 10899 /** 10900 * lpfc_setup_cq_lookup - Setup the CQ lookup table 10901 * @phba: pointer to lpfc hba data structure. 10902 * 10903 * This routine will populate the cq_lookup table by all 10904 * available CQ queue_id's. 10905 **/ 10906 static void 10907 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 10908 { 10909 struct lpfc_queue *eq, *childq; 10910 int qidx; 10911 10912 memset(phba->sli4_hba.cq_lookup, 0, 10913 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 10914 /* Loop thru all IRQ vectors */ 10915 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 10916 /* Get the EQ corresponding to the IRQ vector */ 10917 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 10918 if (!eq) 10919 continue; 10920 /* Loop through all CQs associated with that EQ */ 10921 list_for_each_entry(childq, &eq->child_list, list) { 10922 if (childq->queue_id > phba->sli4_hba.cq_max) 10923 continue; 10924 if (childq->subtype == LPFC_IO) 10925 phba->sli4_hba.cq_lookup[childq->queue_id] = 10926 childq; 10927 } 10928 } 10929 } 10930 10931 /** 10932 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 10933 * @phba: pointer to lpfc hba data structure. 10934 * 10935 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 10936 * operation. 10937 * 10938 * Return codes 10939 * 0 - successful 10940 * -ENOMEM - No available memory 10941 * -EIO - The mailbox failed to complete successfully. 
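 *
 * Rough call order assumed around this routine (inferred from the error
 * handling below): queue memory is allocated first by the queue-create
 * path earlier in this file, this routine then registers every queue
 * with the port, and on any failure it unwinds through
 * lpfc_sli4_queue_unset() before returning the error to the caller.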
10942 **/ 10943 int 10944 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 10945 { 10946 uint32_t shdr_status, shdr_add_status; 10947 union lpfc_sli4_cfg_shdr *shdr; 10948 struct lpfc_vector_map_info *cpup; 10949 struct lpfc_sli4_hdw_queue *qp; 10950 LPFC_MBOXQ_t *mboxq; 10951 int qidx, cpu; 10952 uint32_t length, usdelay; 10953 int rc = -ENOMEM; 10954 10955 /* Check for dual-ULP support */ 10956 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10957 if (!mboxq) { 10958 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10959 "3249 Unable to allocate memory for " 10960 "QUERY_FW_CFG mailbox command\n"); 10961 return -ENOMEM; 10962 } 10963 length = (sizeof(struct lpfc_mbx_query_fw_config) - 10964 sizeof(struct lpfc_sli4_cfg_mhdr)); 10965 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10966 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 10967 length, LPFC_SLI4_MBX_EMBED); 10968 10969 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10970 10971 shdr = (union lpfc_sli4_cfg_shdr *) 10972 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 10973 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10974 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10975 if (shdr_status || shdr_add_status || rc) { 10976 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10977 "3250 QUERY_FW_CFG mailbox failed with status " 10978 "x%x add_status x%x, mbx status x%x\n", 10979 shdr_status, shdr_add_status, rc); 10980 mempool_free(mboxq, phba->mbox_mem_pool); 10981 rc = -ENXIO; 10982 goto out_error; 10983 } 10984 10985 phba->sli4_hba.fw_func_mode = 10986 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 10987 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 10988 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 10989 phba->sli4_hba.physical_port = 10990 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 10991 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10992 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 10993 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 10994 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 10995 10996 mempool_free(mboxq, phba->mbox_mem_pool); 10997 10998 /* 10999 * Set up HBA Event Queues (EQs) 11000 */ 11001 qp = phba->sli4_hba.hdwq; 11002 11003 /* Set up HBA event queue */ 11004 if (!qp) { 11005 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11006 "3147 Fast-path EQs not allocated\n"); 11007 rc = -ENOMEM; 11008 goto out_error; 11009 } 11010 11011 /* Loop thru all IRQ vectors */ 11012 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11013 /* Create HBA Event Queues (EQs) in order */ 11014 for_each_present_cpu(cpu) { 11015 cpup = &phba->sli4_hba.cpu_map[cpu]; 11016 11017 /* Look for the CPU thats using that vector with 11018 * LPFC_CPU_FIRST_IRQ set. 
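 * Only one CPU per IRQ vector carries LPFC_CPU_FIRST_IRQ, so each EQ
 * is created exactly once even when several CPUs share that vector.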
11019 */ 11020 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11021 continue; 11022 if (qidx != cpup->eq) 11023 continue; 11024 11025 /* Create an EQ for that vector */ 11026 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 11027 phba->cfg_fcp_imax); 11028 if (rc) { 11029 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11030 "0523 Failed setup of fast-path" 11031 " EQ (%d), rc = 0x%x\n", 11032 cpup->eq, (uint32_t)rc); 11033 goto out_destroy; 11034 } 11035 11036 /* Save the EQ for that vector in the hba_eq_hdl */ 11037 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 11038 qp[cpup->hdwq].hba_eq; 11039 11040 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11041 "2584 HBA EQ setup: queue[%d]-id=%d\n", 11042 cpup->eq, 11043 qp[cpup->hdwq].hba_eq->queue_id); 11044 } 11045 } 11046 11047 /* Loop thru all Hardware Queues */ 11048 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11049 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 11050 cpup = &phba->sli4_hba.cpu_map[cpu]; 11051 11052 /* Create the CQ/WQ corresponding to the Hardware Queue */ 11053 rc = lpfc_create_wq_cq(phba, 11054 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 11055 qp[qidx].io_cq, 11056 qp[qidx].io_wq, 11057 &phba->sli4_hba.hdwq[qidx].io_cq_map, 11058 qidx, 11059 LPFC_IO); 11060 if (rc) { 11061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11062 "0535 Failed to setup fastpath " 11063 "IO WQ/CQ (%d), rc = 0x%x\n", 11064 qidx, (uint32_t)rc); 11065 goto out_destroy; 11066 } 11067 } 11068 11069 /* 11070 * Set up Slow Path Complete Queues (CQs) 11071 */ 11072 11073 /* Set up slow-path MBOX CQ/MQ */ 11074 11075 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 11076 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11077 "0528 %s not allocated\n", 11078 phba->sli4_hba.mbx_cq ? 11079 "Mailbox WQ" : "Mailbox CQ"); 11080 rc = -ENOMEM; 11081 goto out_destroy; 11082 } 11083 11084 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11085 phba->sli4_hba.mbx_cq, 11086 phba->sli4_hba.mbx_wq, 11087 NULL, 0, LPFC_MBOX); 11088 if (rc) { 11089 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11090 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 11091 (uint32_t)rc); 11092 goto out_destroy; 11093 } 11094 if (phba->nvmet_support) { 11095 if (!phba->sli4_hba.nvmet_cqset) { 11096 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11097 "3165 Fast-path NVME CQ Set " 11098 "array not allocated\n"); 11099 rc = -ENOMEM; 11100 goto out_destroy; 11101 } 11102 if (phba->cfg_nvmet_mrq > 1) { 11103 rc = lpfc_cq_create_set(phba, 11104 phba->sli4_hba.nvmet_cqset, 11105 qp, 11106 LPFC_WCQ, LPFC_NVMET); 11107 if (rc) { 11108 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11109 "3164 Failed setup of NVME CQ " 11110 "Set, rc = 0x%x\n", 11111 (uint32_t)rc); 11112 goto out_destroy; 11113 } 11114 } else { 11115 /* Set up NVMET Receive Complete Queue */ 11116 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 11117 qp[0].hba_eq, 11118 LPFC_WCQ, LPFC_NVMET); 11119 if (rc) { 11120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11121 "6089 Failed setup NVMET CQ: " 11122 "rc = 0x%x\n", (uint32_t)rc); 11123 goto out_destroy; 11124 } 11125 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 11126 11127 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11128 "6090 NVMET CQ setup: cq-id=%d, " 11129 "parent eq-id=%d\n", 11130 phba->sli4_hba.nvmet_cqset[0]->queue_id, 11131 qp[0].hba_eq->queue_id); 11132 } 11133 } 11134 11135 /* Set up slow-path ELS WQ/CQ */ 11136 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 11137 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11138 "0530 ELS %s not 
allocated\n", 11139 phba->sli4_hba.els_cq ? "WQ" : "CQ"); 11140 rc = -ENOMEM; 11141 goto out_destroy; 11142 } 11143 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11144 phba->sli4_hba.els_cq, 11145 phba->sli4_hba.els_wq, 11146 NULL, 0, LPFC_ELS); 11147 if (rc) { 11148 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11149 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 11150 (uint32_t)rc); 11151 goto out_destroy; 11152 } 11153 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11154 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 11155 phba->sli4_hba.els_wq->queue_id, 11156 phba->sli4_hba.els_cq->queue_id); 11157 11158 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11159 /* Set up NVME LS Complete Queue */ 11160 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 11161 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11162 "6091 LS %s not allocated\n", 11163 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 11164 rc = -ENOMEM; 11165 goto out_destroy; 11166 } 11167 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11168 phba->sli4_hba.nvmels_cq, 11169 phba->sli4_hba.nvmels_wq, 11170 NULL, 0, LPFC_NVME_LS); 11171 if (rc) { 11172 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11173 "0526 Failed setup of NVVME LS WQ/CQ: " 11174 "rc = 0x%x\n", (uint32_t)rc); 11175 goto out_destroy; 11176 } 11177 11178 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11179 "6096 ELS WQ setup: wq-id=%d, " 11180 "parent cq-id=%d\n", 11181 phba->sli4_hba.nvmels_wq->queue_id, 11182 phba->sli4_hba.nvmels_cq->queue_id); 11183 } 11184 11185 /* 11186 * Create NVMET Receive Queue (RQ) 11187 */ 11188 if (phba->nvmet_support) { 11189 if ((!phba->sli4_hba.nvmet_cqset) || 11190 (!phba->sli4_hba.nvmet_mrq_hdr) || 11191 (!phba->sli4_hba.nvmet_mrq_data)) { 11192 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11193 "6130 MRQ CQ Queues not " 11194 "allocated\n"); 11195 rc = -ENOMEM; 11196 goto out_destroy; 11197 } 11198 if (phba->cfg_nvmet_mrq > 1) { 11199 rc = lpfc_mrq_create(phba, 11200 phba->sli4_hba.nvmet_mrq_hdr, 11201 phba->sli4_hba.nvmet_mrq_data, 11202 phba->sli4_hba.nvmet_cqset, 11203 LPFC_NVMET); 11204 if (rc) { 11205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11206 "6098 Failed setup of NVMET " 11207 "MRQ: rc = 0x%x\n", 11208 (uint32_t)rc); 11209 goto out_destroy; 11210 } 11211 11212 } else { 11213 rc = lpfc_rq_create(phba, 11214 phba->sli4_hba.nvmet_mrq_hdr[0], 11215 phba->sli4_hba.nvmet_mrq_data[0], 11216 phba->sli4_hba.nvmet_cqset[0], 11217 LPFC_NVMET); 11218 if (rc) { 11219 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11220 "6057 Failed setup of NVMET " 11221 "Receive Queue: rc = 0x%x\n", 11222 (uint32_t)rc); 11223 goto out_destroy; 11224 } 11225 11226 lpfc_printf_log( 11227 phba, KERN_INFO, LOG_INIT, 11228 "6099 NVMET RQ setup: hdr-rq-id=%d, " 11229 "dat-rq-id=%d parent cq-id=%d\n", 11230 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 11231 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 11232 phba->sli4_hba.nvmet_cqset[0]->queue_id); 11233 11234 } 11235 } 11236 11237 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 11238 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11239 "0540 Receive Queue not allocated\n"); 11240 rc = -ENOMEM; 11241 goto out_destroy; 11242 } 11243 11244 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 11245 phba->sli4_hba.els_cq, LPFC_USOL); 11246 if (rc) { 11247 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11248 "0541 Failed setup of Receive Queue: " 11249 "rc = 0x%x\n", (uint32_t)rc); 11250 goto out_destroy; 11251 } 11252 11253 lpfc_printf_log(phba, KERN_INFO, 
LOG_INIT, 11254 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 11255 "parent cq-id=%d\n", 11256 phba->sli4_hba.hdr_rq->queue_id, 11257 phba->sli4_hba.dat_rq->queue_id, 11258 phba->sli4_hba.els_cq->queue_id); 11259 11260 if (phba->cfg_fcp_imax) 11261 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 11262 else 11263 usdelay = 0; 11264 11265 for (qidx = 0; qidx < phba->cfg_irq_chann; 11266 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 11267 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 11268 usdelay); 11269 11270 if (phba->sli4_hba.cq_max) { 11271 kfree(phba->sli4_hba.cq_lookup); 11272 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 11273 sizeof(struct lpfc_queue *), GFP_KERNEL); 11274 if (!phba->sli4_hba.cq_lookup) { 11275 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11276 "0549 Failed setup of CQ Lookup table: " 11277 "size 0x%x\n", phba->sli4_hba.cq_max); 11278 rc = -ENOMEM; 11279 goto out_destroy; 11280 } 11281 lpfc_setup_cq_lookup(phba); 11282 } 11283 return 0; 11284 11285 out_destroy: 11286 lpfc_sli4_queue_unset(phba); 11287 out_error: 11288 return rc; 11289 } 11290 11291 /** 11292 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 11293 * @phba: pointer to lpfc hba data structure. 11294 * 11295 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 11296 * operation. 11297 * 11298 * Return codes 11299 * 0 - successful 11300 * -ENOMEM - No available memory 11301 * -EIO - The mailbox failed to complete successfully. 11302 **/ 11303 void 11304 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 11305 { 11306 struct lpfc_sli4_hdw_queue *qp; 11307 struct lpfc_queue *eq; 11308 int qidx; 11309 11310 /* Unset mailbox command work queue */ 11311 if (phba->sli4_hba.mbx_wq) 11312 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 11313 11314 /* Unset NVME LS work queue */ 11315 if (phba->sli4_hba.nvmels_wq) 11316 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 11317 11318 /* Unset ELS work queue */ 11319 if (phba->sli4_hba.els_wq) 11320 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 11321 11322 /* Unset unsolicited receive queue */ 11323 if (phba->sli4_hba.hdr_rq) 11324 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 11325 phba->sli4_hba.dat_rq); 11326 11327 /* Unset mailbox command complete queue */ 11328 if (phba->sli4_hba.mbx_cq) 11329 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 11330 11331 /* Unset ELS complete queue */ 11332 if (phba->sli4_hba.els_cq) 11333 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 11334 11335 /* Unset NVME LS complete queue */ 11336 if (phba->sli4_hba.nvmels_cq) 11337 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 11338 11339 if (phba->nvmet_support) { 11340 /* Unset NVMET MRQ queue */ 11341 if (phba->sli4_hba.nvmet_mrq_hdr) { 11342 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11343 lpfc_rq_destroy( 11344 phba, 11345 phba->sli4_hba.nvmet_mrq_hdr[qidx], 11346 phba->sli4_hba.nvmet_mrq_data[qidx]); 11347 } 11348 11349 /* Unset NVMET CQ Set complete queue */ 11350 if (phba->sli4_hba.nvmet_cqset) { 11351 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11352 lpfc_cq_destroy( 11353 phba, phba->sli4_hba.nvmet_cqset[qidx]); 11354 } 11355 } 11356 11357 /* Unset fast-path SLI4 queues */ 11358 if (phba->sli4_hba.hdwq) { 11359 /* Loop thru all Hardware Queues */ 11360 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11361 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 11362 qp = &phba->sli4_hba.hdwq[qidx]; 11363 lpfc_wq_destroy(phba, qp->io_wq); 11364 lpfc_cq_destroy(phba, qp->io_cq); 11365 } 11366 /* Loop thru all IRQ 
vectors */ 11367 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11368 /* Destroy the EQ corresponding to the IRQ vector */ 11369 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 11370 lpfc_eq_destroy(phba, eq); 11371 } 11372 } 11373 11374 kfree(phba->sli4_hba.cq_lookup); 11375 phba->sli4_hba.cq_lookup = NULL; 11376 phba->sli4_hba.cq_max = 0; 11377 } 11378 11379 /** 11380 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 11381 * @phba: pointer to lpfc hba data structure. 11382 * 11383 * This routine is invoked to allocate and set up a pool of completion queue 11384 * events. The body of the completion queue event is a completion queue entry 11385 * CQE. For now, this pool is used for the interrupt service routine to queue 11386 * the following HBA completion queue events for the worker thread to process: 11387 * - Mailbox asynchronous events 11388 * - Receive queue completion unsolicited events 11389 * Later, this can be used for all the slow-path events. 11390 * 11391 * Return codes 11392 * 0 - successful 11393 * -ENOMEM - No available memory 11394 **/ 11395 static int 11396 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 11397 { 11398 struct lpfc_cq_event *cq_event; 11399 int i; 11400 11401 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 11402 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 11403 if (!cq_event) 11404 goto out_pool_create_fail; 11405 list_add_tail(&cq_event->list, 11406 &phba->sli4_hba.sp_cqe_event_pool); 11407 } 11408 return 0; 11409 11410 out_pool_create_fail: 11411 lpfc_sli4_cq_event_pool_destroy(phba); 11412 return -ENOMEM; 11413 } 11414 11415 /** 11416 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 11417 * @phba: pointer to lpfc hba data structure. 11418 * 11419 * This routine is invoked to free the pool of completion queue events at 11420 * driver unload time. Note that, it is the responsibility of the driver 11421 * cleanup routine to free all the outstanding completion-queue events 11422 * allocated from this pool back into the pool before invoking this routine 11423 * to destroy the pool. 11424 **/ 11425 static void 11426 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 11427 { 11428 struct lpfc_cq_event *cq_event, *next_cq_event; 11429 11430 list_for_each_entry_safe(cq_event, next_cq_event, 11431 &phba->sli4_hba.sp_cqe_event_pool, list) { 11432 list_del(&cq_event->list); 11433 kfree(cq_event); 11434 } 11435 } 11436 11437 /** 11438 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11439 * @phba: pointer to lpfc hba data structure. 11440 * 11441 * This routine is the lock free version of the API invoked to allocate a 11442 * completion-queue event from the free pool. 11443 * 11444 * Return: Pointer to the newly allocated completion-queue event if successful 11445 * NULL otherwise. 11446 **/ 11447 struct lpfc_cq_event * 11448 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11449 { 11450 struct lpfc_cq_event *cq_event = NULL; 11451 11452 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 11453 struct lpfc_cq_event, list); 11454 return cq_event; 11455 } 11456 11457 /** 11458 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11459 * @phba: pointer to lpfc hba data structure. 11460 * 11461 * This routine is the lock version of the API invoked to allocate a 11462 * completion-queue event from the free pool. 
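 *
 * A minimal usage sketch (hypothetical caller, not code from this file):
 *
 *	struct lpfc_cq_event *evt = lpfc_sli4_cq_event_alloc(phba);
 *	if (evt) {
 *		(copy the CQE into evt, queue it to the worker thread,
 *		 and later hand it back with lpfc_sli4_cq_event_release())
 *	}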
11463 * 11464 * Return: Pointer to the newly allocated completion-queue event if successful 11465 * NULL otherwise. 11466 **/ 11467 struct lpfc_cq_event * 11468 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11469 { 11470 struct lpfc_cq_event *cq_event; 11471 unsigned long iflags; 11472 11473 spin_lock_irqsave(&phba->hbalock, iflags); 11474 cq_event = __lpfc_sli4_cq_event_alloc(phba); 11475 spin_unlock_irqrestore(&phba->hbalock, iflags); 11476 return cq_event; 11477 } 11478 11479 /** 11480 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 11481 * @phba: pointer to lpfc hba data structure. 11482 * @cq_event: pointer to the completion queue event to be freed. 11483 * 11484 * This routine is the lock free version of the API invoked to release a 11485 * completion-queue event back into the free pool. 11486 **/ 11487 void 11488 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 11489 struct lpfc_cq_event *cq_event) 11490 { 11491 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 11492 } 11493 11494 /** 11495 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 11496 * @phba: pointer to lpfc hba data structure. 11497 * @cq_event: pointer to the completion queue event to be freed. 11498 * 11499 * This routine is the lock version of the API invoked to release a 11500 * completion-queue event back into the free pool. 11501 **/ 11502 void 11503 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 11504 struct lpfc_cq_event *cq_event) 11505 { 11506 unsigned long iflags; 11507 spin_lock_irqsave(&phba->hbalock, iflags); 11508 __lpfc_sli4_cq_event_release(phba, cq_event); 11509 spin_unlock_irqrestore(&phba->hbalock, iflags); 11510 } 11511 11512 /** 11513 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 11514 * @phba: pointer to lpfc hba data structure. 11515 * 11516 * This routine frees all the pending completion-queue events 11517 * back into the free pool for a device reset. 11518 **/ 11519 static void 11520 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 11521 { 11522 LIST_HEAD(cq_event_list); 11523 struct lpfc_cq_event *cq_event; 11524 unsigned long iflags; 11525 11526 /* Retrieve all the pending WCQEs from pending WCQE lists */ 11527 11528 /* Pending ELS XRI abort events */ 11529 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 11530 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 11531 &cq_event_list); 11532 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 11533 11534 /* Pending async events */ 11535 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 11536 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 11537 &cq_event_list); 11538 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 11539 11540 while (!list_empty(&cq_event_list)) { 11541 list_remove_head(&cq_event_list, cq_event, 11542 struct lpfc_cq_event, list); 11543 lpfc_sli4_cq_event_release(phba, cq_event); 11544 } 11545 } 11546 11547 /** 11548 * lpfc_pci_function_reset - Reset pci function. 11549 * @phba: pointer to lpfc hba data structure. 11550 * 11551 * This routine is invoked to request a PCI function reset. It destroys 11552 * all resources assigned to the PCI function that originates this request. 11553 * 11554 * Return codes 11555 * 0 - successful 11556 * -ENOMEM - No available memory 11557 * -EIO - The mailbox failed to complete successfully.
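 *
 * Broadly, per the body below: if_type 0 ports are reset with a
 * FUNCTION_RESET mailbox command; if_type 2/6 ports first poll the port
 * status register for RDY (up to 30 seconds of 20 ms polls), write
 * INIT_PORT to the SLIPORT control register, and then poll for RDY
 * again before declaring success or failure.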
11558 **/ 11559 int 11560 lpfc_pci_function_reset(struct lpfc_hba *phba) 11561 { 11562 LPFC_MBOXQ_t *mboxq; 11563 uint32_t rc = 0, if_type; 11564 uint32_t shdr_status, shdr_add_status; 11565 uint32_t rdy_chk; 11566 uint32_t port_reset = 0; 11567 union lpfc_sli4_cfg_shdr *shdr; 11568 struct lpfc_register reg_data; 11569 uint16_t devid; 11570 11571 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11572 switch (if_type) { 11573 case LPFC_SLI_INTF_IF_TYPE_0: 11574 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 11575 GFP_KERNEL); 11576 if (!mboxq) { 11577 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11578 "0494 Unable to allocate memory for " 11579 "issuing SLI_FUNCTION_RESET mailbox " 11580 "command\n"); 11581 return -ENOMEM; 11582 } 11583 11584 /* Setup PCI function reset mailbox-ioctl command */ 11585 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11586 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 11587 LPFC_SLI4_MBX_EMBED); 11588 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11589 shdr = (union lpfc_sli4_cfg_shdr *) 11590 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 11591 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11592 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 11593 &shdr->response); 11594 mempool_free(mboxq, phba->mbox_mem_pool); 11595 if (shdr_status || shdr_add_status || rc) { 11596 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11597 "0495 SLI_FUNCTION_RESET mailbox " 11598 "failed with status x%x add_status x%x," 11599 " mbx status x%x\n", 11600 shdr_status, shdr_add_status, rc); 11601 rc = -ENXIO; 11602 } 11603 break; 11604 case LPFC_SLI_INTF_IF_TYPE_2: 11605 case LPFC_SLI_INTF_IF_TYPE_6: 11606 wait: 11607 /* 11608 * Poll the Port Status Register and wait for RDY for 11609 * up to 30 seconds. If the port doesn't respond, treat 11610 * it as an error. 11611 */ 11612 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 11613 if (lpfc_readl(phba->sli4_hba.u.if_type2. 11614 STATUSregaddr, ®_data.word0)) { 11615 rc = -ENODEV; 11616 goto out; 11617 } 11618 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 11619 break; 11620 msleep(20); 11621 } 11622 11623 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 11624 phba->work_status[0] = readl( 11625 phba->sli4_hba.u.if_type2.ERR1regaddr); 11626 phba->work_status[1] = readl( 11627 phba->sli4_hba.u.if_type2.ERR2regaddr); 11628 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11629 "2890 Port not ready, port status reg " 11630 "0x%x error 1=0x%x, error 2=0x%x\n", 11631 reg_data.word0, 11632 phba->work_status[0], 11633 phba->work_status[1]); 11634 rc = -ENODEV; 11635 goto out; 11636 } 11637 11638 if (bf_get(lpfc_sliport_status_pldv, ®_data)) 11639 lpfc_pldv_detect = true; 11640 11641 if (!port_reset) { 11642 /* 11643 * Reset the port now 11644 */ 11645 reg_data.word0 = 0; 11646 bf_set(lpfc_sliport_ctrl_end, ®_data, 11647 LPFC_SLIPORT_LITTLE_ENDIAN); 11648 bf_set(lpfc_sliport_ctrl_ip, ®_data, 11649 LPFC_SLIPORT_INIT_PORT); 11650 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 11651 CTRLregaddr); 11652 /* flush */ 11653 pci_read_config_word(phba->pcidev, 11654 PCI_DEVICE_ID, &devid); 11655 11656 port_reset = 1; 11657 msleep(20); 11658 goto wait; 11659 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 11660 rc = -ENODEV; 11661 goto out; 11662 } 11663 break; 11664 11665 case LPFC_SLI_INTF_IF_TYPE_1: 11666 default: 11667 break; 11668 } 11669 11670 out: 11671 /* Catch the not-ready port failure after a port reset. 
*/ 11672 if (rc) { 11673 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11674 "3317 HBA not functional: IP Reset Failed " 11675 "try: echo fw_reset > board_mode\n"); 11676 rc = -ENODEV; 11677 } 11678 11679 return rc; 11680 } 11681 11682 /** 11683 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 11684 * @phba: pointer to lpfc hba data structure. 11685 * 11686 * This routine is invoked to set up the PCI device memory space for device 11687 * with SLI-4 interface spec. 11688 * 11689 * Return codes 11690 * 0 - successful 11691 * other values - error 11692 **/ 11693 static int 11694 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 11695 { 11696 struct pci_dev *pdev = phba->pcidev; 11697 unsigned long bar0map_len, bar1map_len, bar2map_len; 11698 int error; 11699 uint32_t if_type; 11700 11701 if (!pdev) 11702 return -ENODEV; 11703 11704 /* Set the device DMA mask size */ 11705 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11706 if (error) 11707 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11708 if (error) 11709 return error; 11710 11711 /* 11712 * The BARs and register set definitions and offset locations are 11713 * dependent on the if_type. 11714 */ 11715 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 11716 &phba->sli4_hba.sli_intf.word0)) { 11717 return -ENODEV; 11718 } 11719 11720 /* There is no SLI3 failback for SLI4 devices. */ 11721 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 11722 LPFC_SLI_INTF_VALID) { 11723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11724 "2894 SLI_INTF reg contents invalid " 11725 "sli_intf reg 0x%x\n", 11726 phba->sli4_hba.sli_intf.word0); 11727 return -ENODEV; 11728 } 11729 11730 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11731 /* 11732 * Get the bus address of SLI4 device Bar regions and the 11733 * number of bytes required by each mapping. The mapping of the 11734 * particular PCI BARs regions is dependent on the type of 11735 * SLI4 device. 
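 *
 * Summarizing the mapping code below: PCI_64BIT_BAR0 always carries the
 * SLI4 config registers; on if_type 0, PCI_64BIT_BAR2 maps the control
 * registers and PCI_64BIT_BAR4 the doorbells; on if_type 6,
 * PCI_64BIT_BAR2 maps the doorbells and PCI_64BIT_BAR4 the optional DPP
 * registers.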
11736 */ 11737 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 11738 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 11739 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 11740 11741 /* 11742 * Map SLI4 PCI Config Space Register base to a kernel virtual 11743 * addr 11744 */ 11745 phba->sli4_hba.conf_regs_memmap_p = 11746 ioremap(phba->pci_bar0_map, bar0map_len); 11747 if (!phba->sli4_hba.conf_regs_memmap_p) { 11748 dev_printk(KERN_ERR, &pdev->dev, 11749 "ioremap failed for SLI4 PCI config " 11750 "registers.\n"); 11751 return -ENODEV; 11752 } 11753 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 11754 /* Set up BAR0 PCI config space register memory map */ 11755 lpfc_sli4_bar0_register_memmap(phba, if_type); 11756 } else { 11757 phba->pci_bar0_map = pci_resource_start(pdev, 1); 11758 bar0map_len = pci_resource_len(pdev, 1); 11759 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 11760 dev_printk(KERN_ERR, &pdev->dev, 11761 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 11762 return -ENODEV; 11763 } 11764 phba->sli4_hba.conf_regs_memmap_p = 11765 ioremap(phba->pci_bar0_map, bar0map_len); 11766 if (!phba->sli4_hba.conf_regs_memmap_p) { 11767 dev_printk(KERN_ERR, &pdev->dev, 11768 "ioremap failed for SLI4 PCI config " 11769 "registers.\n"); 11770 return -ENODEV; 11771 } 11772 lpfc_sli4_bar0_register_memmap(phba, if_type); 11773 } 11774 11775 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11776 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 11777 /* 11778 * Map SLI4 if type 0 HBA Control Register base to a 11779 * kernel virtual address and setup the registers. 11780 */ 11781 phba->pci_bar1_map = pci_resource_start(pdev, 11782 PCI_64BIT_BAR2); 11783 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11784 phba->sli4_hba.ctrl_regs_memmap_p = 11785 ioremap(phba->pci_bar1_map, 11786 bar1map_len); 11787 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 11788 dev_err(&pdev->dev, 11789 "ioremap failed for SLI4 HBA " 11790 "control registers.\n"); 11791 error = -ENOMEM; 11792 goto out_iounmap_conf; 11793 } 11794 phba->pci_bar2_memmap_p = 11795 phba->sli4_hba.ctrl_regs_memmap_p; 11796 lpfc_sli4_bar1_register_memmap(phba, if_type); 11797 } else { 11798 error = -ENOMEM; 11799 goto out_iounmap_conf; 11800 } 11801 } 11802 11803 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 11804 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 11805 /* 11806 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 11807 * virtual address and setup the registers. 11808 */ 11809 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 11810 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11811 phba->sli4_hba.drbl_regs_memmap_p = 11812 ioremap(phba->pci_bar1_map, bar1map_len); 11813 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11814 dev_err(&pdev->dev, 11815 "ioremap failed for SLI4 HBA doorbell registers.\n"); 11816 error = -ENOMEM; 11817 goto out_iounmap_conf; 11818 } 11819 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 11820 lpfc_sli4_bar1_register_memmap(phba, if_type); 11821 } 11822 11823 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11824 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11825 /* 11826 * Map SLI4 if type 0 HBA Doorbell Register base to 11827 * a kernel virtual address and setup the registers. 
11828 */ 11829 phba->pci_bar2_map = pci_resource_start(pdev, 11830 PCI_64BIT_BAR4); 11831 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11832 phba->sli4_hba.drbl_regs_memmap_p = 11833 ioremap(phba->pci_bar2_map, 11834 bar2map_len); 11835 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11836 dev_err(&pdev->dev, 11837 "ioremap failed for SLI4 HBA" 11838 " doorbell registers.\n"); 11839 error = -ENOMEM; 11840 goto out_iounmap_ctrl; 11841 } 11842 phba->pci_bar4_memmap_p = 11843 phba->sli4_hba.drbl_regs_memmap_p; 11844 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 11845 if (error) 11846 goto out_iounmap_all; 11847 } else { 11848 error = -ENOMEM; 11849 goto out_iounmap_all; 11850 } 11851 } 11852 11853 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 11854 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11855 /* 11856 * Map SLI4 if type 6 HBA DPP Register base to a kernel 11857 * virtual address and setup the registers. 11858 */ 11859 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 11860 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11861 phba->sli4_hba.dpp_regs_memmap_p = 11862 ioremap(phba->pci_bar2_map, bar2map_len); 11863 if (!phba->sli4_hba.dpp_regs_memmap_p) { 11864 dev_err(&pdev->dev, 11865 "ioremap failed for SLI4 HBA dpp registers.\n"); 11866 error = -ENOMEM; 11867 goto out_iounmap_ctrl; 11868 } 11869 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 11870 } 11871 11872 /* Set up the EQ/CQ register handeling functions now */ 11873 switch (if_type) { 11874 case LPFC_SLI_INTF_IF_TYPE_0: 11875 case LPFC_SLI_INTF_IF_TYPE_2: 11876 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 11877 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 11878 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 11879 break; 11880 case LPFC_SLI_INTF_IF_TYPE_6: 11881 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 11882 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 11883 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 11884 break; 11885 default: 11886 break; 11887 } 11888 11889 return 0; 11890 11891 out_iounmap_all: 11892 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11893 out_iounmap_ctrl: 11894 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 11895 out_iounmap_conf: 11896 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11897 11898 return error; 11899 } 11900 11901 /** 11902 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 11903 * @phba: pointer to lpfc hba data structure. 11904 * 11905 * This routine is invoked to unset the PCI device memory space for device 11906 * with SLI-4 interface spec. 
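 *
 * Only the regions that lpfc_sli4_pci_mem_setup() actually ioremap()'d
 * for the detected if_type are unmapped here.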
11907 **/ 11908 static void 11909 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 11910 { 11911 uint32_t if_type; 11912 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11913 11914 switch (if_type) { 11915 case LPFC_SLI_INTF_IF_TYPE_0: 11916 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11917 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 11918 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11919 break; 11920 case LPFC_SLI_INTF_IF_TYPE_2: 11921 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11922 break; 11923 case LPFC_SLI_INTF_IF_TYPE_6: 11924 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11925 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11926 if (phba->sli4_hba.dpp_regs_memmap_p) 11927 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 11928 break; 11929 case LPFC_SLI_INTF_IF_TYPE_1: 11930 default: 11931 dev_printk(KERN_ERR, &phba->pcidev->dev, 11932 "FATAL - unsupported SLI4 interface type - %d\n", 11933 if_type); 11934 break; 11935 } 11936 } 11937 11938 /** 11939 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 11940 * @phba: pointer to lpfc hba data structure. 11941 * 11942 * This routine is invoked to enable the MSI-X interrupt vectors to device 11943 * with SLI-3 interface specs. 11944 * 11945 * Return codes 11946 * 0 - successful 11947 * other values - error 11948 **/ 11949 static int 11950 lpfc_sli_enable_msix(struct lpfc_hba *phba) 11951 { 11952 int rc; 11953 LPFC_MBOXQ_t *pmb; 11954 11955 /* Set up MSI-X multi-message vectors */ 11956 rc = pci_alloc_irq_vectors(phba->pcidev, 11957 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 11958 if (rc < 0) { 11959 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11960 "0420 PCI enable MSI-X failed (%d)\n", rc); 11961 goto vec_fail_out; 11962 } 11963 11964 /* 11965 * Assign MSI-X vectors to interrupt handlers 11966 */ 11967 11968 /* vector-0 is associated to slow-path handler */ 11969 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 11970 &lpfc_sli_sp_intr_handler, 0, 11971 LPFC_SP_DRIVER_HANDLER_NAME, phba); 11972 if (rc) { 11973 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11974 "0421 MSI-X slow-path request_irq failed " 11975 "(%d)\n", rc); 11976 goto msi_fail_out; 11977 } 11978 11979 /* vector-1 is associated to fast-path handler */ 11980 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 11981 &lpfc_sli_fp_intr_handler, 0, 11982 LPFC_FP_DRIVER_HANDLER_NAME, phba); 11983 11984 if (rc) { 11985 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11986 "0429 MSI-X fast-path request_irq failed " 11987 "(%d)\n", rc); 11988 goto irq_fail_out; 11989 } 11990 11991 /* 11992 * Configure HBA MSI-X attention conditions to messages 11993 */ 11994 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11995 11996 if (!pmb) { 11997 rc = -ENOMEM; 11998 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11999 "0474 Unable to allocate memory for issuing " 12000 "MBOX_CONFIG_MSI command\n"); 12001 goto mem_fail_out; 12002 } 12003 rc = lpfc_config_msi(phba, pmb); 12004 if (rc) 12005 goto mbx_fail_out; 12006 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 12007 if (rc != MBX_SUCCESS) { 12008 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 12009 "0351 Config MSI mailbox command failed, " 12010 "mbxCmd x%x, mbxStatus x%x\n", 12011 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 12012 goto mbx_fail_out; 12013 } 12014 12015 /* Free memory allocated for mailbox command */ 12016 mempool_free(pmb, phba->mbox_mem_pool); 12017 return rc; 12018 12019 mbx_fail_out: 12020 /* Free memory allocated for mailbox command */ 12021 mempool_free(pmb, 
phba->mbox_mem_pool); 12022 12023 mem_fail_out: 12024 /* free the irq already requested */ 12025 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 12026 12027 irq_fail_out: 12028 /* free the irq already requested */ 12029 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 12030 12031 msi_fail_out: 12032 /* Unconfigure MSI-X capability structure */ 12033 pci_free_irq_vectors(phba->pcidev); 12034 12035 vec_fail_out: 12036 return rc; 12037 } 12038 12039 /** 12040 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 12041 * @phba: pointer to lpfc hba data structure. 12042 * 12043 * This routine is invoked to enable the MSI interrupt mode for a device with 12044 * the SLI-3 interface spec. The kernel function pci_enable_msi() is called to 12045 * enable the MSI vector. The device driver is responsible for calling 12046 * request_irq() to register the MSI vector with an interrupt handler, which 12047 * is also done in this function. 12048 * 12049 * Return codes 12050 * 0 - successful 12051 * other values - error 12052 */ 12053 static int 12054 lpfc_sli_enable_msi(struct lpfc_hba *phba) 12055 { 12056 int rc; 12057 12058 rc = pci_enable_msi(phba->pcidev); 12059 if (!rc) 12060 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12061 "0462 PCI enable MSI mode success.\n"); 12062 else { 12063 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12064 "0471 PCI enable MSI mode failed (%d)\n", rc); 12065 return rc; 12066 } 12067 12068 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12069 0, LPFC_DRIVER_NAME, phba); 12070 if (rc) { 12071 pci_disable_msi(phba->pcidev); 12072 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12073 "0478 MSI request_irq failed (%d)\n", rc); 12074 } 12075 return rc; 12076 } 12077 12078 /** 12079 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 12080 * @phba: pointer to lpfc hba data structure. 12081 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 12082 * 12083 * This routine is invoked to enable device interrupts and associate the 12084 * driver's interrupt handler(s) with interrupt vector(s) for a device with 12085 * the SLI-3 interface spec. Depending on the interrupt mode configured for 12086 * the driver, the driver will fall back from the configured interrupt mode 12087 * to an interrupt mode supported by the platform, kernel, and device, in the 12088 * order: 12089 * MSI-X -> MSI -> IRQ.
12090 * 12091 * Return codes 12092 * 0 - INTx mode enabled, 1 - MSI mode enabled, 2 - MSI-X mode enabled 12093 * LPFC_INTR_ERROR - the interrupt could not be enabled 12094 **/ 12095 static uint32_t 12096 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 12097 { 12098 uint32_t intr_mode = LPFC_INTR_ERROR; 12099 int retval; 12100 12101 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 12102 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 12103 if (retval) 12104 return intr_mode; 12105 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; 12106 12107 if (cfg_mode == 2) { 12108 /* Now, try to enable MSI-X interrupt mode */ 12109 retval = lpfc_sli_enable_msix(phba); 12110 if (!retval) { 12111 /* Indicate initialization to MSI-X mode */ 12112 phba->intr_type = MSIX; 12113 intr_mode = 2; 12114 } 12115 } 12116 12117 /* Fallback to MSI if MSI-X initialization failed */ 12118 if (cfg_mode >= 1 && phba->intr_type == NONE) { 12119 retval = lpfc_sli_enable_msi(phba); 12120 if (!retval) { 12121 /* Indicate initialization to MSI mode */ 12122 phba->intr_type = MSI; 12123 intr_mode = 1; 12124 } 12125 } 12126 12127 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 12128 if (phba->intr_type == NONE) { 12129 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12130 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 12131 if (!retval) { 12132 /* Indicate initialization to INTx mode */ 12133 phba->intr_type = INTx; 12134 intr_mode = 0; 12135 } 12136 } 12137 return intr_mode; 12138 } 12139 12140 /** 12141 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 12142 * @phba: pointer to lpfc hba data structure. 12143 * 12144 * This routine is invoked to disable device interrupts and disassociate the 12145 * driver's interrupt handler(s) from the interrupt vector(s) of a device with 12146 * the SLI-3 interface spec. Depending on the interrupt mode, the driver will 12147 * release the interrupt vector(s) for the message signaled interrupt. 12148 **/ 12149 static void 12150 lpfc_sli_disable_intr(struct lpfc_hba *phba) 12151 { 12152 int nr_irqs, i; 12153 12154 if (phba->intr_type == MSIX) 12155 nr_irqs = LPFC_MSIX_VECTORS; 12156 else 12157 nr_irqs = 1; 12158 12159 for (i = 0; i < nr_irqs; i++) 12160 free_irq(pci_irq_vector(phba->pcidev, i), phba); 12161 pci_free_irq_vectors(phba->pcidev); 12162 12163 /* Reset interrupt management states */ 12164 phba->intr_type = NONE; 12165 phba->sli.slistat.sli_intr = 0; 12166 } 12167 12168 /** 12169 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 12170 * @phba: pointer to lpfc hba data structure. 12171 * @id: EQ vector index or Hardware Queue index 12172 * @match: LPFC_FIND_BY_EQ = match by EQ 12173 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 12174 * Return the CPU that matches the selection criteria 12175 */ 12176 static uint16_t 12177 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 12178 { 12179 struct lpfc_vector_map_info *cpup; 12180 int cpu; 12181 12182 /* Loop through all CPUs */ 12183 for_each_present_cpu(cpu) { 12184 cpup = &phba->sli4_hba.cpu_map[cpu]; 12185 12186 /* If we are matching by EQ, there may be multiple CPUs 12187 * using the same vector, so select the one with 12188 * LPFC_CPU_FIRST_IRQ set.
12189 */ 12190 if ((match == LPFC_FIND_BY_EQ) && 12191 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 12192 (cpup->eq == id)) 12193 return cpu; 12194 12195 /* If matching by HDWQ, select the first CPU that matches */ 12196 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 12197 return cpu; 12198 } 12199 return 0; 12200 } 12201 12202 #ifdef CONFIG_X86 12203 /** 12204 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 12205 * @phba: pointer to lpfc hba data structure. 12206 * @cpu: CPU map index 12207 * @phys_id: CPU package physical id 12208 * @core_id: CPU core id 12209 */ 12210 static int 12211 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 12212 uint16_t phys_id, uint16_t core_id) 12213 { 12214 struct lpfc_vector_map_info *cpup; 12215 int idx; 12216 12217 for_each_present_cpu(idx) { 12218 cpup = &phba->sli4_hba.cpu_map[idx]; 12219 /* Does the cpup match the one we are looking for */ 12220 if ((cpup->phys_id == phys_id) && 12221 (cpup->core_id == core_id) && 12222 (cpu != idx)) 12223 return 1; 12224 } 12225 return 0; 12226 } 12227 #endif 12228 12229 /* 12230 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 12231 * @phba: pointer to lpfc hba data structure. 12232 * @eqidx: index for eq and irq vector 12233 * @flag: flags to set for vector_map structure 12234 * @cpu: cpu used to index vector_map structure 12235 * 12236 * The routine assigns eq info into vector_map structure 12237 */ 12238 static inline void 12239 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 12240 unsigned int cpu) 12241 { 12242 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 12243 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 12244 12245 cpup->eq = eqidx; 12246 cpup->flag |= flag; 12247 12248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12249 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 12250 cpu, eqhdl->irq, cpup->eq, cpup->flag); 12251 } 12252 12253 /** 12254 * lpfc_cpu_map_array_init - Initialize cpu_map structure 12255 * @phba: pointer to lpfc hba data structure. 12256 * 12257 * The routine initializes the cpu_map array structure 12258 */ 12259 static void 12260 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 12261 { 12262 struct lpfc_vector_map_info *cpup; 12263 struct lpfc_eq_intr_info *eqi; 12264 int cpu; 12265 12266 for_each_possible_cpu(cpu) { 12267 cpup = &phba->sli4_hba.cpu_map[cpu]; 12268 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 12269 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 12270 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 12271 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 12272 cpup->flag = 0; 12273 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 12274 INIT_LIST_HEAD(&eqi->list); 12275 eqi->icnt = 0; 12276 } 12277 } 12278 12279 /** 12280 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 12281 * @phba: pointer to lpfc hba data structure. 12282 * 12283 * The routine initializes the hba_eq_hdl array structure 12284 */ 12285 static void 12286 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 12287 { 12288 struct lpfc_hba_eq_hdl *eqhdl; 12289 int i; 12290 12291 for (i = 0; i < phba->cfg_irq_chann; i++) { 12292 eqhdl = lpfc_get_eq_hdl(i); 12293 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; 12294 eqhdl->phba = phba; 12295 } 12296 } 12297 12298 /** 12299 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 12300 * @phba: pointer to lpfc hba data structure. 12301 * @vectors: number of msix vectors allocated. 12302 * 12303 * The routine will figure out the CPU affinity assignment for every 12304 * MSI-X vector allocated for the HBA. 
12305 * In addition, the CPU to IO channel mapping will be calculated 12306 * and the phba->sli4_hba.cpu_map array will reflect this. 12307 */ 12308 static void 12309 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 12310 { 12311 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; 12312 int max_phys_id, min_phys_id; 12313 int max_core_id, min_core_id; 12314 struct lpfc_vector_map_info *cpup; 12315 struct lpfc_vector_map_info *new_cpup; 12316 #ifdef CONFIG_X86 12317 struct cpuinfo_x86 *cpuinfo; 12318 #endif 12319 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12320 struct lpfc_hdwq_stat *c_stat; 12321 #endif 12322 12323 max_phys_id = 0; 12324 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 12325 max_core_id = 0; 12326 min_core_id = LPFC_VECTOR_MAP_EMPTY; 12327 12328 /* Update CPU map with physical id and core id of each CPU */ 12329 for_each_present_cpu(cpu) { 12330 cpup = &phba->sli4_hba.cpu_map[cpu]; 12331 #ifdef CONFIG_X86 12332 cpuinfo = &cpu_data(cpu); 12333 cpup->phys_id = cpuinfo->phys_proc_id; 12334 cpup->core_id = cpuinfo->cpu_core_id; 12335 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 12336 cpup->flag |= LPFC_CPU_MAP_HYPER; 12337 #else 12338 /* No distinction between CPUs for other platforms */ 12339 cpup->phys_id = 0; 12340 cpup->core_id = cpu; 12341 #endif 12342 12343 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12344 "3328 CPU %d physid %d coreid %d flag x%x\n", 12345 cpu, cpup->phys_id, cpup->core_id, cpup->flag); 12346 12347 if (cpup->phys_id > max_phys_id) 12348 max_phys_id = cpup->phys_id; 12349 if (cpup->phys_id < min_phys_id) 12350 min_phys_id = cpup->phys_id; 12351 12352 if (cpup->core_id > max_core_id) 12353 max_core_id = cpup->core_id; 12354 if (cpup->core_id < min_core_id) 12355 min_core_id = cpup->core_id; 12356 } 12357 12358 /* After looking at each irq vector assigned to this pcidev, its 12359 * possible to see that not ALL CPUs have been accounted for. 12360 * Next we will set any unassigned (unaffinitized) cpu map 12361 * entries to a IRQ on the same phys_id. 12362 */ 12363 first_cpu = cpumask_first(cpu_present_mask); 12364 start_cpu = first_cpu; 12365 12366 for_each_present_cpu(cpu) { 12367 cpup = &phba->sli4_hba.cpu_map[cpu]; 12368 12369 /* Is this CPU entry unassigned */ 12370 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 12371 /* Mark CPU as IRQ not assigned by the kernel */ 12372 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 12373 12374 /* If so, find a new_cpup thats on the the SAME 12375 * phys_id as cpup. start_cpu will start where we 12376 * left off so all unassigned entries don't get assgined 12377 * the IRQ of the first entry. 12378 */ 12379 new_cpu = start_cpu; 12380 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12381 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12382 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 12383 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && 12384 (new_cpup->phys_id == cpup->phys_id)) 12385 goto found_same; 12386 new_cpu = cpumask_next( 12387 new_cpu, cpu_present_mask); 12388 if (new_cpu == nr_cpumask_bits) 12389 new_cpu = first_cpu; 12390 } 12391 /* At this point, we leave the CPU as unassigned */ 12392 continue; 12393 found_same: 12394 /* We found a matching phys_id, so copy the IRQ info */ 12395 cpup->eq = new_cpup->eq; 12396 12397 /* Bump start_cpu to the next slot to minmize the 12398 * chance of having multiple unassigned CPU entries 12399 * selecting the same IRQ. 
12400 */
12401 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12402 if (start_cpu == nr_cpumask_bits)
12403 start_cpu = first_cpu;
12404
12405 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12406 "3337 Set Affinity: CPU %d "
12407 "eq %d from peer cpu %d same "
12408 "phys_id (%d)\n",
12409 cpu, cpup->eq, new_cpu,
12410 cpup->phys_id);
12411 }
12412 }
12413
12414 /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12415 start_cpu = first_cpu;
12416
12417 for_each_present_cpu(cpu) {
12418 cpup = &phba->sli4_hba.cpu_map[cpu];
12419
12420 /* Is this entry unassigned */
12421 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12422 /* Mark it as IRQ not assigned by the kernel */
12423 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12424
12425 /* If so, find a new_cpup that's on ANY phys_id
12426 * as the cpup. start_cpu will start where we
12427 * left off so all unassigned entries don't get
12428 * assigned the IRQ of the first entry.
12429 */
12430 new_cpu = start_cpu;
12431 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12432 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12433 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12434 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12435 goto found_any;
12436 new_cpu = cpumask_next(
12437 new_cpu, cpu_present_mask);
12438 if (new_cpu == nr_cpumask_bits)
12439 new_cpu = first_cpu;
12440 }
12441 /* We should never leave an entry unassigned */
12442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12443 "3339 Set Affinity: CPU %d "
12444 "eq %d UNASSIGNED\n",
12445 cpu, cpup->eq);
12446 continue;
12447 found_any:
12448 /* We found an available entry, copy the IRQ info */
12449 cpup->eq = new_cpup->eq;
12450
12451 /* Bump start_cpu to the next slot to minimize the
12452 * chance of having multiple unassigned CPU entries
12453 * selecting the same IRQ.
12454 */
12455 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12456 if (start_cpu == nr_cpumask_bits)
12457 start_cpu = first_cpu;
12458
12459 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12460 "3338 Set Affinity: CPU %d "
12461 "eq %d from peer cpu %d (%d/%d)\n",
12462 cpu, cpup->eq, new_cpu,
12463 new_cpup->phys_id, new_cpup->core_id);
12464 }
12465 }
12466
12467 /* Assign hdwq indices that are unique across all cpus in the map
12468 * that are also FIRST_CPUs.
12469 */
12470 idx = 0;
12471 for_each_present_cpu(cpu) {
12472 cpup = &phba->sli4_hba.cpu_map[cpu];
12473
12474 /* Only FIRST IRQs get a hdwq index assignment. */
12475 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12476 continue;
12477
12478 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12479 cpup->hdwq = idx;
12480 idx++;
12481 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12482 "3333 Set Affinity: CPU %d (phys %d core %d): "
12483 "hdwq %d eq %d flg x%x\n",
12484 cpu, cpup->phys_id, cpup->core_id,
12485 cpup->hdwq, cpup->eq, cpup->flag);
12486 }
12487 /* Associate a hdwq with each cpu_map entry
12488 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12489 * hardware queues than CPUs. For that case we will just round-robin
12490 * the available hardware queues as they get assigned to CPUs.
12491 * The next_idx is the idx from the FIRST_CPU loop above to account
12492 * for irq_chann < hdwq. The idx is used for round-robin assignments
12493 * and needs to start at 0.
12494 */
12495 next_idx = idx;
12496 start_cpu = 0;
12497 idx = 0;
12498 for_each_present_cpu(cpu) {
12499 cpup = &phba->sli4_hba.cpu_map[cpu];
12500
12501 /* FIRST cpus are already mapped.
*/ 12502 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 12503 continue; 12504 12505 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 12506 * of the unassigned cpus to the next idx so that all 12507 * hdw queues are fully utilized. 12508 */ 12509 if (next_idx < phba->cfg_hdw_queue) { 12510 cpup->hdwq = next_idx; 12511 next_idx++; 12512 continue; 12513 } 12514 12515 /* Not a First CPU and all hdw_queues are used. Reuse a 12516 * Hardware Queue for another CPU, so be smart about it 12517 * and pick one that has its IRQ/EQ mapped to the same phys_id 12518 * (CPU package) and core_id. 12519 */ 12520 new_cpu = start_cpu; 12521 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12522 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12523 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12524 new_cpup->phys_id == cpup->phys_id && 12525 new_cpup->core_id == cpup->core_id) { 12526 goto found_hdwq; 12527 } 12528 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12529 if (new_cpu == nr_cpumask_bits) 12530 new_cpu = first_cpu; 12531 } 12532 12533 /* If we can't match both phys_id and core_id, 12534 * settle for just a phys_id match. 12535 */ 12536 new_cpu = start_cpu; 12537 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12538 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12539 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12540 new_cpup->phys_id == cpup->phys_id) 12541 goto found_hdwq; 12542 12543 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12544 if (new_cpu == nr_cpumask_bits) 12545 new_cpu = first_cpu; 12546 } 12547 12548 /* Otherwise just round robin on cfg_hdw_queue */ 12549 cpup->hdwq = idx % phba->cfg_hdw_queue; 12550 idx++; 12551 goto logit; 12552 found_hdwq: 12553 /* We found an available entry, copy the IRQ info */ 12554 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12555 if (start_cpu == nr_cpumask_bits) 12556 start_cpu = first_cpu; 12557 cpup->hdwq = new_cpup->hdwq; 12558 logit: 12559 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12560 "3335 Set Affinity: CPU %d (phys %d core %d): " 12561 "hdwq %d eq %d flg x%x\n", 12562 cpu, cpup->phys_id, cpup->core_id, 12563 cpup->hdwq, cpup->eq, cpup->flag); 12564 } 12565 12566 /* 12567 * Initialize the cpu_map slots for not-present cpus in case 12568 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 12569 */ 12570 idx = 0; 12571 for_each_possible_cpu(cpu) { 12572 cpup = &phba->sli4_hba.cpu_map[cpu]; 12573 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12574 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 12575 c_stat->hdwq_no = cpup->hdwq; 12576 #endif 12577 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 12578 continue; 12579 12580 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 12581 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12582 c_stat->hdwq_no = cpup->hdwq; 12583 #endif 12584 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12585 "3340 Set Affinity: not present " 12586 "CPU %d hdwq %d\n", 12587 cpu, cpup->hdwq); 12588 } 12589 12590 /* The cpu_map array will be used later during initialization 12591 * when EQ / CQ / WQs are allocated and configured. 12592 */ 12593 return; 12594 } 12595 12596 /** 12597 * lpfc_cpuhp_get_eq 12598 * 12599 * @phba: pointer to lpfc hba data structure. 
12600 * @cpu: cpu going offline
12601 * @eqlist: eq list to append to
12602 */
12603 static int
12604 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12605 struct list_head *eqlist)
12606 {
12607 const struct cpumask *maskp;
12608 struct lpfc_queue *eq;
12609 struct cpumask *tmp;
12610 u16 idx;
12611
12612 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12613 if (!tmp)
12614 return -ENOMEM;
12615
12616 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12617 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12618 if (!maskp)
12619 continue;
12620 /*
12621 * If irq is not affinitized to the cpu going offline,
12622 * then we don't need to poll the eq attached
12623 * to it.
12624 */
12625 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12626 continue;
12627 /* Get the cpus that are online and are affinitized
12628 * to this irq vector. If the count is
12629 * more than 1 then cpuhp is not going to shut
12630 * down this vector. Since this cpu has not
12631 * gone offline yet, we need >1.
12632 */
12633 cpumask_and(tmp, maskp, cpu_online_mask);
12634 if (cpumask_weight(tmp) > 1)
12635 continue;
12636
12637 /* Now that we have an irq to shutdown, get the eq
12638 * mapped to this irq. Note: multiple hdwq's in
12639 * the software can share an eq, but eventually
12640 * only one eq will be mapped to this vector
12641 */
12642 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12643 list_add(&eq->_poll_list, eqlist);
12644 }
12645 kfree(tmp);
12646 return 0;
12647 }
12648
12649 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12650 {
12651 if (phba->sli_rev != LPFC_SLI_REV4)
12652 return;
12653
12654 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12655 &phba->cpuhp);
12656 /*
12657 * Unregistering the instance doesn't stop the polling
12658 * timer. Wait for the poll timer to retire.
12659 */ 12660 synchronize_rcu(); 12661 del_timer_sync(&phba->cpuhp_poll_timer); 12662 } 12663 12664 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 12665 { 12666 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 12667 return; 12668 12669 __lpfc_cpuhp_remove(phba); 12670 } 12671 12672 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 12673 { 12674 if (phba->sli_rev != LPFC_SLI_REV4) 12675 return; 12676 12677 rcu_read_lock(); 12678 12679 if (!list_empty(&phba->poll_list)) 12680 mod_timer(&phba->cpuhp_poll_timer, 12681 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 12682 12683 rcu_read_unlock(); 12684 12685 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 12686 &phba->cpuhp); 12687 } 12688 12689 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 12690 { 12691 if (phba->pport->load_flag & FC_UNLOADING) { 12692 *retval = -EAGAIN; 12693 return true; 12694 } 12695 12696 if (phba->sli_rev != LPFC_SLI_REV4) { 12697 *retval = 0; 12698 return true; 12699 } 12700 12701 /* proceed with the hotplug */ 12702 return false; 12703 } 12704 12705 /** 12706 * lpfc_irq_set_aff - set IRQ affinity 12707 * @eqhdl: EQ handle 12708 * @cpu: cpu to set affinity 12709 * 12710 **/ 12711 static inline void 12712 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 12713 { 12714 cpumask_clear(&eqhdl->aff_mask); 12715 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 12716 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12717 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask); 12718 } 12719 12720 /** 12721 * lpfc_irq_clear_aff - clear IRQ affinity 12722 * @eqhdl: EQ handle 12723 * 12724 **/ 12725 static inline void 12726 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 12727 { 12728 cpumask_clear(&eqhdl->aff_mask); 12729 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12730 } 12731 12732 /** 12733 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 12734 * @phba: pointer to HBA context object. 12735 * @cpu: cpu going offline/online 12736 * @offline: true, cpu is going offline. false, cpu is coming online. 12737 * 12738 * If cpu is going offline, we'll try our best effort to find the next 12739 * online cpu on the phba's original_mask and migrate all offlining IRQ 12740 * affinities. 12741 * 12742 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. 12743 * 12744 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on 12745 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
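 *
 * For example (hypothetical cpuhp sequence): if a FIRST_IRQ cpu within
 * irq_aff_mask goes offline while another cpu in that mask is still online,
 * every eqhdl whose aff_mask contains the offlining cpu is re-affinitized to
 * that online cpu; when the cpu later comes back online, its own eq's IRQ is
 * re-affinitized back to it.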
12746 * 12747 **/ 12748 static void 12749 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 12750 { 12751 struct lpfc_vector_map_info *cpup; 12752 struct cpumask *aff_mask; 12753 unsigned int cpu_select, cpu_next, idx; 12754 const struct cpumask *orig_mask; 12755 12756 if (phba->irq_chann_mode == NORMAL_MODE) 12757 return; 12758 12759 orig_mask = &phba->sli4_hba.irq_aff_mask; 12760 12761 if (!cpumask_test_cpu(cpu, orig_mask)) 12762 return; 12763 12764 cpup = &phba->sli4_hba.cpu_map[cpu]; 12765 12766 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12767 return; 12768 12769 if (offline) { 12770 /* Find next online CPU on original mask */ 12771 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); 12772 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); 12773 12774 /* Found a valid CPU */ 12775 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 12776 /* Go through each eqhdl and ensure offlining 12777 * cpu aff_mask is migrated 12778 */ 12779 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12780 aff_mask = lpfc_get_aff_mask(idx); 12781 12782 /* Migrate affinity */ 12783 if (cpumask_test_cpu(cpu, aff_mask)) 12784 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 12785 cpu_select); 12786 } 12787 } else { 12788 /* Rely on irqbalance if no online CPUs left on NUMA */ 12789 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 12790 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 12791 } 12792 } else { 12793 /* Migrate affinity back to this CPU */ 12794 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 12795 } 12796 } 12797 12798 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 12799 { 12800 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12801 struct lpfc_queue *eq, *next; 12802 LIST_HEAD(eqlist); 12803 int retval; 12804 12805 if (!phba) { 12806 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12807 return 0; 12808 } 12809 12810 if (__lpfc_cpuhp_checks(phba, &retval)) 12811 return retval; 12812 12813 lpfc_irq_rebalance(phba, cpu, true); 12814 12815 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 12816 if (retval) 12817 return retval; 12818 12819 /* start polling on these eq's */ 12820 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 12821 list_del_init(&eq->_poll_list); 12822 lpfc_sli4_start_polling(eq); 12823 } 12824 12825 return 0; 12826 } 12827 12828 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 12829 { 12830 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12831 struct lpfc_queue *eq, *next; 12832 unsigned int n; 12833 int retval; 12834 12835 if (!phba) { 12836 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12837 return 0; 12838 } 12839 12840 if (__lpfc_cpuhp_checks(phba, &retval)) 12841 return retval; 12842 12843 lpfc_irq_rebalance(phba, cpu, false); 12844 12845 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 12846 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 12847 if (n == cpu) 12848 lpfc_sli4_stop_polling(eq); 12849 } 12850 12851 return 0; 12852 } 12853 12854 /** 12855 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 12856 * @phba: pointer to lpfc hba data structure. 12857 * 12858 * This routine is invoked to enable the MSI-X interrupt vectors to device 12859 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 12860 * to cpus on the system. 
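 *
 * Rough sketch of the vector budget used below (assuming irq_chann_mode
 * restricts the adapter to irq_aff_mask):
 *   vectors = min(phba->cfg_irq_chann, cpumask_weight(aff_mask));
 * otherwise phba->cfg_irq_chann vectors are requested with PCI_IRQ_AFFINITY.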
12861 * 12862 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 12863 * the number of cpus on the same numa node as this adapter. The vectors are 12864 * allocated without requesting OS affinity mapping. A vector will be 12865 * allocated and assigned to each online and offline cpu. If the cpu is 12866 * online, then affinity will be set to that cpu. If the cpu is offline, then 12867 * affinity will be set to the nearest peer cpu within the numa node that is 12868 * online. If there are no online cpus within the numa node, affinity is not 12869 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 12870 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 12871 * configured. 12872 * 12873 * If numa mode is not enabled and there is more than 1 vector allocated, then 12874 * the driver relies on the managed irq interface where the OS assigns vector to 12875 * cpu affinity. The driver will then use that affinity mapping to setup its 12876 * cpu mapping table. 12877 * 12878 * Return codes 12879 * 0 - successful 12880 * other values - error 12881 **/ 12882 static int 12883 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 12884 { 12885 int vectors, rc, index; 12886 char *name; 12887 const struct cpumask *aff_mask = NULL; 12888 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 12889 struct lpfc_vector_map_info *cpup; 12890 struct lpfc_hba_eq_hdl *eqhdl; 12891 const struct cpumask *maskp; 12892 unsigned int flags = PCI_IRQ_MSIX; 12893 12894 /* Set up MSI-X multi-message vectors */ 12895 vectors = phba->cfg_irq_chann; 12896 12897 if (phba->irq_chann_mode != NORMAL_MODE) 12898 aff_mask = &phba->sli4_hba.irq_aff_mask; 12899 12900 if (aff_mask) { 12901 cpu_cnt = cpumask_weight(aff_mask); 12902 vectors = min(phba->cfg_irq_chann, cpu_cnt); 12903 12904 /* cpu: iterates over aff_mask including offline or online 12905 * cpu_select: iterates over online aff_mask to set affinity 12906 */ 12907 cpu = cpumask_first(aff_mask); 12908 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 12909 } else { 12910 flags |= PCI_IRQ_AFFINITY; 12911 } 12912 12913 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 12914 if (rc < 0) { 12915 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12916 "0484 PCI enable MSI-X failed (%d)\n", rc); 12917 goto vec_fail_out; 12918 } 12919 vectors = rc; 12920 12921 /* Assign MSI-X vectors to interrupt handlers */ 12922 for (index = 0; index < vectors; index++) { 12923 eqhdl = lpfc_get_eq_hdl(index); 12924 name = eqhdl->handler_name; 12925 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 12926 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 12927 LPFC_DRIVER_HANDLER_NAME"%d", index); 12928 12929 eqhdl->idx = index; 12930 rc = request_irq(pci_irq_vector(phba->pcidev, index), 12931 &lpfc_sli4_hba_intr_handler, 0, 12932 name, eqhdl); 12933 if (rc) { 12934 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12935 "0486 MSI-X fast-path (%d) " 12936 "request_irq failed (%d)\n", index, rc); 12937 goto cfg_fail_out; 12938 } 12939 12940 eqhdl->irq = pci_irq_vector(phba->pcidev, index); 12941 12942 if (aff_mask) { 12943 /* If found a neighboring online cpu, set affinity */ 12944 if (cpu_select < nr_cpu_ids) 12945 lpfc_irq_set_aff(eqhdl, cpu_select); 12946 12947 /* Assign EQ to cpu_map */ 12948 lpfc_assign_eq_map_info(phba, index, 12949 LPFC_CPU_FIRST_IRQ, 12950 cpu); 12951 12952 /* Iterate to next offline or online cpu in aff_mask */ 12953 cpu = cpumask_next(cpu, aff_mask); 12954 12955 /* Find next online cpu in aff_mask to set 
affinity */
12956 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
12957 } else if (vectors == 1) {
12958 cpu = cpumask_first(cpu_present_mask);
12959 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
12960 cpu);
12961 } else {
12962 maskp = pci_irq_get_affinity(phba->pcidev, index);
12963
12964 /* Loop through all CPUs associated with vector index */
12965 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
12966 cpup = &phba->sli4_hba.cpu_map[cpu];
12967
12968 /* If this is the first CPU that's assigned to
12969 * this vector, set LPFC_CPU_FIRST_IRQ.
12970 *
12971 * With certain platforms it's possible that irq
12972 * vectors are affinitized to all the cpus.
12973 * This can result in each cpu_map.eq being set
12974 * to the last vector, resulting in overwrite
12975 * of all the previous cpu_map.eq entries. Ensure that
12976 * each vector receives a place in cpu_map.
12977 * Later call to lpfc_cpu_affinity_check will
12978 * ensure we are nicely balanced out.
12979 */
12980 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
12981 continue;
12982 lpfc_assign_eq_map_info(phba, index,
12983 LPFC_CPU_FIRST_IRQ,
12984 cpu);
12985 break;
12986 }
12987 }
12988 }
12989
12990 if (vectors != phba->cfg_irq_chann) {
12991 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12992 "3238 Reducing IO channels to match number of "
12993 "MSI-X vectors, requested %d got %d\n",
12994 phba->cfg_irq_chann, vectors);
12995 if (phba->cfg_irq_chann > vectors)
12996 phba->cfg_irq_chann = vectors;
12997 }
12998
12999 return rc;
13000
13001 cfg_fail_out:
13002 /* free the irq already requested */
13003 for (--index; index >= 0; index--) {
13004 eqhdl = lpfc_get_eq_hdl(index);
13005 lpfc_irq_clear_aff(eqhdl);
13006 free_irq(eqhdl->irq, eqhdl);
13007 }
13008
13009 /* Unconfigure MSI-X capability structure */
13010 pci_free_irq_vectors(phba->pcidev);
13011
13012 vec_fail_out:
13013 return rc;
13014 }
13015
13016 /**
13017 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13018 * @phba: pointer to lpfc hba data structure.
13019 *
13020 * This routine is invoked to enable the MSI interrupt mode to device with
13021 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
13022 * called to enable the MSI vector. The device driver is responsible for
13023 * calling request_irq() to register the MSI vector with an interrupt
13024 * handler, which is done in this function.
13025 *
13026 * Return codes
13027 * 0 - successful
13028 * other values - error
13029 **/
13030 static int
13031 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13032 {
13033 int rc, index;
13034 unsigned int cpu;
13035 struct lpfc_hba_eq_hdl *eqhdl;
13036
13037 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13038 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
13039 if (rc > 0)
13040 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13041 "0487 PCI enable MSI mode success.\n");
13042 else {
13043 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13044 "0488 PCI enable MSI mode failed (%d)\n", rc);
13045 return rc ?
rc : -1; 13046 } 13047 13048 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13049 0, LPFC_DRIVER_NAME, phba); 13050 if (rc) { 13051 pci_free_irq_vectors(phba->pcidev); 13052 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13053 "0490 MSI request_irq failed (%d)\n", rc); 13054 return rc; 13055 } 13056 13057 eqhdl = lpfc_get_eq_hdl(0); 13058 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 13059 13060 cpu = cpumask_first(cpu_present_mask); 13061 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 13062 13063 for (index = 0; index < phba->cfg_irq_chann; index++) { 13064 eqhdl = lpfc_get_eq_hdl(index); 13065 eqhdl->idx = index; 13066 } 13067 13068 return 0; 13069 } 13070 13071 /** 13072 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 13073 * @phba: pointer to lpfc hba data structure. 13074 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 13075 * 13076 * This routine is invoked to enable device interrupt and associate driver's 13077 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 13078 * interface spec. Depends on the interrupt mode configured to the driver, 13079 * the driver will try to fallback from the configured interrupt mode to an 13080 * interrupt mode which is supported by the platform, kernel, and device in 13081 * the order of: 13082 * MSI-X -> MSI -> IRQ. 13083 * 13084 * Return codes 13085 * 0 - successful 13086 * other values - error 13087 **/ 13088 static uint32_t 13089 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 13090 { 13091 uint32_t intr_mode = LPFC_INTR_ERROR; 13092 int retval, idx; 13093 13094 if (cfg_mode == 2) { 13095 /* Preparation before conf_msi mbox cmd */ 13096 retval = 0; 13097 if (!retval) { 13098 /* Now, try to enable MSI-X interrupt mode */ 13099 retval = lpfc_sli4_enable_msix(phba); 13100 if (!retval) { 13101 /* Indicate initialization to MSI-X mode */ 13102 phba->intr_type = MSIX; 13103 intr_mode = 2; 13104 } 13105 } 13106 } 13107 13108 /* Fallback to MSI if MSI-X initialization failed */ 13109 if (cfg_mode >= 1 && phba->intr_type == NONE) { 13110 retval = lpfc_sli4_enable_msi(phba); 13111 if (!retval) { 13112 /* Indicate initialization to MSI mode */ 13113 phba->intr_type = MSI; 13114 intr_mode = 1; 13115 } 13116 } 13117 13118 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 13119 if (phba->intr_type == NONE) { 13120 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13121 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 13122 if (!retval) { 13123 struct lpfc_hba_eq_hdl *eqhdl; 13124 unsigned int cpu; 13125 13126 /* Indicate initialization to INTx mode */ 13127 phba->intr_type = INTx; 13128 intr_mode = 0; 13129 13130 eqhdl = lpfc_get_eq_hdl(0); 13131 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 13132 13133 cpu = cpumask_first(cpu_present_mask); 13134 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 13135 cpu); 13136 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 13137 eqhdl = lpfc_get_eq_hdl(idx); 13138 eqhdl->idx = idx; 13139 } 13140 } 13141 } 13142 return intr_mode; 13143 } 13144 13145 /** 13146 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 13147 * @phba: pointer to lpfc hba data structure. 13148 * 13149 * This routine is invoked to disable device interrupt and disassociate 13150 * the driver's interrupt handler(s) from interrupt vector(s) to device 13151 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 13152 * will release the interrupt vector(s) for the message signaled interrupt. 
13153 **/ 13154 static void 13155 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 13156 { 13157 /* Disable the currently initialized interrupt mode */ 13158 if (phba->intr_type == MSIX) { 13159 int index; 13160 struct lpfc_hba_eq_hdl *eqhdl; 13161 13162 /* Free up MSI-X multi-message vectors */ 13163 for (index = 0; index < phba->cfg_irq_chann; index++) { 13164 eqhdl = lpfc_get_eq_hdl(index); 13165 lpfc_irq_clear_aff(eqhdl); 13166 free_irq(eqhdl->irq, eqhdl); 13167 } 13168 } else { 13169 free_irq(phba->pcidev->irq, phba); 13170 } 13171 13172 pci_free_irq_vectors(phba->pcidev); 13173 13174 /* Reset interrupt management states */ 13175 phba->intr_type = NONE; 13176 phba->sli.slistat.sli_intr = 0; 13177 } 13178 13179 /** 13180 * lpfc_unset_hba - Unset SLI3 hba device initialization 13181 * @phba: pointer to lpfc hba data structure. 13182 * 13183 * This routine is invoked to unset the HBA device initialization steps to 13184 * a device with SLI-3 interface spec. 13185 **/ 13186 static void 13187 lpfc_unset_hba(struct lpfc_hba *phba) 13188 { 13189 struct lpfc_vport *vport = phba->pport; 13190 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 13191 13192 spin_lock_irq(shost->host_lock); 13193 vport->load_flag |= FC_UNLOADING; 13194 spin_unlock_irq(shost->host_lock); 13195 13196 kfree(phba->vpi_bmask); 13197 kfree(phba->vpi_ids); 13198 13199 lpfc_stop_hba_timers(phba); 13200 13201 phba->pport->work_port_events = 0; 13202 13203 lpfc_sli_hba_down(phba); 13204 13205 lpfc_sli_brdrestart(phba); 13206 13207 lpfc_sli_disable_intr(phba); 13208 13209 return; 13210 } 13211 13212 /** 13213 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 13214 * @phba: Pointer to HBA context object. 13215 * 13216 * This function is called in the SLI4 code path to wait for completion 13217 * of device's XRIs exchange busy. It will check the XRI exchange busy 13218 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 13219 * that, it will check the XRI exchange busy on outstanding FCP and ELS 13220 * I/Os every 30 seconds, log error message, and wait forever. Only when 13221 * all XRI exchange busy complete, the driver unload shall proceed with 13222 * invoking the function reset ioctl mailbox command to the CNA and the 13223 * the rest of the driver unload resource release. 13224 **/ 13225 static void 13226 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 13227 { 13228 struct lpfc_sli4_hdw_queue *qp; 13229 int idx, ccnt; 13230 int wait_time = 0; 13231 int io_xri_cmpl = 1; 13232 int nvmet_xri_cmpl = 1; 13233 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13234 13235 /* Driver just aborted IOs during the hba_unset process. Pause 13236 * here to give the HBA time to complete the IO and get entries 13237 * into the abts lists. 13238 */ 13239 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 13240 13241 /* Wait for NVME pending IO to flush back to transport. 
*/ 13242 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13243 lpfc_nvme_wait_for_io_drain(phba); 13244 13245 ccnt = 0; 13246 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13247 qp = &phba->sli4_hba.hdwq[idx]; 13248 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 13249 if (!io_xri_cmpl) /* if list is NOT empty */ 13250 ccnt++; 13251 } 13252 if (ccnt) 13253 io_xri_cmpl = 0; 13254 13255 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13256 nvmet_xri_cmpl = 13257 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13258 } 13259 13260 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 13261 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 13262 if (!nvmet_xri_cmpl) 13263 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13264 "6424 NVMET XRI exchange busy " 13265 "wait time: %d seconds.\n", 13266 wait_time/1000); 13267 if (!io_xri_cmpl) 13268 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13269 "6100 IO XRI exchange busy " 13270 "wait time: %d seconds.\n", 13271 wait_time/1000); 13272 if (!els_xri_cmpl) 13273 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13274 "2878 ELS XRI exchange busy " 13275 "wait time: %d seconds.\n", 13276 wait_time/1000); 13277 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 13278 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 13279 } else { 13280 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 13281 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 13282 } 13283 13284 ccnt = 0; 13285 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13286 qp = &phba->sli4_hba.hdwq[idx]; 13287 io_xri_cmpl = list_empty( 13288 &qp->lpfc_abts_io_buf_list); 13289 if (!io_xri_cmpl) /* if list is NOT empty */ 13290 ccnt++; 13291 } 13292 if (ccnt) 13293 io_xri_cmpl = 0; 13294 13295 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13296 nvmet_xri_cmpl = list_empty( 13297 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13298 } 13299 els_xri_cmpl = 13300 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13301 13302 } 13303 } 13304 13305 /** 13306 * lpfc_sli4_hba_unset - Unset the fcoe hba 13307 * @phba: Pointer to HBA context object. 13308 * 13309 * This function is called in the SLI4 code path to reset the HBA's FCoE 13310 * function. The caller is not required to hold any lock. This routine 13311 * issues PCI function reset mailbox command to reset the FCoE function. 13312 * At the end of the function, it calls lpfc_hba_down_post function to 13313 * free any pending commands. 13314 **/ 13315 static void 13316 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 13317 { 13318 int wait_cnt = 0; 13319 LPFC_MBOXQ_t *mboxq; 13320 struct pci_dev *pdev = phba->pcidev; 13321 13322 lpfc_stop_hba_timers(phba); 13323 hrtimer_cancel(&phba->cmf_timer); 13324 13325 if (phba->pport) 13326 phba->sli4_hba.intr_enable = 0; 13327 13328 /* 13329 * Gracefully wait out the potential current outstanding asynchronous 13330 * mailbox command. 
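 * The wait below is bounded: roughly LPFC_ACTIVE_MBOX_WAIT_CNT iterations
 * of 10 ms before any still-active mailbox command is forcefully completed
 * with MBX_NOT_FINISHED.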
13331 */ 13332 13333 /* First, block any pending async mailbox command from posted */ 13334 spin_lock_irq(&phba->hbalock); 13335 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 13336 spin_unlock_irq(&phba->hbalock); 13337 /* Now, trying to wait it out if we can */ 13338 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13339 msleep(10); 13340 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 13341 break; 13342 } 13343 /* Forcefully release the outstanding mailbox command if timed out */ 13344 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13345 spin_lock_irq(&phba->hbalock); 13346 mboxq = phba->sli.mbox_active; 13347 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 13348 __lpfc_mbox_cmpl_put(phba, mboxq); 13349 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13350 phba->sli.mbox_active = NULL; 13351 spin_unlock_irq(&phba->hbalock); 13352 } 13353 13354 /* Abort all iocbs associated with the hba */ 13355 lpfc_sli_hba_iocb_abort(phba); 13356 13357 /* Wait for completion of device XRI exchange busy */ 13358 lpfc_sli4_xri_exchange_busy_wait(phba); 13359 13360 /* per-phba callback de-registration for hotplug event */ 13361 if (phba->pport) 13362 lpfc_cpuhp_remove(phba); 13363 13364 /* Disable PCI subsystem interrupt */ 13365 lpfc_sli4_disable_intr(phba); 13366 13367 /* Disable SR-IOV if enabled */ 13368 if (phba->cfg_sriov_nr_virtfn) 13369 pci_disable_sriov(pdev); 13370 13371 /* Stop kthread signal shall trigger work_done one more time */ 13372 kthread_stop(phba->worker_thread); 13373 13374 /* Disable FW logging to host memory */ 13375 lpfc_ras_stop_fwlog(phba); 13376 13377 /* Unset the queues shared with the hardware then release all 13378 * allocated resources. 13379 */ 13380 lpfc_sli4_queue_unset(phba); 13381 lpfc_sli4_queue_destroy(phba); 13382 13383 /* Reset SLI4 HBA FCoE function */ 13384 lpfc_pci_function_reset(phba); 13385 13386 /* Free RAS DMA memory */ 13387 if (phba->ras_fwlog.ras_enabled) 13388 lpfc_sli4_ras_dma_free(phba); 13389 13390 /* Stop the SLI4 device port */ 13391 if (phba->pport) 13392 phba->pport->work_port_events = 0; 13393 } 13394 13395 static uint32_t 13396 lpfc_cgn_crc32(uint32_t crc, u8 byte) 13397 { 13398 uint32_t msb = 0; 13399 uint32_t bit; 13400 13401 for (bit = 0; bit < 8; bit++) { 13402 msb = (crc >> 31) & 1; 13403 crc <<= 1; 13404 13405 if (msb ^ (byte & 1)) { 13406 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; 13407 crc |= 1; 13408 } 13409 byte >>= 1; 13410 } 13411 return crc; 13412 } 13413 13414 static uint32_t 13415 lpfc_cgn_reverse_bits(uint32_t wd) 13416 { 13417 uint32_t result = 0; 13418 uint32_t i; 13419 13420 for (i = 0; i < 32; i++) { 13421 result <<= 1; 13422 result |= (1 & (wd >> i)); 13423 } 13424 return result; 13425 } 13426 13427 /* 13428 * The routine corresponds with the algorithm the HBA firmware 13429 * uses to validate the data integrity. 
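 *
 * A typical caller in this file seeds and stores the result as, e.g.:
 *   crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *   cp->cgn_info_crc = cpu_to_le32(crc);
 * i.e. a bitwise CRC over the buffer that is bit-reversed and complemented
 * before being stored little-endian.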
13430 */ 13431 uint32_t 13432 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) 13433 { 13434 uint32_t i; 13435 uint32_t result; 13436 uint8_t *data = (uint8_t *)ptr; 13437 13438 for (i = 0; i < byteLen; ++i) 13439 crc = lpfc_cgn_crc32(crc, data[i]); 13440 13441 result = ~lpfc_cgn_reverse_bits(crc); 13442 return result; 13443 } 13444 13445 void 13446 lpfc_init_congestion_buf(struct lpfc_hba *phba) 13447 { 13448 struct lpfc_cgn_info *cp; 13449 struct timespec64 cmpl_time; 13450 struct tm broken; 13451 uint16_t size; 13452 uint32_t crc; 13453 13454 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13455 "6235 INIT Congestion Buffer %p\n", phba->cgn_i); 13456 13457 if (!phba->cgn_i) 13458 return; 13459 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13460 13461 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 13462 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 13463 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 13464 atomic_set(&phba->cgn_sync_warn_cnt, 0); 13465 13466 atomic_set(&phba->cgn_driver_evt_cnt, 0); 13467 atomic_set(&phba->cgn_latency_evt_cnt, 0); 13468 atomic64_set(&phba->cgn_latency_evt, 0); 13469 phba->cgn_evt_minute = 0; 13470 phba->hba_flag &= ~HBA_CGN_DAY_WRAP; 13471 13472 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat)); 13473 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); 13474 cp->cgn_info_version = LPFC_CGN_INFO_V3; 13475 13476 /* cgn parameters */ 13477 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 13478 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 13479 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 13480 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 13481 13482 ktime_get_real_ts64(&cmpl_time); 13483 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13484 13485 cp->cgn_info_month = broken.tm_mon + 1; 13486 cp->cgn_info_day = broken.tm_mday; 13487 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ 13488 cp->cgn_info_hour = broken.tm_hour; 13489 cp->cgn_info_minute = broken.tm_min; 13490 cp->cgn_info_second = broken.tm_sec; 13491 13492 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13493 "2643 CGNInfo Init: Start Time " 13494 "%d/%d/%d %d:%d:%d\n", 13495 cp->cgn_info_day, cp->cgn_info_month, 13496 cp->cgn_info_year, cp->cgn_info_hour, 13497 cp->cgn_info_minute, cp->cgn_info_second); 13498 13499 /* Fill in default LUN qdepth */ 13500 if (phba->pport) { 13501 size = (uint16_t)(phba->pport->cfg_lun_queue_depth); 13502 cp->cgn_lunq = cpu_to_le16(size); 13503 } 13504 13505 /* last used Index initialized to 0xff already */ 13506 13507 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13508 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13509 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13510 cp->cgn_info_crc = cpu_to_le32(crc); 13511 13512 phba->cgn_evt_timestamp = jiffies + 13513 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 13514 } 13515 13516 void 13517 lpfc_init_congestion_stat(struct lpfc_hba *phba) 13518 { 13519 struct lpfc_cgn_info *cp; 13520 struct timespec64 cmpl_time; 13521 struct tm broken; 13522 uint32_t crc; 13523 13524 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13525 "6236 INIT Congestion Stat %p\n", phba->cgn_i); 13526 13527 if (!phba->cgn_i) 13528 return; 13529 13530 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13531 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); 13532 13533 ktime_get_real_ts64(&cmpl_time); 13534 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13535 13536 cp->cgn_stat_month = broken.tm_mon + 1; 13537 cp->cgn_stat_day = broken.tm_mday; 13538 cp->cgn_stat_year = 
broken.tm_year - 100; /* relative to 2000 */ 13539 cp->cgn_stat_hour = broken.tm_hour; 13540 cp->cgn_stat_minute = broken.tm_min; 13541 13542 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13543 "2647 CGNstat Init: Start Time " 13544 "%d/%d/%d %d:%d\n", 13545 cp->cgn_stat_day, cp->cgn_stat_month, 13546 cp->cgn_stat_year, cp->cgn_stat_hour, 13547 cp->cgn_stat_minute); 13548 13549 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13550 cp->cgn_info_crc = cpu_to_le32(crc); 13551 } 13552 13553 /** 13554 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA 13555 * @phba: Pointer to hba context object. 13556 * @reg: flag to determine register or unregister. 13557 */ 13558 static int 13559 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) 13560 { 13561 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; 13562 union lpfc_sli4_cfg_shdr *shdr; 13563 uint32_t shdr_status, shdr_add_status; 13564 LPFC_MBOXQ_t *mboxq; 13565 int length, rc; 13566 13567 if (!phba->cgn_i) 13568 return -ENXIO; 13569 13570 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13571 if (!mboxq) { 13572 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 13573 "2641 REG_CONGESTION_BUF mbox allocation fail: " 13574 "HBA state x%x reg %d\n", 13575 phba->pport->port_state, reg); 13576 return -ENOMEM; 13577 } 13578 13579 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - 13580 sizeof(struct lpfc_sli4_cfg_mhdr)); 13581 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13582 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, 13583 LPFC_SLI4_MBX_EMBED); 13584 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; 13585 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); 13586 if (reg > 0) 13587 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); 13588 else 13589 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); 13590 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); 13591 reg_congestion_buf->addr_lo = 13592 putPaddrLow(phba->cgn_i->phys); 13593 reg_congestion_buf->addr_hi = 13594 putPaddrHigh(phba->cgn_i->phys); 13595 13596 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13597 shdr = (union lpfc_sli4_cfg_shdr *) 13598 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 13599 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13600 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13601 &shdr->response); 13602 mempool_free(mboxq, phba->mbox_mem_pool); 13603 if (shdr_status || shdr_add_status || rc) { 13604 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13605 "2642 REG_CONGESTION_BUF mailbox " 13606 "failed with status x%x add_status x%x," 13607 " mbx status x%x reg %d\n", 13608 shdr_status, shdr_add_status, rc, reg); 13609 return -ENXIO; 13610 } 13611 return 0; 13612 } 13613 13614 int 13615 lpfc_unreg_congestion_buf(struct lpfc_hba *phba) 13616 { 13617 lpfc_cmf_stop(phba); 13618 return __lpfc_reg_congestion_buf(phba, 0); 13619 } 13620 13621 int 13622 lpfc_reg_congestion_buf(struct lpfc_hba *phba) 13623 { 13624 return __lpfc_reg_congestion_buf(phba, 1); 13625 } 13626 13627 /** 13628 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 13629 * @phba: Pointer to HBA context object. 13630 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 13631 * 13632 * This function is called in the SLI4 code path to read the port's 13633 * sli4 capabilities. 13634 * 13635 * This function may be be called from any context that can block-wait 13636 * for the completion. 
The expectation is that this routine is called 13637 * typically from probe_one or from the online routine. 13638 **/ 13639 int 13640 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 13641 { 13642 int rc; 13643 struct lpfc_mqe *mqe = &mboxq->u.mqe; 13644 struct lpfc_pc_sli4_params *sli4_params; 13645 uint32_t mbox_tmo; 13646 int length; 13647 bool exp_wqcq_pages = true; 13648 struct lpfc_sli4_parameters *mbx_sli4_parameters; 13649 13650 /* 13651 * By default, the driver assumes the SLI4 port requires RPI 13652 * header postings. The SLI4_PARAM response will correct this 13653 * assumption. 13654 */ 13655 phba->sli4_hba.rpi_hdrs_in_use = 1; 13656 13657 /* Read the port's SLI4 Config Parameters */ 13658 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 13659 sizeof(struct lpfc_sli4_cfg_mhdr)); 13660 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13661 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 13662 length, LPFC_SLI4_MBX_EMBED); 13663 if (!phba->sli4_hba.intr_enable) 13664 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13665 else { 13666 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 13667 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 13668 } 13669 if (unlikely(rc)) 13670 return rc; 13671 sli4_params = &phba->sli4_hba.pc_sli4_params; 13672 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 13673 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 13674 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 13675 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 13676 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 13677 mbx_sli4_parameters); 13678 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 13679 mbx_sli4_parameters); 13680 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 13681 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 13682 else 13683 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 13684 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 13685 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, 13686 mbx_sli4_parameters); 13687 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 13688 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 13689 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 13690 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 13691 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 13692 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 13693 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 13694 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 13695 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 13696 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 13697 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 13698 mbx_sli4_parameters); 13699 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 13700 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 13701 mbx_sli4_parameters); 13702 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 13703 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 13704 13705 /* Check for Extended Pre-Registered SGL support */ 13706 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 13707 13708 /* Check for firmware nvme support */ 13709 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 13710 bf_get(cfg_xib, mbx_sli4_parameters)); 13711 13712 if (rc) { 13713 /* Save this to indicate the Firmware supports NVME */ 13714 sli4_params->nvme = 1; 13715 13716 /* Firmware NVME support, 
check driver FC4 NVME support */ 13717 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 13718 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13719 "6133 Disabling NVME support: " 13720 "FC4 type not supported: x%x\n", 13721 phba->cfg_enable_fc4_type); 13722 goto fcponly; 13723 } 13724 } else { 13725 /* No firmware NVME support, check driver FC4 NVME support */ 13726 sli4_params->nvme = 0; 13727 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 13729 "6101 Disabling NVME support: Not " 13730 "supported by firmware (%d %d) x%x\n", 13731 bf_get(cfg_nvme, mbx_sli4_parameters), 13732 bf_get(cfg_xib, mbx_sli4_parameters), 13733 phba->cfg_enable_fc4_type); 13734 fcponly: 13735 phba->nvmet_support = 0; 13736 phba->cfg_nvmet_mrq = 0; 13737 phba->cfg_nvme_seg_cnt = 0; 13738 13739 /* If no FC4 type support, move to just SCSI support */ 13740 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 13741 return -ENODEV; 13742 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 13743 } 13744 } 13745 13746 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 13747 * accommodate 512K and 1M IOs in a single nvme buf. 13748 */ 13749 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13750 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 13751 13752 /* Enable embedded Payload BDE if support is indicated */ 13753 if (bf_get(cfg_pbde, mbx_sli4_parameters)) 13754 phba->cfg_enable_pbde = 1; 13755 else 13756 phba->cfg_enable_pbde = 0; 13757 13758 /* 13759 * To support Suppress Response feature we must satisfy 3 conditions. 13760 * lpfc_suppress_rsp module parameter must be set (default). 13761 * In SLI4-Parameters Descriptor: 13762 * Extended Inline Buffers (XIB) must be supported. 13763 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 13764 * (double negative). 13765 */ 13766 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 13767 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 13768 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 13769 else 13770 phba->cfg_suppress_rsp = 0; 13771 13772 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 13773 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 13774 13775 /* Make sure that sge_supp_len can be handled by the driver */ 13776 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 13777 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 13778 13779 /* 13780 * Check whether the adapter supports an embedded copy of the 13781 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 13782 * to use this option, 128-byte WQEs must be used. 
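 * (If the port does not report cfg_ext_embed_cb, fcp_embed_io simply
 * stays 0 in the assignment below.)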
13783 */ 13784 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 13785 phba->fcp_embed_io = 1; 13786 else 13787 phba->fcp_embed_io = 0; 13788 13789 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13790 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 13791 bf_get(cfg_xib, mbx_sli4_parameters), 13792 phba->cfg_enable_pbde, 13793 phba->fcp_embed_io, sli4_params->nvme, 13794 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 13795 13796 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 13797 LPFC_SLI_INTF_IF_TYPE_2) && 13798 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 13799 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 13800 exp_wqcq_pages = false; 13801 13802 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 13803 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 13804 exp_wqcq_pages && 13805 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 13806 phba->enab_exp_wqcq_pages = 1; 13807 else 13808 phba->enab_exp_wqcq_pages = 0; 13809 /* 13810 * Check if the SLI port supports MDS Diagnostics 13811 */ 13812 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 13813 phba->mds_diags_support = 1; 13814 else 13815 phba->mds_diags_support = 0; 13816 13817 /* 13818 * Check if the SLI port supports NSLER 13819 */ 13820 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 13821 phba->nsler = 1; 13822 else 13823 phba->nsler = 0; 13824 13825 return 0; 13826 } 13827 13828 /** 13829 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 13830 * @pdev: pointer to PCI device 13831 * @pid: pointer to PCI device identifier 13832 * 13833 * This routine is to be called to attach a device with SLI-3 interface spec 13834 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 13835 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 13836 * information of the device and driver to see if the driver state that it can 13837 * support this kind of device. If the match is successful, the driver core 13838 * invokes this routine. If this routine determines it can claim the HBA, it 13839 * does all the initialization that it needs to do to handle the HBA properly. 
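 * Interrupt bring-up in this routine walks down from the configured mode --
 * MSI-X, then MSI, then INTx -- retrying until the active-interrupt test
 * passes.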
13840 * 13841 * Return code 13842 * 0 - driver can claim the device 13843 * negative value - driver can not claim the device 13844 **/ 13845 static int 13846 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 13847 { 13848 struct lpfc_hba *phba; 13849 struct lpfc_vport *vport = NULL; 13850 struct Scsi_Host *shost = NULL; 13851 int error; 13852 uint32_t cfg_mode, intr_mode; 13853 13854 /* Allocate memory for HBA structure */ 13855 phba = lpfc_hba_alloc(pdev); 13856 if (!phba) 13857 return -ENOMEM; 13858 13859 /* Perform generic PCI device enabling operation */ 13860 error = lpfc_enable_pci_dev(phba); 13861 if (error) 13862 goto out_free_phba; 13863 13864 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 13865 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 13866 if (error) 13867 goto out_disable_pci_dev; 13868 13869 /* Set up SLI-3 specific device PCI memory space */ 13870 error = lpfc_sli_pci_mem_setup(phba); 13871 if (error) { 13872 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13873 "1402 Failed to set up pci memory space.\n"); 13874 goto out_disable_pci_dev; 13875 } 13876 13877 /* Set up SLI-3 specific device driver resources */ 13878 error = lpfc_sli_driver_resource_setup(phba); 13879 if (error) { 13880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13881 "1404 Failed to set up driver resource.\n"); 13882 goto out_unset_pci_mem_s3; 13883 } 13884 13885 /* Initialize and populate the iocb list per host */ 13886 13887 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 13888 if (error) { 13889 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13890 "1405 Failed to initialize iocb list.\n"); 13891 goto out_unset_driver_resource_s3; 13892 } 13893 13894 /* Set up common device driver resources */ 13895 error = lpfc_setup_driver_resource_phase2(phba); 13896 if (error) { 13897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13898 "1406 Failed to set up driver resource.\n"); 13899 goto out_free_iocb_list; 13900 } 13901 13902 /* Get the default values for Model Name and Description */ 13903 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 13904 13905 /* Create SCSI host to the physical port */ 13906 error = lpfc_create_shost(phba); 13907 if (error) { 13908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13909 "1407 Failed to create scsi host.\n"); 13910 goto out_unset_driver_resource; 13911 } 13912 13913 /* Configure sysfs attributes */ 13914 vport = phba->pport; 13915 error = lpfc_alloc_sysfs_attr(vport); 13916 if (error) { 13917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13918 "1476 Failed to allocate sysfs attr\n"); 13919 goto out_destroy_shost; 13920 } 13921 13922 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 13923 /* Now, trying to enable interrupt and bring up the device */ 13924 cfg_mode = phba->cfg_use_msi; 13925 while (true) { 13926 /* Put device to a known state before enabling interrupt */ 13927 lpfc_stop_port(phba); 13928 /* Configure and enable interrupt */ 13929 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 13930 if (intr_mode == LPFC_INTR_ERROR) { 13931 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13932 "0431 Failed to enable interrupt.\n"); 13933 error = -ENODEV; 13934 goto out_free_sysfs_attr; 13935 } 13936 /* SLI-3 HBA setup */ 13937 if (lpfc_sli_hba_setup(phba)) { 13938 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13939 "1477 Failed to set up hba\n"); 13940 error = -ENODEV; 13941 goto out_remove_device; 13942 } 13943 13944 /* Wait 50ms for the interrupts of previous mailbox commands */ 13945 
msleep(50);
13946 /* Check active interrupts on message signaled interrupts */
13947 if (intr_mode == 0 ||
13948 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
13949 /* Log the current active interrupt mode */
13950 phba->intr_mode = intr_mode;
13951 lpfc_log_intr_mode(phba, intr_mode);
13952 break;
13953 } else {
13954 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13955 "0447 Configure interrupt mode (%d) "
13956 "failed active interrupt test.\n",
13957 intr_mode);
13958 /* Disable the current interrupt mode */
13959 lpfc_sli_disable_intr(phba);
13960 /* Try next level of interrupt mode */
13961 cfg_mode = --intr_mode;
13962 }
13963 }
13964
13965 /* Perform post initialization setup */
13966 lpfc_post_init_setup(phba);
13967
13968 /* Check if there are static vports to be created. */
13969 lpfc_create_static_vport(phba);
13970
13971 return 0;
13972
13973 out_remove_device:
13974 lpfc_unset_hba(phba);
13975 out_free_sysfs_attr:
13976 lpfc_free_sysfs_attr(vport);
13977 out_destroy_shost:
13978 lpfc_destroy_shost(phba);
13979 out_unset_driver_resource:
13980 lpfc_unset_driver_resource_phase2(phba);
13981 out_free_iocb_list:
13982 lpfc_free_iocb_list(phba);
13983 out_unset_driver_resource_s3:
13984 lpfc_sli_driver_resource_unset(phba);
13985 out_unset_pci_mem_s3:
13986 lpfc_sli_pci_mem_unset(phba);
13987 out_disable_pci_dev:
13988 lpfc_disable_pci_dev(phba);
13989 if (shost)
13990 scsi_host_put(shost);
13991 out_free_phba:
13992 lpfc_hba_free(phba);
13993 return error;
13994 }
13995
13996 /**
13997 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
13998 * @pdev: pointer to PCI device
13999 *
14000 * This routine is to be called to detach a device with SLI-3 interface
14001 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
14002 * removed from the PCI bus, it performs all the necessary cleanup for the HBA
14003 * device to be removed from the PCI subsystem properly.
14004 **/
14005 static void
14006 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
14007 {
14008 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14009 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14010 struct lpfc_vport **vports;
14011 struct lpfc_hba *phba = vport->phba;
14012 int i;
14013
14014 spin_lock_irq(&phba->hbalock);
14015 vport->load_flag |= FC_UNLOADING;
14016 spin_unlock_irq(&phba->hbalock);
14017
14018 lpfc_free_sysfs_attr(vport);
14019
14020 /* Release all the vports against this physical port */
14021 vports = lpfc_create_vport_work_array(phba);
14022 if (vports != NULL)
14023 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14024 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14025 continue;
14026 fc_vport_terminate(vports[i]->fc_vport);
14027 }
14028 lpfc_destroy_vport_work_array(phba, vports);
14029
14030 /* Remove FC host with the physical port */
14031 fc_remove_host(shost);
14032 scsi_remove_host(shost);
14033
14034 /* Clean up all nodes, mailboxes and IOs. */
14035 lpfc_cleanup(vport);
14036
14037 /*
14038 * Bring down the SLI Layer. This step disables all interrupts,
14039 * clears the rings, discards all mailbox commands, and resets
14040 * the HBA.
14041 */ 14042 14043 /* HBA interrupt will be disabled after this call */ 14044 lpfc_sli_hba_down(phba); 14045 /* Stop kthread signal shall trigger work_done one more time */ 14046 kthread_stop(phba->worker_thread); 14047 /* Final cleanup of txcmplq and reset the HBA */ 14048 lpfc_sli_brdrestart(phba); 14049 14050 kfree(phba->vpi_bmask); 14051 kfree(phba->vpi_ids); 14052 14053 lpfc_stop_hba_timers(phba); 14054 spin_lock_irq(&phba->port_list_lock); 14055 list_del_init(&vport->listentry); 14056 spin_unlock_irq(&phba->port_list_lock); 14057 14058 lpfc_debugfs_terminate(vport); 14059 14060 /* Disable SR-IOV if enabled */ 14061 if (phba->cfg_sriov_nr_virtfn) 14062 pci_disable_sriov(pdev); 14063 14064 /* Disable interrupt */ 14065 lpfc_sli_disable_intr(phba); 14066 14067 scsi_host_put(shost); 14068 14069 /* 14070 * Call scsi_free before mem_free since scsi bufs are released to their 14071 * corresponding pools here. 14072 */ 14073 lpfc_scsi_free(phba); 14074 lpfc_free_iocb_list(phba); 14075 14076 lpfc_mem_free_all(phba); 14077 14078 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 14079 phba->hbqslimp.virt, phba->hbqslimp.phys); 14080 14081 /* Free resources associated with SLI2 interface */ 14082 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 14083 phba->slim2p.virt, phba->slim2p.phys); 14084 14085 /* unmap adapter SLIM and Control Registers */ 14086 iounmap(phba->ctrl_regs_memmap_p); 14087 iounmap(phba->slim_memmap_p); 14088 14089 lpfc_hba_free(phba); 14090 14091 pci_release_mem_regions(pdev); 14092 pci_disable_device(pdev); 14093 } 14094 14095 /** 14096 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 14097 * @dev_d: pointer to device 14098 * 14099 * This routine is to be called from the kernel's PCI subsystem to support 14100 * system Power Management (PM) to device with SLI-3 interface spec. When 14101 * PM invokes this method, it quiesces the device by stopping the driver's 14102 * worker thread for the device, turning off device's interrupt and DMA, 14103 * and bring the device offline. Note that as the driver implements the 14104 * minimum PM requirements to a power-aware driver's PM support for the 14105 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 14106 * to the suspend() method call will be treated as SUSPEND and the driver will 14107 * fully reinitialize its device during resume() method call, the driver will 14108 * set device to PCI_D3hot state in PCI config space instead of setting it 14109 * according to the @msg provided by the PM. 14110 * 14111 * Return code 14112 * 0 - driver suspended the device 14113 * Error otherwise 14114 **/ 14115 static int __maybe_unused 14116 lpfc_pci_suspend_one_s3(struct device *dev_d) 14117 { 14118 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14119 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14120 14121 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14122 "0473 PCI device Power Management suspend.\n"); 14123 14124 /* Bring down the device */ 14125 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14126 lpfc_offline(phba); 14127 kthread_stop(phba->worker_thread); 14128 14129 /* Disable interrupt from device */ 14130 lpfc_sli_disable_intr(phba); 14131 14132 return 0; 14133 } 14134 14135 /** 14136 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 14137 * @dev_d: pointer to device 14138 * 14139 * This routine is to be called from the kernel's PCI subsystem to support 14140 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 14141 * invokes this method, it restores the device's PCI config space state and 14142 * fully reinitializes the device and brings it online. Note that as the 14143 * driver implements the minimum PM requirements to a power-aware driver's 14144 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 14145 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 14146 * driver will fully reinitialize its device during resume() method call, 14147 * the device will be set to PCI_D0 directly in PCI config space before 14148 * restoring the state. 14149 * 14150 * Return code 14151 * 0 - driver suspended the device 14152 * Error otherwise 14153 **/ 14154 static int __maybe_unused 14155 lpfc_pci_resume_one_s3(struct device *dev_d) 14156 { 14157 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14158 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14159 uint32_t intr_mode; 14160 int error; 14161 14162 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14163 "0452 PCI device Power Management resume.\n"); 14164 14165 /* Startup the kernel thread for this host adapter. */ 14166 phba->worker_thread = kthread_run(lpfc_do_work, phba, 14167 "lpfc_worker_%d", phba->brd_no); 14168 if (IS_ERR(phba->worker_thread)) { 14169 error = PTR_ERR(phba->worker_thread); 14170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14171 "0434 PM resume failed to start worker " 14172 "thread: error=x%x.\n", error); 14173 return error; 14174 } 14175 14176 /* Init cpu_map array */ 14177 lpfc_cpu_map_array_init(phba); 14178 /* Init hba_eq_hdl array */ 14179 lpfc_hba_eq_hdl_array_init(phba); 14180 /* Configure and enable interrupt */ 14181 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14182 if (intr_mode == LPFC_INTR_ERROR) { 14183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14184 "0430 PM resume Failed to enable interrupt\n"); 14185 return -EIO; 14186 } else 14187 phba->intr_mode = intr_mode; 14188 14189 /* Restart HBA and bring it online */ 14190 lpfc_sli_brdrestart(phba); 14191 lpfc_online(phba); 14192 14193 /* Log the current active interrupt mode */ 14194 lpfc_log_intr_mode(phba, phba->intr_mode); 14195 14196 return 0; 14197 } 14198 14199 /** 14200 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 14201 * @phba: pointer to lpfc hba data structure. 14202 * 14203 * This routine is called to prepare the SLI3 device for PCI slot recover. It 14204 * aborts all the outstanding SCSI I/Os to the pci device. 14205 **/ 14206 static void 14207 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 14208 { 14209 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14210 "2723 PCI channel I/O abort preparing for recovery\n"); 14211 14212 /* 14213 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 14214 * and let the SCSI mid-layer to retry them to recover. 14215 */ 14216 lpfc_sli_abort_fcp_rings(phba); 14217 } 14218 14219 /** 14220 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 14221 * @phba: pointer to lpfc hba data structure. 14222 * 14223 * This routine is called to prepare the SLI3 device for PCI slot reset. It 14224 * disables the device interrupt and pci device, and aborts the internal FCP 14225 * pending I/Os. 
14226 **/ 14227 static void 14228 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 14229 { 14230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14231 "2710 PCI channel disable preparing for reset\n"); 14232 14233 /* Block any management I/Os to the device */ 14234 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 14235 14236 /* Block all SCSI devices' I/Os on the host */ 14237 lpfc_scsi_dev_block(phba); 14238 14239 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 14240 lpfc_sli_flush_io_rings(phba); 14241 14242 /* stop all timers */ 14243 lpfc_stop_hba_timers(phba); 14244 14245 /* Disable interrupt and pci device */ 14246 lpfc_sli_disable_intr(phba); 14247 pci_disable_device(phba->pcidev); 14248 } 14249 14250 /** 14251 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 14252 * @phba: pointer to lpfc hba data structure. 14253 * 14254 * This routine is called to prepare the SLI3 device for PCI slot permanently 14255 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 14256 * pending I/Os. 14257 **/ 14258 static void 14259 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 14260 { 14261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14262 "2711 PCI channel permanent disable for failure\n"); 14263 /* Block all SCSI devices' I/Os on the host */ 14264 lpfc_scsi_dev_block(phba); 14265 14266 /* stop all timers */ 14267 lpfc_stop_hba_timers(phba); 14268 14269 /* Clean up all driver's outstanding SCSI I/Os */ 14270 lpfc_sli_flush_io_rings(phba); 14271 } 14272 14273 /** 14274 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 14275 * @pdev: pointer to PCI device. 14276 * @state: the current PCI connection state. 14277 * 14278 * This routine is called from the PCI subsystem for I/O error handling to 14279 * device with SLI-3 interface spec. This function is called by the PCI 14280 * subsystem after a PCI bus error affecting this device has been detected. 14281 * When this function is invoked, it will need to stop all the I/Os and 14282 * interrupt(s) to the device. Once that is done, it will return 14283 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 14284 * as desired. 
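 * The reported pci_channel_state_t (normal, frozen, or permanent failure)
 * selects how aggressively the port is quiesced, as the switch below shows.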
14285 * 14286 * Return codes 14287 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 14288 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 14289 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14290 **/ 14291 static pci_ers_result_t 14292 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 14293 { 14294 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14295 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14296 14297 switch (state) { 14298 case pci_channel_io_normal: 14299 /* Non-fatal error, prepare for recovery */ 14300 lpfc_sli_prep_dev_for_recover(phba); 14301 return PCI_ERS_RESULT_CAN_RECOVER; 14302 case pci_channel_io_frozen: 14303 /* Fatal error, prepare for slot reset */ 14304 lpfc_sli_prep_dev_for_reset(phba); 14305 return PCI_ERS_RESULT_NEED_RESET; 14306 case pci_channel_io_perm_failure: 14307 /* Permanent failure, prepare for device down */ 14308 lpfc_sli_prep_dev_for_perm_failure(phba); 14309 return PCI_ERS_RESULT_DISCONNECT; 14310 default: 14311 /* Unknown state, prepare and request slot reset */ 14312 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14313 "0472 Unknown PCI error state: x%x\n", state); 14314 lpfc_sli_prep_dev_for_reset(phba); 14315 return PCI_ERS_RESULT_NEED_RESET; 14316 } 14317 } 14318 14319 /** 14320 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 14321 * @pdev: pointer to PCI device. 14322 * 14323 * This routine is called from the PCI subsystem for error handling to 14324 * device with SLI-3 interface spec. This is called after PCI bus has been 14325 * reset to restart the PCI card from scratch, as if from a cold-boot. 14326 * During the PCI subsystem error recovery, after driver returns 14327 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 14328 * recovery and then call this routine before calling the .resume method 14329 * to recover the device. This function will initialize the HBA device, 14330 * enable the interrupt, but it will just put the HBA to offline state 14331 * without passing any I/O traffic. 14332 * 14333 * Return codes 14334 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 14335 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14336 */ 14337 static pci_ers_result_t 14338 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 14339 { 14340 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14341 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14342 struct lpfc_sli *psli = &phba->sli; 14343 uint32_t intr_mode; 14344 14345 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 14346 if (pci_enable_device_mem(pdev)) { 14347 printk(KERN_ERR "lpfc: Cannot re-enable " 14348 "PCI device after reset.\n"); 14349 return PCI_ERS_RESULT_DISCONNECT; 14350 } 14351 14352 pci_restore_state(pdev); 14353 14354 /* 14355 * As the new kernel behavior of pci_restore_state() API call clears 14356 * device saved_state flag, need to save the restored state again. 
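	 * Without a fresh save, a later pci_restore_state() during another
	 * recovery pass would be a no-op.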
14357 */ 14358 pci_save_state(pdev); 14359 14360 if (pdev->is_busmaster) 14361 pci_set_master(pdev); 14362 14363 spin_lock_irq(&phba->hbalock); 14364 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 14365 spin_unlock_irq(&phba->hbalock); 14366 14367 /* Configure and enable interrupt */ 14368 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14369 if (intr_mode == LPFC_INTR_ERROR) { 14370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14371 "0427 Cannot re-enable interrupt after " 14372 "slot reset.\n"); 14373 return PCI_ERS_RESULT_DISCONNECT; 14374 } else 14375 phba->intr_mode = intr_mode; 14376 14377 /* Take device offline, it will perform cleanup */ 14378 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14379 lpfc_offline(phba); 14380 lpfc_sli_brdrestart(phba); 14381 14382 /* Log the current active interrupt mode */ 14383 lpfc_log_intr_mode(phba, phba->intr_mode); 14384 14385 return PCI_ERS_RESULT_RECOVERED; 14386 } 14387 14388 /** 14389 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 14390 * @pdev: pointer to PCI device 14391 * 14392 * This routine is called from the PCI subsystem for error handling to device 14393 * with SLI-3 interface spec. It is called when kernel error recovery tells 14394 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 14395 * error recovery. After this call, traffic can start to flow from this device 14396 * again. 14397 */ 14398 static void 14399 lpfc_io_resume_s3(struct pci_dev *pdev) 14400 { 14401 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14402 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14403 14404 /* Bring device online, it will be no-op for non-fatal error resume */ 14405 lpfc_online(phba); 14406 } 14407 14408 /** 14409 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 14410 * @phba: pointer to lpfc hba data structure. 14411 * 14412 * returns the number of ELS/CT IOCBs to reserve 14413 **/ 14414 int 14415 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 14416 { 14417 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 14418 14419 if (phba->sli_rev == LPFC_SLI_REV4) { 14420 if (max_xri <= 100) 14421 return 10; 14422 else if (max_xri <= 256) 14423 return 25; 14424 else if (max_xri <= 512) 14425 return 50; 14426 else if (max_xri <= 1024) 14427 return 100; 14428 else if (max_xri <= 1536) 14429 return 150; 14430 else if (max_xri <= 2048) 14431 return 200; 14432 else 14433 return 250; 14434 } else 14435 return 0; 14436 } 14437 14438 /** 14439 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 14440 * @phba: pointer to lpfc hba data structure. 14441 * 14442 * returns the number of ELS/CT + NVMET IOCBs to reserve 14443 **/ 14444 int 14445 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 14446 { 14447 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 14448 14449 if (phba->nvmet_support) 14450 max_xri += LPFC_NVMET_BUF_POST; 14451 return max_xri; 14452 } 14453 14454 14455 static int 14456 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 14457 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 14458 const struct firmware *fw) 14459 { 14460 int rc; 14461 u8 sli_family; 14462 14463 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 14464 /* Three cases: (1) FW was not supported on the detected adapter. 14465 * (2) FW update has been locked out administratively. 14466 * (3) Some other error during FW update. 
14467 * In each case, an unmaskable message is written to the console 14468 * for admin diagnosis. 14469 */ 14470 if (offset == ADD_STATUS_FW_NOT_SUPPORTED || 14471 (sli_family == LPFC_SLI_INTF_FAMILY_G6 && 14472 magic_number != MAGIC_NUMBER_G6) || 14473 (sli_family == LPFC_SLI_INTF_FAMILY_G7 && 14474 magic_number != MAGIC_NUMBER_G7) || 14475 (sli_family == LPFC_SLI_INTF_FAMILY_G7P && 14476 magic_number != MAGIC_NUMBER_G7P)) { 14477 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14478 "3030 This firmware version is not supported on" 14479 " this HBA model. Device:%x Magic:%x Type:%x " 14480 "ID:%x Size %d %zd\n", 14481 phba->pcidev->device, magic_number, ftype, fid, 14482 fsize, fw->size); 14483 rc = -EINVAL; 14484 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { 14485 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14486 "3021 Firmware downloads have been prohibited " 14487 "by a system configuration setting on " 14488 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14489 "%zd\n", 14490 phba->pcidev->device, magic_number, ftype, fid, 14491 fsize, fw->size); 14492 rc = -EACCES; 14493 } else { 14494 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14495 "3022 FW Download failed. Add Status x%x " 14496 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14497 "%zd\n", 14498 offset, phba->pcidev->device, magic_number, 14499 ftype, fid, fsize, fw->size); 14500 rc = -EIO; 14501 } 14502 return rc; 14503 } 14504 14505 /** 14506 * lpfc_write_firmware - attempt to write a firmware image to the port 14507 * @fw: pointer to firmware image returned from request_firmware. 14508 * @context: pointer to firmware image returned from request_firmware. 14509 * 14510 **/ 14511 static void 14512 lpfc_write_firmware(const struct firmware *fw, void *context) 14513 { 14514 struct lpfc_hba *phba = (struct lpfc_hba *)context; 14515 char fwrev[FW_REV_STR_SIZE]; 14516 struct lpfc_grp_hdr *image; 14517 struct list_head dma_buffer_list; 14518 int i, rc = 0; 14519 struct lpfc_dmabuf *dmabuf, *next; 14520 uint32_t offset = 0, temp_offset = 0; 14521 uint32_t magic_number, ftype, fid, fsize; 14522 14523 /* It can be null in no-wait mode, sanity check */ 14524 if (!fw) { 14525 rc = -ENXIO; 14526 goto out; 14527 } 14528 image = (struct lpfc_grp_hdr *)fw->data; 14529 14530 magic_number = be32_to_cpu(image->magic_number); 14531 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 14532 fid = bf_get_be32(lpfc_grp_hdr_id, image); 14533 fsize = be32_to_cpu(image->size); 14534 14535 INIT_LIST_HEAD(&dma_buffer_list); 14536 lpfc_decode_firmware_rev(phba, fwrev, 1); 14537 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 14538 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14539 "3023 Updating Firmware, Current Version:%s " 14540 "New Version:%s\n", 14541 fwrev, image->revision); 14542 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 14543 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 14544 GFP_KERNEL); 14545 if (!dmabuf) { 14546 rc = -ENOMEM; 14547 goto release_out; 14548 } 14549 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 14550 SLI4_PAGE_SIZE, 14551 &dmabuf->phys, 14552 GFP_KERNEL); 14553 if (!dmabuf->virt) { 14554 kfree(dmabuf); 14555 rc = -ENOMEM; 14556 goto release_out; 14557 } 14558 list_add_tail(&dmabuf->list, &dma_buffer_list); 14559 } 14560 while (offset < fw->size) { 14561 temp_offset = offset; 14562 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 14563 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 14564 memcpy(dmabuf->virt, 14565 fw->data + temp_offset, 14566 fw->size - temp_offset); 
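				/* Reached the end of the image; stop filling buffers for this pass. */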
14567 temp_offset = fw->size; 14568 break; 14569 } 14570 memcpy(dmabuf->virt, fw->data + temp_offset, 14571 SLI4_PAGE_SIZE); 14572 temp_offset += SLI4_PAGE_SIZE; 14573 } 14574 rc = lpfc_wr_object(phba, &dma_buffer_list, 14575 (fw->size - offset), &offset); 14576 if (rc) { 14577 rc = lpfc_log_write_firmware_error(phba, offset, 14578 magic_number, 14579 ftype, 14580 fid, 14581 fsize, 14582 fw); 14583 goto release_out; 14584 } 14585 } 14586 rc = offset; 14587 } else 14588 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14589 "3029 Skipped Firmware update, Current " 14590 "Version:%s New Version:%s\n", 14591 fwrev, image->revision); 14592 14593 release_out: 14594 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 14595 list_del(&dmabuf->list); 14596 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 14597 dmabuf->virt, dmabuf->phys); 14598 kfree(dmabuf); 14599 } 14600 release_firmware(fw); 14601 out: 14602 if (rc < 0) 14603 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14604 "3062 Firmware update error, status %d.\n", rc); 14605 else 14606 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14607 "3024 Firmware update success: size %d.\n", rc); 14608 } 14609 14610 /** 14611 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 14612 * @phba: pointer to lpfc hba data structure. 14613 * @fw_upgrade: which firmware to update. 14614 * 14615 * This routine is called to perform Linux generic firmware upgrade on device 14616 * that supports such feature. 14617 **/ 14618 int 14619 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 14620 { 14621 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 14622 int ret; 14623 const struct firmware *fw; 14624 14625 /* Only supported on SLI4 interface type 2 for now */ 14626 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 14627 LPFC_SLI_INTF_IF_TYPE_2) 14628 return -EPERM; 14629 14630 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 14631 14632 if (fw_upgrade == INT_FW_UPGRADE) { 14633 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, 14634 file_name, &phba->pcidev->dev, 14635 GFP_KERNEL, (void *)phba, 14636 lpfc_write_firmware); 14637 } else if (fw_upgrade == RUN_FW_UPGRADE) { 14638 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 14639 if (!ret) 14640 lpfc_write_firmware(fw, (void *)phba); 14641 } else { 14642 ret = -EINVAL; 14643 } 14644 14645 return ret; 14646 } 14647 14648 /** 14649 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 14650 * @pdev: pointer to PCI device 14651 * @pid: pointer to PCI device identifier 14652 * 14653 * This routine is called from the kernel's PCI subsystem to device with 14654 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14655 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 14656 * information of the device and driver to see if the driver state that it 14657 * can support this kind of device. If the match is successful, the driver 14658 * core invokes this routine. If this routine determines it can claim the HBA, 14659 * it does all the initialization that it needs to do to handle the HBA 14660 * properly. 
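 * The setup sequence enables the PCI device, maps the SLI-4 register BARs,
 * allocates driver resources, enables interrupts, creates the SCSI host and
 * its sysfs attributes, and finally brings the port up with
 * lpfc_sli4_hba_setup().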
14661 * 14662 * Return code 14663 * 0 - driver can claim the device 14664 * negative value - driver can not claim the device 14665 **/ 14666 static int 14667 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 14668 { 14669 struct lpfc_hba *phba; 14670 struct lpfc_vport *vport = NULL; 14671 struct Scsi_Host *shost = NULL; 14672 int error; 14673 uint32_t cfg_mode, intr_mode; 14674 14675 /* Allocate memory for HBA structure */ 14676 phba = lpfc_hba_alloc(pdev); 14677 if (!phba) 14678 return -ENOMEM; 14679 14680 INIT_LIST_HEAD(&phba->poll_list); 14681 14682 /* Perform generic PCI device enabling operation */ 14683 error = lpfc_enable_pci_dev(phba); 14684 if (error) 14685 goto out_free_phba; 14686 14687 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 14688 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 14689 if (error) 14690 goto out_disable_pci_dev; 14691 14692 /* Set up SLI-4 specific device PCI memory space */ 14693 error = lpfc_sli4_pci_mem_setup(phba); 14694 if (error) { 14695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14696 "1410 Failed to set up pci memory space.\n"); 14697 goto out_disable_pci_dev; 14698 } 14699 14700 /* Set up SLI-4 Specific device driver resources */ 14701 error = lpfc_sli4_driver_resource_setup(phba); 14702 if (error) { 14703 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14704 "1412 Failed to set up driver resource.\n"); 14705 goto out_unset_pci_mem_s4; 14706 } 14707 14708 INIT_LIST_HEAD(&phba->active_rrq_list); 14709 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 14710 14711 /* Set up common device driver resources */ 14712 error = lpfc_setup_driver_resource_phase2(phba); 14713 if (error) { 14714 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14715 "1414 Failed to set up driver resource.\n"); 14716 goto out_unset_driver_resource_s4; 14717 } 14718 14719 /* Get the default values for Model Name and Description */ 14720 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 14721 14722 /* Now, trying to enable interrupt and bring up the device */ 14723 cfg_mode = phba->cfg_use_msi; 14724 14725 /* Put device to a known state before enabling interrupt */ 14726 phba->pport = NULL; 14727 lpfc_stop_port(phba); 14728 14729 /* Init cpu_map array */ 14730 lpfc_cpu_map_array_init(phba); 14731 14732 /* Init hba_eq_hdl array */ 14733 lpfc_hba_eq_hdl_array_init(phba); 14734 14735 /* Configure and enable interrupt */ 14736 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 14737 if (intr_mode == LPFC_INTR_ERROR) { 14738 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14739 "0426 Failed to enable interrupt.\n"); 14740 error = -ENODEV; 14741 goto out_unset_driver_resource; 14742 } 14743 /* Default to single EQ for non-MSI-X */ 14744 if (phba->intr_type != MSIX) { 14745 phba->cfg_irq_chann = 1; 14746 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14747 if (phba->nvmet_support) 14748 phba->cfg_nvmet_mrq = 1; 14749 } 14750 } 14751 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 14752 14753 /* Create SCSI host to the physical port */ 14754 error = lpfc_create_shost(phba); 14755 if (error) { 14756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14757 "1415 Failed to create scsi host.\n"); 14758 goto out_disable_intr; 14759 } 14760 vport = phba->pport; 14761 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 14762 14763 /* Configure sysfs attributes */ 14764 error = lpfc_alloc_sysfs_attr(vport); 14765 if (error) { 14766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14767 "1416 Failed to allocate sysfs attr\n"); 14768 goto 
out_destroy_shost; 14769 } 14770 14771 /* Set up SLI-4 HBA */ 14772 if (lpfc_sli4_hba_setup(phba)) { 14773 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14774 "1421 Failed to set up hba\n"); 14775 error = -ENODEV; 14776 goto out_free_sysfs_attr; 14777 } 14778 14779 /* Log the current active interrupt mode */ 14780 phba->intr_mode = intr_mode; 14781 lpfc_log_intr_mode(phba, intr_mode); 14782 14783 /* Perform post initialization setup */ 14784 lpfc_post_init_setup(phba); 14785 14786 /* NVME support in FW earlier in the driver load corrects the 14787 * FC4 type making a check for nvme_support unnecessary. 14788 */ 14789 if (phba->nvmet_support == 0) { 14790 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14791 /* Create NVME binding with nvme_fc_transport. This 14792 * ensures the vport is initialized. If the localport 14793 * create fails, it should not unload the driver to 14794 * support field issues. 14795 */ 14796 error = lpfc_nvme_create_localport(vport); 14797 if (error) { 14798 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14799 "6004 NVME registration " 14800 "failed, error x%x\n", 14801 error); 14802 } 14803 } 14804 } 14805 14806 /* check for firmware upgrade or downgrade */ 14807 if (phba->cfg_request_firmware_upgrade) 14808 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 14809 14810 /* Check if there are static vports to be created. */ 14811 lpfc_create_static_vport(phba); 14812 14813 /* Enable RAS FW log support */ 14814 lpfc_sli4_ras_setup(phba); 14815 14816 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 14817 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 14818 14819 return 0; 14820 14821 out_free_sysfs_attr: 14822 lpfc_free_sysfs_attr(vport); 14823 out_destroy_shost: 14824 lpfc_destroy_shost(phba); 14825 out_disable_intr: 14826 lpfc_sli4_disable_intr(phba); 14827 out_unset_driver_resource: 14828 lpfc_unset_driver_resource_phase2(phba); 14829 out_unset_driver_resource_s4: 14830 lpfc_sli4_driver_resource_unset(phba); 14831 out_unset_pci_mem_s4: 14832 lpfc_sli4_pci_mem_unset(phba); 14833 out_disable_pci_dev: 14834 lpfc_disable_pci_dev(phba); 14835 if (shost) 14836 scsi_host_put(shost); 14837 out_free_phba: 14838 lpfc_hba_free(phba); 14839 return error; 14840 } 14841 14842 /** 14843 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 14844 * @pdev: pointer to PCI device 14845 * 14846 * This routine is called from the kernel's PCI subsystem to device with 14847 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14848 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14849 * device to be removed from the PCI subsystem properly. 
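 * Teardown is essentially the reverse of lpfc_pci_probe_one_s4(): the vports
 * and SCSI host are removed first, then the SLI layer and driver resources,
 * and finally the PCI resources and the HBA data structure are released.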
14850 **/ 14851 static void 14852 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 14853 { 14854 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14855 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14856 struct lpfc_vport **vports; 14857 struct lpfc_hba *phba = vport->phba; 14858 int i; 14859 14860 /* Mark the device unloading flag */ 14861 spin_lock_irq(&phba->hbalock); 14862 vport->load_flag |= FC_UNLOADING; 14863 spin_unlock_irq(&phba->hbalock); 14864 if (phba->cgn_i) 14865 lpfc_unreg_congestion_buf(phba); 14866 14867 lpfc_free_sysfs_attr(vport); 14868 14869 /* Release all the vports against this physical port */ 14870 vports = lpfc_create_vport_work_array(phba); 14871 if (vports != NULL) 14872 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14873 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14874 continue; 14875 fc_vport_terminate(vports[i]->fc_vport); 14876 } 14877 lpfc_destroy_vport_work_array(phba, vports); 14878 14879 /* Remove FC host with the physical port */ 14880 fc_remove_host(shost); 14881 scsi_remove_host(shost); 14882 14883 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 14884 * localports are destroyed after to cleanup all transport memory. 14885 */ 14886 lpfc_cleanup(vport); 14887 lpfc_nvmet_destroy_targetport(phba); 14888 lpfc_nvme_destroy_localport(vport); 14889 14890 /* De-allocate multi-XRI pools */ 14891 if (phba->cfg_xri_rebalancing) 14892 lpfc_destroy_multixri_pools(phba); 14893 14894 /* 14895 * Bring down the SLI Layer. This step disables all interrupts, 14896 * clears the rings, discards all mailbox commands, and resets 14897 * the HBA FCoE function. 14898 */ 14899 lpfc_debugfs_terminate(vport); 14900 14901 lpfc_stop_hba_timers(phba); 14902 spin_lock_irq(&phba->port_list_lock); 14903 list_del_init(&vport->listentry); 14904 spin_unlock_irq(&phba->port_list_lock); 14905 14906 /* Perform scsi free before driver resource_unset since scsi 14907 * buffers are released to their corresponding pools here. 14908 */ 14909 lpfc_io_free(phba); 14910 lpfc_free_iocb_list(phba); 14911 lpfc_sli4_hba_unset(phba); 14912 14913 lpfc_unset_driver_resource_phase2(phba); 14914 lpfc_sli4_driver_resource_unset(phba); 14915 14916 /* Unmap adapter Control and Doorbell registers */ 14917 lpfc_sli4_pci_mem_unset(phba); 14918 14919 /* Release PCI resources and disable device's PCI function */ 14920 scsi_host_put(shost); 14921 lpfc_disable_pci_dev(phba); 14922 14923 /* Finally, free the driver's device data structure */ 14924 lpfc_hba_free(phba); 14925 14926 return; 14927 } 14928 14929 /** 14930 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 14931 * @dev_d: pointer to device 14932 * 14933 * This routine is called from the kernel's PCI subsystem to support system 14934 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 14935 * this method, it quiesces the device by stopping the driver's worker 14936 * thread for the device, turning off device's interrupt and DMA, and bring 14937 * the device offline. Note that as the driver implements the minimum PM 14938 * requirements to a power-aware driver's PM support for suspend/resume -- all 14939 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 14940 * method call will be treated as SUSPEND and the driver will fully 14941 * reinitialize its device during resume() method call, the driver will set 14942 * device to PCI_D3hot state in PCI config space instead of setting it 14943 * according to the @msg provided by the PM. 
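 * In addition to stopping the worker thread, the SLI-4 suspend path tears
 * down the queue set with lpfc_sli4_queue_destroy() after disabling
 * interrupts.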
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
15036 **/ 15037 static void 15038 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 15039 { 15040 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15041 "2828 PCI channel I/O abort preparing for recovery\n"); 15042 /* 15043 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 15044 * and let the SCSI mid-layer to retry them to recover. 15045 */ 15046 lpfc_sli_abort_fcp_rings(phba); 15047 } 15048 15049 /** 15050 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 15051 * @phba: pointer to lpfc hba data structure. 15052 * 15053 * This routine is called to prepare the SLI4 device for PCI slot reset. It 15054 * disables the device interrupt and pci device, and aborts the internal FCP 15055 * pending I/Os. 15056 **/ 15057 static void 15058 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 15059 { 15060 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15061 "2826 PCI channel disable preparing for reset\n"); 15062 15063 /* Block any management I/Os to the device */ 15064 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 15065 15066 /* Block all SCSI devices' I/Os on the host */ 15067 lpfc_scsi_dev_block(phba); 15068 15069 /* Flush all driver's outstanding I/Os as we are to reset */ 15070 lpfc_sli_flush_io_rings(phba); 15071 15072 /* stop all timers */ 15073 lpfc_stop_hba_timers(phba); 15074 15075 /* Disable interrupt and pci device */ 15076 lpfc_sli4_disable_intr(phba); 15077 lpfc_sli4_queue_destroy(phba); 15078 pci_disable_device(phba->pcidev); 15079 } 15080 15081 /** 15082 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 15083 * @phba: pointer to lpfc hba data structure. 15084 * 15085 * This routine is called to prepare the SLI4 device for PCI slot permanently 15086 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 15087 * pending I/Os. 15088 **/ 15089 static void 15090 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 15091 { 15092 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15093 "2827 PCI channel permanent disable for failure\n"); 15094 15095 /* Block all SCSI devices' I/Os on the host */ 15096 lpfc_scsi_dev_block(phba); 15097 15098 /* stop all timers */ 15099 lpfc_stop_hba_timers(phba); 15100 15101 /* Clean up all driver's outstanding I/Os */ 15102 lpfc_sli_flush_io_rings(phba); 15103 } 15104 15105 /** 15106 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 15107 * @pdev: pointer to PCI device. 15108 * @state: the current PCI connection state. 15109 * 15110 * This routine is called from the PCI subsystem for error handling to device 15111 * with SLI-4 interface spec. This function is called by the PCI subsystem 15112 * after a PCI bus error affecting this device has been detected. When this 15113 * function is invoked, it will need to stop all the I/Os and interrupt(s) 15114 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 15115 * for the PCI subsystem to perform proper recovery as desired. 
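 * The frozen, permanent-failure and unknown states additionally set the
 * HBA_PCI_ERR flag so the rest of the driver treats the PCI channel as
 * unusable until the slot reset completes.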
15116 * 15117 * Return codes 15118 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15119 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15120 **/ 15121 static pci_ers_result_t 15122 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 15123 { 15124 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15125 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15126 15127 switch (state) { 15128 case pci_channel_io_normal: 15129 /* Non-fatal error, prepare for recovery */ 15130 lpfc_sli4_prep_dev_for_recover(phba); 15131 return PCI_ERS_RESULT_CAN_RECOVER; 15132 case pci_channel_io_frozen: 15133 phba->hba_flag |= HBA_PCI_ERR; 15134 /* Fatal error, prepare for slot reset */ 15135 lpfc_sli4_prep_dev_for_reset(phba); 15136 return PCI_ERS_RESULT_NEED_RESET; 15137 case pci_channel_io_perm_failure: 15138 phba->hba_flag |= HBA_PCI_ERR; 15139 /* Permanent failure, prepare for device down */ 15140 lpfc_sli4_prep_dev_for_perm_failure(phba); 15141 return PCI_ERS_RESULT_DISCONNECT; 15142 default: 15143 phba->hba_flag |= HBA_PCI_ERR; 15144 /* Unknown state, prepare and request slot reset */ 15145 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15146 "2825 Unknown PCI error state: x%x\n", state); 15147 lpfc_sli4_prep_dev_for_reset(phba); 15148 return PCI_ERS_RESULT_NEED_RESET; 15149 } 15150 } 15151 15152 /** 15153 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 15154 * @pdev: pointer to PCI device. 15155 * 15156 * This routine is called from the PCI subsystem for error handling to device 15157 * with SLI-4 interface spec. It is called after PCI bus has been reset to 15158 * restart the PCI card from scratch, as if from a cold-boot. During the 15159 * PCI subsystem error recovery, after the driver returns 15160 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 15161 * recovery and then call this routine before calling the .resume method to 15162 * recover the device. This function will initialize the HBA device, enable 15163 * the interrupt, but it will just put the HBA to offline state without 15164 * passing any I/O traffic. 15165 * 15166 * Return codes 15167 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15168 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15169 */ 15170 static pci_ers_result_t 15171 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 15172 { 15173 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15174 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15175 struct lpfc_sli *psli = &phba->sli; 15176 uint32_t intr_mode; 15177 15178 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 15179 if (pci_enable_device_mem(pdev)) { 15180 printk(KERN_ERR "lpfc: Cannot re-enable " 15181 "PCI device after reset.\n"); 15182 return PCI_ERS_RESULT_DISCONNECT; 15183 } 15184 15185 pci_restore_state(pdev); 15186 15187 phba->hba_flag &= ~HBA_PCI_ERR; 15188 /* 15189 * As the new kernel behavior of pci_restore_state() API call clears 15190 * device saved_state flag, need to save the restored state again. 
15191 */ 15192 pci_save_state(pdev); 15193 15194 if (pdev->is_busmaster) 15195 pci_set_master(pdev); 15196 15197 spin_lock_irq(&phba->hbalock); 15198 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 15199 spin_unlock_irq(&phba->hbalock); 15200 15201 /* Configure and enable interrupt */ 15202 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 15203 if (intr_mode == LPFC_INTR_ERROR) { 15204 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15205 "2824 Cannot re-enable interrupt after " 15206 "slot reset.\n"); 15207 return PCI_ERS_RESULT_DISCONNECT; 15208 } else 15209 phba->intr_mode = intr_mode; 15210 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 15211 15212 /* Log the current active interrupt mode */ 15213 lpfc_log_intr_mode(phba, phba->intr_mode); 15214 15215 return PCI_ERS_RESULT_RECOVERED; 15216 } 15217 15218 /** 15219 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 15220 * @pdev: pointer to PCI device 15221 * 15222 * This routine is called from the PCI subsystem for error handling to device 15223 * with SLI-4 interface spec. It is called when kernel error recovery tells 15224 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 15225 * error recovery. After this call, traffic can start to flow from this device 15226 * again. 15227 **/ 15228 static void 15229 lpfc_io_resume_s4(struct pci_dev *pdev) 15230 { 15231 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15232 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15233 15234 /* 15235 * In case of slot reset, as function reset is performed through 15236 * mailbox command which needs DMA to be enabled, this operation 15237 * has to be moved to the io resume phase. Taking device offline 15238 * will perform the necessary cleanup. 15239 */ 15240 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 15241 /* Perform device reset */ 15242 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 15243 lpfc_offline(phba); 15244 lpfc_sli_brdrestart(phba); 15245 /* Bring the device back online */ 15246 lpfc_online(phba); 15247 } 15248 } 15249 15250 /** 15251 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 15252 * @pdev: pointer to PCI device 15253 * @pid: pointer to PCI device identifier 15254 * 15255 * This routine is to be registered to the kernel's PCI subsystem. When an 15256 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 15257 * at PCI device-specific information of the device and driver to see if the 15258 * driver state that it can support this kind of device. If the match is 15259 * successful, the driver core invokes this routine. This routine dispatches 15260 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 15261 * do all the initialization that it needs to do to handle the HBA device 15262 * properly. 
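 * The SLI revision is read from the LPFC_SLI_INTF register in PCI config
 * space, so the dispatch works before any BAR has been mapped.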
15263 * 15264 * Return code 15265 * 0 - driver can claim the device 15266 * negative value - driver can not claim the device 15267 **/ 15268 static int 15269 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 15270 { 15271 int rc; 15272 struct lpfc_sli_intf intf; 15273 15274 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 15275 return -ENODEV; 15276 15277 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 15278 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 15279 rc = lpfc_pci_probe_one_s4(pdev, pid); 15280 else 15281 rc = lpfc_pci_probe_one_s3(pdev, pid); 15282 15283 return rc; 15284 } 15285 15286 /** 15287 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 15288 * @pdev: pointer to PCI device 15289 * 15290 * This routine is to be registered to the kernel's PCI subsystem. When an 15291 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 15292 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 15293 * remove routine, which will perform all the necessary cleanup for the 15294 * device to be removed from the PCI subsystem properly. 15295 **/ 15296 static void 15297 lpfc_pci_remove_one(struct pci_dev *pdev) 15298 { 15299 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15300 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15301 15302 switch (phba->pci_dev_grp) { 15303 case LPFC_PCI_DEV_LP: 15304 lpfc_pci_remove_one_s3(pdev); 15305 break; 15306 case LPFC_PCI_DEV_OC: 15307 lpfc_pci_remove_one_s4(pdev); 15308 break; 15309 default: 15310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15311 "1424 Invalid PCI device group: 0x%x\n", 15312 phba->pci_dev_grp); 15313 break; 15314 } 15315 return; 15316 } 15317 15318 /** 15319 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 15320 * @dev: pointer to device 15321 * 15322 * This routine is to be registered to the kernel's PCI subsystem to support 15323 * system Power Management (PM). When PM invokes this method, it dispatches 15324 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 15325 * suspend the device. 15326 * 15327 * Return code 15328 * 0 - driver suspended the device 15329 * Error otherwise 15330 **/ 15331 static int __maybe_unused 15332 lpfc_pci_suspend_one(struct device *dev) 15333 { 15334 struct Scsi_Host *shost = dev_get_drvdata(dev); 15335 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15336 int rc = -ENODEV; 15337 15338 switch (phba->pci_dev_grp) { 15339 case LPFC_PCI_DEV_LP: 15340 rc = lpfc_pci_suspend_one_s3(dev); 15341 break; 15342 case LPFC_PCI_DEV_OC: 15343 rc = lpfc_pci_suspend_one_s4(dev); 15344 break; 15345 default: 15346 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15347 "1425 Invalid PCI device group: 0x%x\n", 15348 phba->pci_dev_grp); 15349 break; 15350 } 15351 return rc; 15352 } 15353 15354 /** 15355 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 15356 * @dev: pointer to device 15357 * 15358 * This routine is to be registered to the kernel's PCI subsystem to support 15359 * system Power Management (PM). When PM invokes this method, it dispatches 15360 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 15361 * resume the device. 
15362 * 15363 * Return code 15364 * 0 - driver suspended the device 15365 * Error otherwise 15366 **/ 15367 static int __maybe_unused 15368 lpfc_pci_resume_one(struct device *dev) 15369 { 15370 struct Scsi_Host *shost = dev_get_drvdata(dev); 15371 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15372 int rc = -ENODEV; 15373 15374 switch (phba->pci_dev_grp) { 15375 case LPFC_PCI_DEV_LP: 15376 rc = lpfc_pci_resume_one_s3(dev); 15377 break; 15378 case LPFC_PCI_DEV_OC: 15379 rc = lpfc_pci_resume_one_s4(dev); 15380 break; 15381 default: 15382 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15383 "1426 Invalid PCI device group: 0x%x\n", 15384 phba->pci_dev_grp); 15385 break; 15386 } 15387 return rc; 15388 } 15389 15390 /** 15391 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 15392 * @pdev: pointer to PCI device. 15393 * @state: the current PCI connection state. 15394 * 15395 * This routine is registered to the PCI subsystem for error handling. This 15396 * function is called by the PCI subsystem after a PCI bus error affecting 15397 * this device has been detected. When this routine is invoked, it dispatches 15398 * the action to the proper SLI-3 or SLI-4 device error detected handling 15399 * routine, which will perform the proper error detected operation. 15400 * 15401 * Return codes 15402 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15403 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15404 **/ 15405 static pci_ers_result_t 15406 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 15407 { 15408 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15409 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15410 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15411 15412 if (phba->link_state == LPFC_HBA_ERROR && 15413 phba->hba_flag & HBA_IOQ_FLUSH) 15414 return PCI_ERS_RESULT_NEED_RESET; 15415 15416 switch (phba->pci_dev_grp) { 15417 case LPFC_PCI_DEV_LP: 15418 rc = lpfc_io_error_detected_s3(pdev, state); 15419 break; 15420 case LPFC_PCI_DEV_OC: 15421 rc = lpfc_io_error_detected_s4(pdev, state); 15422 break; 15423 default: 15424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15425 "1427 Invalid PCI device group: 0x%x\n", 15426 phba->pci_dev_grp); 15427 break; 15428 } 15429 return rc; 15430 } 15431 15432 /** 15433 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 15434 * @pdev: pointer to PCI device. 15435 * 15436 * This routine is registered to the PCI subsystem for error handling. This 15437 * function is called after PCI bus has been reset to restart the PCI card 15438 * from scratch, as if from a cold-boot. When this routine is invoked, it 15439 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 15440 * routine, which will perform the proper device reset. 
15441 * 15442 * Return codes 15443 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15444 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15445 **/ 15446 static pci_ers_result_t 15447 lpfc_io_slot_reset(struct pci_dev *pdev) 15448 { 15449 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15450 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15451 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15452 15453 switch (phba->pci_dev_grp) { 15454 case LPFC_PCI_DEV_LP: 15455 rc = lpfc_io_slot_reset_s3(pdev); 15456 break; 15457 case LPFC_PCI_DEV_OC: 15458 rc = lpfc_io_slot_reset_s4(pdev); 15459 break; 15460 default: 15461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15462 "1428 Invalid PCI device group: 0x%x\n", 15463 phba->pci_dev_grp); 15464 break; 15465 } 15466 return rc; 15467 } 15468 15469 /** 15470 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 15471 * @pdev: pointer to PCI device 15472 * 15473 * This routine is registered to the PCI subsystem for error handling. It 15474 * is called when kernel error recovery tells the lpfc driver that it is 15475 * OK to resume normal PCI operation after PCI bus error recovery. When 15476 * this routine is invoked, it dispatches the action to the proper SLI-3 15477 * or SLI-4 device io_resume routine, which will resume the device operation. 15478 **/ 15479 static void 15480 lpfc_io_resume(struct pci_dev *pdev) 15481 { 15482 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15483 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15484 15485 switch (phba->pci_dev_grp) { 15486 case LPFC_PCI_DEV_LP: 15487 lpfc_io_resume_s3(pdev); 15488 break; 15489 case LPFC_PCI_DEV_OC: 15490 lpfc_io_resume_s4(pdev); 15491 break; 15492 default: 15493 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15494 "1429 Invalid PCI device group: 0x%x\n", 15495 phba->pci_dev_grp); 15496 break; 15497 } 15498 return; 15499 } 15500 15501 /** 15502 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 15503 * @phba: pointer to lpfc hba data structure. 15504 * 15505 * This routine checks to see if OAS is supported for this adapter. If 15506 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 15507 * the enable oas flag is cleared and the pool created for OAS device data 15508 * is destroyed. 15509 * 15510 **/ 15511 static void 15512 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 15513 { 15514 15515 if (!phba->cfg_EnableXLane) 15516 return; 15517 15518 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 15519 phba->cfg_fof = 1; 15520 } else { 15521 phba->cfg_fof = 0; 15522 mempool_destroy(phba->device_data_mem_pool); 15523 phba->device_data_mem_pool = NULL; 15524 } 15525 15526 return; 15527 } 15528 15529 /** 15530 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 15531 * @phba: pointer to lpfc hba data structure. 15532 * 15533 * This routine checks to see if RAS is supported by the adapter. Check the 15534 * function through which RAS support enablement is to be done. 
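 * Hardware support is claimed only for IF_TYPE 6 or G6-family adapters
 * (ASIC generation 0xC or later), and logging is then enabled only on the
 * PCI function selected by cfg_ras_fwlog_func when a log buffer size has
 * been configured.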
15535 **/ 15536 void 15537 lpfc_sli4_ras_init(struct lpfc_hba *phba) 15538 { 15539 /* if ASIC_GEN_NUM >= 0xC) */ 15540 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 15541 LPFC_SLI_INTF_IF_TYPE_6) || 15542 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 15543 LPFC_SLI_INTF_FAMILY_G6)) { 15544 phba->ras_fwlog.ras_hwsupport = true; 15545 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 15546 phba->cfg_ras_fwlog_buffsize) 15547 phba->ras_fwlog.ras_enabled = true; 15548 else 15549 phba->ras_fwlog.ras_enabled = false; 15550 } else { 15551 phba->ras_fwlog.ras_hwsupport = false; 15552 } 15553 } 15554 15555 15556 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 15557 15558 static const struct pci_error_handlers lpfc_err_handler = { 15559 .error_detected = lpfc_io_error_detected, 15560 .slot_reset = lpfc_io_slot_reset, 15561 .resume = lpfc_io_resume, 15562 }; 15563 15564 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one, 15565 lpfc_pci_suspend_one, 15566 lpfc_pci_resume_one); 15567 15568 static struct pci_driver lpfc_driver = { 15569 .name = LPFC_DRIVER_NAME, 15570 .id_table = lpfc_id_table, 15571 .probe = lpfc_pci_probe_one, 15572 .remove = lpfc_pci_remove_one, 15573 .shutdown = lpfc_pci_remove_one, 15574 .driver.pm = &lpfc_pci_pm_ops_one, 15575 .err_handler = &lpfc_err_handler, 15576 }; 15577 15578 static const struct file_operations lpfc_mgmt_fop = { 15579 .owner = THIS_MODULE, 15580 }; 15581 15582 static struct miscdevice lpfc_mgmt_dev = { 15583 .minor = MISC_DYNAMIC_MINOR, 15584 .name = "lpfcmgmt", 15585 .fops = &lpfc_mgmt_fop, 15586 }; 15587 15588 /** 15589 * lpfc_init - lpfc module initialization routine 15590 * 15591 * This routine is to be invoked when the lpfc module is loaded into the 15592 * kernel. The special kernel macro module_init() is used to indicate the 15593 * role of this routine to the kernel as lpfc module entry point. 
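 * The FC transport templates and the CPU hotplug state are registered
 * before pci_register_driver() so that they exist when the first adapter is
 * probed; the error path unwinds them in the reverse order.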
15594 * 15595 * Return codes 15596 * 0 - successful 15597 * -ENOMEM - FC attach transport failed 15598 * all others - failed 15599 */ 15600 static int __init 15601 lpfc_init(void) 15602 { 15603 int error = 0; 15604 15605 pr_info(LPFC_MODULE_DESC "\n"); 15606 pr_info(LPFC_COPYRIGHT "\n"); 15607 15608 error = misc_register(&lpfc_mgmt_dev); 15609 if (error) 15610 printk(KERN_ERR "Could not register lpfcmgmt device, " 15611 "misc_register returned with status %d", error); 15612 15613 error = -ENOMEM; 15614 lpfc_transport_functions.vport_create = lpfc_vport_create; 15615 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 15616 lpfc_transport_template = 15617 fc_attach_transport(&lpfc_transport_functions); 15618 if (lpfc_transport_template == NULL) 15619 goto unregister; 15620 lpfc_vport_transport_template = 15621 fc_attach_transport(&lpfc_vport_transport_functions); 15622 if (lpfc_vport_transport_template == NULL) { 15623 fc_release_transport(lpfc_transport_template); 15624 goto unregister; 15625 } 15626 lpfc_wqe_cmd_template(); 15627 lpfc_nvmet_cmd_template(); 15628 15629 /* Initialize in case vector mapping is needed */ 15630 lpfc_present_cpu = num_present_cpus(); 15631 15632 lpfc_pldv_detect = false; 15633 15634 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 15635 "lpfc/sli4:online", 15636 lpfc_cpu_online, lpfc_cpu_offline); 15637 if (error < 0) 15638 goto cpuhp_failure; 15639 lpfc_cpuhp_state = error; 15640 15641 error = pci_register_driver(&lpfc_driver); 15642 if (error) 15643 goto unwind; 15644 15645 return error; 15646 15647 unwind: 15648 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15649 cpuhp_failure: 15650 fc_release_transport(lpfc_transport_template); 15651 fc_release_transport(lpfc_vport_transport_template); 15652 unregister: 15653 misc_deregister(&lpfc_mgmt_dev); 15654 15655 return error; 15656 } 15657 15658 void lpfc_dmp_dbg(struct lpfc_hba *phba) 15659 { 15660 unsigned int start_idx; 15661 unsigned int dbg_cnt; 15662 unsigned int temp_idx; 15663 int i; 15664 int j = 0; 15665 unsigned long rem_nsec, iflags; 15666 bool log_verbose = false; 15667 struct lpfc_vport *port_iterator; 15668 15669 /* Don't dump messages if we explicitly set log_verbose for the 15670 * physical port or any vport. 
15671 */ 15672 if (phba->cfg_log_verbose) 15673 return; 15674 15675 spin_lock_irqsave(&phba->port_list_lock, iflags); 15676 list_for_each_entry(port_iterator, &phba->port_list, listentry) { 15677 if (port_iterator->load_flag & FC_UNLOADING) 15678 continue; 15679 if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) { 15680 if (port_iterator->cfg_log_verbose) 15681 log_verbose = true; 15682 15683 scsi_host_put(lpfc_shost_from_vport(port_iterator)); 15684 15685 if (log_verbose) { 15686 spin_unlock_irqrestore(&phba->port_list_lock, 15687 iflags); 15688 return; 15689 } 15690 } 15691 } 15692 spin_unlock_irqrestore(&phba->port_list_lock, iflags); 15693 15694 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) 15695 return; 15696 15697 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; 15698 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); 15699 if (!dbg_cnt) 15700 goto out; 15701 temp_idx = start_idx; 15702 if (dbg_cnt >= DBG_LOG_SZ) { 15703 dbg_cnt = DBG_LOG_SZ; 15704 temp_idx -= 1; 15705 } else { 15706 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { 15707 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; 15708 } else { 15709 if (start_idx < dbg_cnt) 15710 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); 15711 else 15712 start_idx -= dbg_cnt; 15713 } 15714 } 15715 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", 15716 start_idx, temp_idx, dbg_cnt); 15717 15718 for (i = 0; i < dbg_cnt; i++) { 15719 if ((start_idx + i) < DBG_LOG_SZ) 15720 temp_idx = (start_idx + i) % DBG_LOG_SZ; 15721 else 15722 temp_idx = j++; 15723 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); 15724 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", 15725 temp_idx, 15726 (unsigned long)phba->dbg_log[temp_idx].t_ns, 15727 rem_nsec / 1000, 15728 phba->dbg_log[temp_idx].log); 15729 } 15730 out: 15731 atomic_set(&phba->dbg_log_cnt, 0); 15732 atomic_set(&phba->dbg_log_dmping, 0); 15733 } 15734 15735 __printf(2, 3) 15736 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...) 15737 { 15738 unsigned int idx; 15739 va_list args; 15740 int dbg_dmping = atomic_read(&phba->dbg_log_dmping); 15741 struct va_format vaf; 15742 15743 15744 va_start(args, fmt); 15745 if (unlikely(dbg_dmping)) { 15746 vaf.fmt = fmt; 15747 vaf.va = &args; 15748 dev_info(&phba->pcidev->dev, "%pV", &vaf); 15749 va_end(args); 15750 return; 15751 } 15752 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % 15753 DBG_LOG_SZ; 15754 15755 atomic_inc(&phba->dbg_log_cnt); 15756 15757 vscnprintf(phba->dbg_log[idx].log, 15758 sizeof(phba->dbg_log[idx].log), fmt, args); 15759 va_end(args); 15760 15761 phba->dbg_log[idx].t_ns = local_clock(); 15762 } 15763 15764 /** 15765 * lpfc_exit - lpfc module removal routine 15766 * 15767 * This routine is invoked when the lpfc module is removed from the kernel. 15768 * The special kernel macro module_exit() is used to indicate the role of 15769 * this routine to the kernel as lpfc module exit point. 
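 * Teardown mirrors lpfc_init(): the management misc device and the PCI
 * driver are unregistered, the CPU hotplug state is removed, the FC
 * transport templates are released, and the HBA index IDR is destroyed.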
15770 */ 15771 static void __exit 15772 lpfc_exit(void) 15773 { 15774 misc_deregister(&lpfc_mgmt_dev); 15775 pci_unregister_driver(&lpfc_driver); 15776 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15777 fc_release_transport(lpfc_transport_template); 15778 fc_release_transport(lpfc_vport_transport_template); 15779 idr_destroy(&lpfc_hba_index); 15780 } 15781 15782 module_init(lpfc_init); 15783 module_exit(lpfc_exit); 15784 MODULE_LICENSE("GPL"); 15785 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 15786 MODULE_AUTHOR("Broadcom"); 15787 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 15788