1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 #include <linux/crash_dump.h> 43 #include <linux/cpu.h> 44 #include <linux/cpuhotplug.h> 45 46 #include <scsi/scsi.h> 47 #include <scsi/scsi_device.h> 48 #include <scsi/scsi_host.h> 49 #include <scsi/scsi_transport_fc.h> 50 #include <scsi/scsi_tcq.h> 51 #include <scsi/fc/fc_fs.h> 52 53 #include "lpfc_hw4.h" 54 #include "lpfc_hw.h" 55 #include "lpfc_sli.h" 56 #include "lpfc_sli4.h" 57 #include "lpfc_nl.h" 58 #include "lpfc_disc.h" 59 #include "lpfc.h" 60 #include "lpfc_scsi.h" 61 #include "lpfc_nvme.h" 62 #include "lpfc_logmsg.h" 63 #include "lpfc_crtn.h" 64 #include "lpfc_vport.h" 65 #include "lpfc_version.h" 66 #include "lpfc_ids.h" 67 68 static enum cpuhp_state lpfc_cpuhp_state; 69 /* Used when mapping IRQ vectors in a driver centric manner */ 70 static uint32_t lpfc_present_cpu; 71 72 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 73 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); 74 static void lpfc_cpuhp_add(struct lpfc_hba *phba); 75 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 76 static int lpfc_post_rcv_buf(struct lpfc_hba *); 77 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 78 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 79 static int lpfc_setup_endian_order(struct lpfc_hba *); 80 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 81 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 82 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 83 static void lpfc_init_sgl_list(struct lpfc_hba *); 84 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 85 static void lpfc_free_active_sgl(struct lpfc_hba *); 86 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 87 static int lpfc_hba_down_post_s4(struct lpfc_hba 
*phba); 88 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 89 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 90 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 91 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 92 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 93 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 94 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 95 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 96 97 static struct scsi_transport_template *lpfc_transport_template = NULL; 98 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 99 static DEFINE_IDR(lpfc_hba_index); 100 #define LPFC_NVMET_BUF_POST 254 101 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); 102 103 /** 104 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 105 * @phba: pointer to lpfc hba data structure. 106 * 107 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 108 * mailbox command. It retrieves the revision information from the HBA and 109 * collects the Vital Product Data (VPD) about the HBA for preparing the 110 * configuration of the HBA. 111 * 112 * Return codes: 113 * 0 - success. 114 * -ERESTART - requests the SLI layer to reset the HBA and try again. 115 * Any other value - indicates an error. 116 **/ 117 int 118 lpfc_config_port_prep(struct lpfc_hba *phba) 119 { 120 lpfc_vpd_t *vp = &phba->vpd; 121 int i = 0, rc; 122 LPFC_MBOXQ_t *pmb; 123 MAILBOX_t *mb; 124 char *lpfc_vpd_data = NULL; 125 uint16_t offset = 0; 126 static char licensed[56] = 127 "key unlock for use with gnu public licensed code only\0"; 128 static int init_key = 1; 129 130 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 131 if (!pmb) { 132 phba->link_state = LPFC_HBA_ERROR; 133 return -ENOMEM; 134 } 135 136 mb = &pmb->u.mb; 137 phba->link_state = LPFC_INIT_MBX_CMDS; 138 139 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 140 if (init_key) { 141 uint32_t *ptext = (uint32_t *) licensed; 142 143 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 144 *ptext = cpu_to_be32(*ptext); 145 init_key = 0; 146 } 147 148 lpfc_read_nv(phba, pmb); 149 memset((char*)mb->un.varRDnvp.rsvd3, 0, 150 sizeof (mb->un.varRDnvp.rsvd3)); 151 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 152 sizeof (licensed)); 153 154 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 155 156 if (rc != MBX_SUCCESS) { 157 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 158 "0324 Config Port initialization " 159 "error, mbxCmd x%x READ_NVPARM, " 160 "mbxStatus x%x\n", 161 mb->mbxCommand, mb->mbxStatus); 162 mempool_free(pmb, phba->mbox_mem_pool); 163 return -ERESTART; 164 } 165 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 166 sizeof(phba->wwnn)); 167 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 168 sizeof(phba->wwpn)); 169 } 170 171 /* 172 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 173 * which was already set in lpfc_get_cfgparam() 174 */ 175 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 176 177 /* Setup and issue mailbox READ REV command */ 178 lpfc_read_rev(phba, pmb); 179 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 180 if (rc != MBX_SUCCESS) { 181 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 182 "0439 Adapter failed to init, mbxCmd x%x " 183 "READ_REV, mbxStatus x%x\n", 184 mb->mbxCommand, mb->mbxStatus); 185 mempool_free( pmb, phba->mbox_mem_pool); 186 return -ERESTART; 187 } 188 189 190 /* 191 * The value of 
rr must be 1 since the driver set the cv field to 1. 192 * This setting requires the FW to set all revision fields. 193 */ 194 if (mb->un.varRdRev.rr == 0) { 195 vp->rev.rBit = 0; 196 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 197 "0440 Adapter failed to init, READ_REV has " 198 "missing revision information.\n"); 199 mempool_free(pmb, phba->mbox_mem_pool); 200 return -ERESTART; 201 } 202 203 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 204 mempool_free(pmb, phba->mbox_mem_pool); 205 return -EINVAL; 206 } 207 208 /* Save information as VPD data */ 209 vp->rev.rBit = 1; 210 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 211 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 212 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 213 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 214 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 215 vp->rev.biuRev = mb->un.varRdRev.biuRev; 216 vp->rev.smRev = mb->un.varRdRev.smRev; 217 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 218 vp->rev.endecRev = mb->un.varRdRev.endecRev; 219 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 220 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 221 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 222 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 223 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 224 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 225 226 /* If the sli feature level is less then 9, we must 227 * tear down all RPIs and VPIs on link down if NPIV 228 * is enabled. 229 */ 230 if (vp->rev.feaLevelHigh < 9) 231 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 232 233 if (lpfc_is_LC_HBA(phba->pcidev->device)) 234 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 235 sizeof (phba->RandomData)); 236 237 /* Get adapter VPD information */ 238 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 239 if (!lpfc_vpd_data) 240 goto out_free_mbox; 241 do { 242 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 243 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 244 245 if (rc != MBX_SUCCESS) { 246 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 247 "0441 VPD not present on adapter, " 248 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 249 mb->mbxCommand, mb->mbxStatus); 250 mb->un.varDmp.word_cnt = 0; 251 } 252 /* dump mem may return a zero when finished or we got a 253 * mailbox error, either way we are done. 254 */ 255 if (mb->un.varDmp.word_cnt == 0) 256 break; 257 258 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 259 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 260 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 261 lpfc_vpd_data + offset, 262 mb->un.varDmp.word_cnt); 263 offset += mb->un.varDmp.word_cnt; 264 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 265 266 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 267 268 kfree(lpfc_vpd_data); 269 out_free_mbox: 270 mempool_free(pmb, phba->mbox_mem_pool); 271 return 0; 272 } 273 274 /** 275 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 276 * @phba: pointer to lpfc hba data structure. 277 * @pmboxq: pointer to the driver internal queue element for mailbox command. 278 * 279 * This is the completion handler for driver's configuring asynchronous event 280 * mailbox command to the device. If the mailbox command returns successfully, 281 * it will set internal async event support flag to 1; otherwise, it will 282 * set internal async event support flag to 0. 
283 **/ 284 static void 285 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 286 { 287 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 288 phba->temp_sensor_support = 1; 289 else 290 phba->temp_sensor_support = 0; 291 mempool_free(pmboxq, phba->mbox_mem_pool); 292 return; 293 } 294 295 /** 296 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 297 * @phba: pointer to lpfc hba data structure. 298 * @pmboxq: pointer to the driver internal queue element for mailbox command. 299 * 300 * This is the completion handler for dump mailbox command for getting 301 * wake up parameters. When this command complete, the response contain 302 * Option rom version of the HBA. This function translate the version number 303 * into a human readable string and store it in OptionROMVersion. 304 **/ 305 static void 306 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 307 { 308 struct prog_id *prg; 309 uint32_t prog_id_word; 310 char dist = ' '; 311 /* character array used for decoding dist type. */ 312 char dist_char[] = "nabx"; 313 314 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 315 mempool_free(pmboxq, phba->mbox_mem_pool); 316 return; 317 } 318 319 prg = (struct prog_id *) &prog_id_word; 320 321 /* word 7 contain option rom version */ 322 prog_id_word = pmboxq->u.mb.un.varWords[7]; 323 324 /* Decode the Option rom version word to a readable string */ 325 if (prg->dist < 4) 326 dist = dist_char[prg->dist]; 327 328 if ((prg->dist == 3) && (prg->num == 0)) 329 snprintf(phba->OptionROMVersion, 32, "%d.%d%d", 330 prg->ver, prg->rev, prg->lev); 331 else 332 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", 333 prg->ver, prg->rev, prg->lev, 334 dist, prg->num); 335 mempool_free(pmboxq, phba->mbox_mem_pool); 336 return; 337 } 338 339 /** 340 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, 341 * cfg_soft_wwnn, cfg_soft_wwpn 342 * @vport: pointer to lpfc vport data structure. 343 * 344 * 345 * Return codes 346 * None. 
347 **/ 348 void 349 lpfc_update_vport_wwn(struct lpfc_vport *vport) 350 { 351 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 352 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 353 354 /* If the soft name exists then update it using the service params */ 355 if (vport->phba->cfg_soft_wwnn) 356 u64_to_wwn(vport->phba->cfg_soft_wwnn, 357 vport->fc_sparam.nodeName.u.wwn); 358 if (vport->phba->cfg_soft_wwpn) 359 u64_to_wwn(vport->phba->cfg_soft_wwpn, 360 vport->fc_sparam.portName.u.wwn); 361 362 /* 363 * If the name is empty or there exists a soft name 364 * then copy the service params name, otherwise use the fc name 365 */ 366 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 367 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 368 sizeof(struct lpfc_name)); 369 else 370 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 371 sizeof(struct lpfc_name)); 372 373 /* 374 * If the port name has changed, then set the Param changes flag 375 * to unreg the login 376 */ 377 if (vport->fc_portname.u.wwn[0] != 0 && 378 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 379 sizeof(struct lpfc_name))) 380 vport->vport_flag |= FAWWPN_PARAM_CHG; 381 382 if (vport->fc_portname.u.wwn[0] == 0 || 383 vport->phba->cfg_soft_wwpn || 384 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 385 vport->vport_flag & FAWWPN_SET) { 386 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 387 sizeof(struct lpfc_name)); 388 vport->vport_flag &= ~FAWWPN_SET; 389 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 390 vport->vport_flag |= FAWWPN_SET; 391 } 392 else 393 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 394 sizeof(struct lpfc_name)); 395 } 396 397 /** 398 * lpfc_config_port_post - Perform lpfc initialization after config port 399 * @phba: pointer to lpfc hba data structure. 400 * 401 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 402 * command call. It performs all internal resource and state setups on the 403 * port: post IOCB buffers, enable appropriate host interrupt attentions, 404 * ELS ring timers, etc. 405 * 406 * Return codes 407 * 0 - success. 408 * Any other value - error. 409 **/ 410 int 411 lpfc_config_port_post(struct lpfc_hba *phba) 412 { 413 struct lpfc_vport *vport = phba->pport; 414 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 415 LPFC_MBOXQ_t *pmb; 416 MAILBOX_t *mb; 417 struct lpfc_dmabuf *mp; 418 struct lpfc_sli *psli = &phba->sli; 419 uint32_t status, timeout; 420 int i, j; 421 int rc; 422 423 spin_lock_irq(&phba->hbalock); 424 /* 425 * If the Config port completed correctly the HBA is not 426 * over heated any more. 427 */ 428 if (phba->over_temp_state == HBA_OVER_TEMP) 429 phba->over_temp_state = HBA_NORMAL_TEMP; 430 spin_unlock_irq(&phba->hbalock); 431 432 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 433 if (!pmb) { 434 phba->link_state = LPFC_HBA_ERROR; 435 return -ENOMEM; 436 } 437 mb = &pmb->u.mb; 438 439 /* Get login parameters for NID. 
*/ 440 rc = lpfc_read_sparam(phba, pmb, 0); 441 if (rc) { 442 mempool_free(pmb, phba->mbox_mem_pool); 443 return -ENOMEM; 444 } 445 446 pmb->vport = vport; 447 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 448 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 449 "0448 Adapter failed init, mbxCmd x%x " 450 "READ_SPARM mbxStatus x%x\n", 451 mb->mbxCommand, mb->mbxStatus); 452 phba->link_state = LPFC_HBA_ERROR; 453 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 454 mempool_free(pmb, phba->mbox_mem_pool); 455 lpfc_mbuf_free(phba, mp->virt, mp->phys); 456 kfree(mp); 457 return -EIO; 458 } 459 460 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 461 462 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 463 lpfc_mbuf_free(phba, mp->virt, mp->phys); 464 kfree(mp); 465 pmb->ctx_buf = NULL; 466 lpfc_update_vport_wwn(vport); 467 468 /* Update the fc_host data structures with new wwn. */ 469 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 470 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 471 fc_host_max_npiv_vports(shost) = phba->max_vpi; 472 473 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 474 /* This should be consolidated into parse_vpd ? - mr */ 475 if (phba->SerialNumber[0] == 0) { 476 uint8_t *outptr; 477 478 outptr = &vport->fc_nodename.u.s.IEEE[0]; 479 for (i = 0; i < 12; i++) { 480 status = *outptr++; 481 j = ((status & 0xf0) >> 4); 482 if (j <= 9) 483 phba->SerialNumber[i] = 484 (char)((uint8_t) 0x30 + (uint8_t) j); 485 else 486 phba->SerialNumber[i] = 487 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 488 i++; 489 j = (status & 0xf); 490 if (j <= 9) 491 phba->SerialNumber[i] = 492 (char)((uint8_t) 0x30 + (uint8_t) j); 493 else 494 phba->SerialNumber[i] = 495 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 496 } 497 } 498 499 lpfc_read_config(phba, pmb); 500 pmb->vport = vport; 501 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 502 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 503 "0453 Adapter failed to init, mbxCmd x%x " 504 "READ_CONFIG, mbxStatus x%x\n", 505 mb->mbxCommand, mb->mbxStatus); 506 phba->link_state = LPFC_HBA_ERROR; 507 mempool_free( pmb, phba->mbox_mem_pool); 508 return -EIO; 509 } 510 511 /* Check if the port is disabled */ 512 lpfc_sli_read_link_ste(phba); 513 514 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 515 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { 516 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 517 "3359 HBA queue depth changed from %d to %d\n", 518 phba->cfg_hba_queue_depth, 519 mb->un.varRdConfig.max_xri); 520 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; 521 } 522 523 phba->lmt = mb->un.varRdConfig.lmt; 524 525 /* Get the default values for Model Name and Description */ 526 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 527 528 phba->link_state = LPFC_LINK_DOWN; 529 530 /* Only process IOCBs on ELS ring till hba_state is READY */ 531 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 532 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 533 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 534 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 535 536 /* Post receive buffers for desired rings */ 537 if (phba->sli_rev != 3) 538 lpfc_post_rcv_buf(phba); 539 540 /* 541 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 542 */ 543 if (phba->intr_type == MSIX) { 544 rc = lpfc_config_msi(phba, pmb); 545 if (rc) { 546 mempool_free(pmb, phba->mbox_mem_pool); 547 return -EIO; 548 } 
549 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 550 if (rc != MBX_SUCCESS) { 551 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 552 "0352 Config MSI mailbox command " 553 "failed, mbxCmd x%x, mbxStatus x%x\n", 554 pmb->u.mb.mbxCommand, 555 pmb->u.mb.mbxStatus); 556 mempool_free(pmb, phba->mbox_mem_pool); 557 return -EIO; 558 } 559 } 560 561 spin_lock_irq(&phba->hbalock); 562 /* Initialize ERATT handling flag */ 563 phba->hba_flag &= ~HBA_ERATT_HANDLED; 564 565 /* Enable appropriate host interrupts */ 566 if (lpfc_readl(phba->HCregaddr, &status)) { 567 spin_unlock_irq(&phba->hbalock); 568 return -EIO; 569 } 570 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 571 if (psli->num_rings > 0) 572 status |= HC_R0INT_ENA; 573 if (psli->num_rings > 1) 574 status |= HC_R1INT_ENA; 575 if (psli->num_rings > 2) 576 status |= HC_R2INT_ENA; 577 if (psli->num_rings > 3) 578 status |= HC_R3INT_ENA; 579 580 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 581 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 582 status &= ~(HC_R0INT_ENA); 583 584 writel(status, phba->HCregaddr); 585 readl(phba->HCregaddr); /* flush */ 586 spin_unlock_irq(&phba->hbalock); 587 588 /* Set up ring-0 (ELS) timer */ 589 timeout = phba->fc_ratov * 2; 590 mod_timer(&vport->els_tmofunc, 591 jiffies + msecs_to_jiffies(1000 * timeout)); 592 /* Set up heart beat (HB) timer */ 593 mod_timer(&phba->hb_tmofunc, 594 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 595 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 596 phba->last_completion_time = jiffies; 597 /* Set up error attention (ERATT) polling timer */ 598 mod_timer(&phba->eratt_poll, 599 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 600 601 if (phba->hba_flag & LINK_DISABLED) { 602 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 603 "2598 Adapter Link is disabled.\n"); 604 lpfc_down_link(phba, pmb); 605 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 606 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 607 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 608 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 609 "2599 Adapter failed to issue DOWN_LINK" 610 " mbox command rc 0x%x\n", rc); 611 612 mempool_free(pmb, phba->mbox_mem_pool); 613 return -EIO; 614 } 615 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 616 mempool_free(pmb, phba->mbox_mem_pool); 617 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 618 if (rc) 619 return rc; 620 } 621 /* MBOX buffer will be freed in mbox compl */ 622 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 623 if (!pmb) { 624 phba->link_state = LPFC_HBA_ERROR; 625 return -ENOMEM; 626 } 627 628 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 629 pmb->mbox_cmpl = lpfc_config_async_cmpl; 630 pmb->vport = phba->pport; 631 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 632 633 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 634 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 635 "0456 Adapter failed to issue " 636 "ASYNCEVT_ENABLE mbox status x%x\n", 637 rc); 638 mempool_free(pmb, phba->mbox_mem_pool); 639 } 640 641 /* Get Option rom version */ 642 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 643 if (!pmb) { 644 phba->link_state = LPFC_HBA_ERROR; 645 return -ENOMEM; 646 } 647 648 lpfc_dump_wakeup_param(phba, pmb); 649 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 650 pmb->vport = phba->pport; 651 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 652 653 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 654 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 655 "0435 Adapter failed " 656 "to get Option ROM 
version status x%x\n", rc); 657 mempool_free(pmb, phba->mbox_mem_pool); 658 } 659 660 return 0; 661 } 662 663 /** 664 * lpfc_hba_init_link - Initialize the FC link 665 * @phba: pointer to lpfc hba data structure. 666 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 667 * 668 * This routine will issue the INIT_LINK mailbox command call. 669 * It is available to other drivers through the lpfc_hba data 670 * structure for use as a delayed link up mechanism with the 671 * module parameter lpfc_suppress_link_up. 672 * 673 * Return code 674 * 0 - success 675 * Any other value - error 676 **/ 677 static int 678 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 679 { 680 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 681 } 682 683 /** 684 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 685 * @phba: pointer to lpfc hba data structure. 686 * @fc_topology: desired fc topology. 687 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 688 * 689 * This routine will issue the INIT_LINK mailbox command call. 690 * It is available to other drivers through the lpfc_hba data 691 * structure for use as a delayed link up mechanism with the 692 * module parameter lpfc_suppress_link_up. 693 * 694 * Return code 695 * 0 - success 696 * Any other value - error 697 **/ 698 int 699 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 700 uint32_t flag) 701 { 702 struct lpfc_vport *vport = phba->pport; 703 LPFC_MBOXQ_t *pmb; 704 MAILBOX_t *mb; 705 int rc; 706 707 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 708 if (!pmb) { 709 phba->link_state = LPFC_HBA_ERROR; 710 return -ENOMEM; 711 } 712 mb = &pmb->u.mb; 713 pmb->vport = vport; 714 715 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 716 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 717 !(phba->lmt & LMT_1Gb)) || 718 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 719 !(phba->lmt & LMT_2Gb)) || 720 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 721 !(phba->lmt & LMT_4Gb)) || 722 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 723 !(phba->lmt & LMT_8Gb)) || 724 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 725 !(phba->lmt & LMT_10Gb)) || 726 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 727 !(phba->lmt & LMT_16Gb)) || 728 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 729 !(phba->lmt & LMT_32Gb)) || 730 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 731 !(phba->lmt & LMT_64Gb))) { 732 /* Reset link speed to auto */ 733 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 734 "1302 Invalid speed for this board:%d " 735 "Reset link speed to auto.\n", 736 phba->cfg_link_speed); 737 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 738 } 739 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 740 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 741 if (phba->sli_rev < LPFC_SLI_REV4) 742 lpfc_set_loopback_flag(phba); 743 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 744 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 745 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 746 "0498 Adapter failed to init, mbxCmd x%x " 747 "INIT_LINK, mbxStatus x%x\n", 748 mb->mbxCommand, mb->mbxStatus); 749 if (phba->sli_rev <= LPFC_SLI_REV3) { 750 /* Clear all interrupt enable conditions */ 751 writel(0, phba->HCregaddr); 752 readl(phba->HCregaddr); /* flush */ 753 /* Clear all pending interrupts */ 754 writel(0xffffffff, phba->HAregaddr); 755 readl(phba->HAregaddr); /* flush */ 756 } 757 phba->link_state 
= LPFC_HBA_ERROR; 758 if (rc != MBX_BUSY || flag == MBX_POLL) 759 mempool_free(pmb, phba->mbox_mem_pool); 760 return -EIO; 761 } 762 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 763 if (flag == MBX_POLL) 764 mempool_free(pmb, phba->mbox_mem_pool); 765 766 return 0; 767 } 768 769 /** 770 * lpfc_hba_down_link - this routine downs the FC link 771 * @phba: pointer to lpfc hba data structure. 772 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 773 * 774 * This routine will issue the DOWN_LINK mailbox command call. 775 * It is available to other drivers through the lpfc_hba data 776 * structure for use to stop the link. 777 * 778 * Return code 779 * 0 - success 780 * Any other value - error 781 **/ 782 static int 783 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 784 { 785 LPFC_MBOXQ_t *pmb; 786 int rc; 787 788 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 789 if (!pmb) { 790 phba->link_state = LPFC_HBA_ERROR; 791 return -ENOMEM; 792 } 793 794 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 795 "0491 Adapter Link is disabled.\n"); 796 lpfc_down_link(phba, pmb); 797 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 798 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 799 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 800 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 801 "2522 Adapter failed to issue DOWN_LINK" 802 " mbox command rc 0x%x\n", rc); 803 804 mempool_free(pmb, phba->mbox_mem_pool); 805 return -EIO; 806 } 807 if (flag == MBX_POLL) 808 mempool_free(pmb, phba->mbox_mem_pool); 809 810 return 0; 811 } 812 813 /** 814 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 815 * @phba: pointer to lpfc HBA data structure. 816 * 817 * This routine will do LPFC uninitialization before the HBA is reset when 818 * bringing down the SLI Layer. 819 * 820 * Return codes 821 * 0 - success. 822 * Any other value - error. 823 **/ 824 int 825 lpfc_hba_down_prep(struct lpfc_hba *phba) 826 { 827 struct lpfc_vport **vports; 828 int i; 829 830 if (phba->sli_rev <= LPFC_SLI_REV3) { 831 /* Disable interrupts */ 832 writel(0, phba->HCregaddr); 833 readl(phba->HCregaddr); /* flush */ 834 } 835 836 if (phba->pport->load_flag & FC_UNLOADING) 837 lpfc_cleanup_discovery_resources(phba->pport); 838 else { 839 vports = lpfc_create_vport_work_array(phba); 840 if (vports != NULL) 841 for (i = 0; i <= phba->max_vports && 842 vports[i] != NULL; i++) 843 lpfc_cleanup_discovery_resources(vports[i]); 844 lpfc_destroy_vport_work_array(phba, vports); 845 } 846 return 0; 847 } 848 849 /** 850 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 851 * rspiocb which got deferred 852 * 853 * @phba: pointer to lpfc HBA data structure. 854 * 855 * This routine will cleanup completed slow path events after HBA is reset 856 * when bringing down the SLI Layer. 857 * 858 * 859 * Return codes 860 * void. 
861 **/ 862 static void 863 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 864 { 865 struct lpfc_iocbq *rspiocbq; 866 struct hbq_dmabuf *dmabuf; 867 struct lpfc_cq_event *cq_event; 868 869 spin_lock_irq(&phba->hbalock); 870 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 871 spin_unlock_irq(&phba->hbalock); 872 873 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 874 /* Get the response iocb from the head of work queue */ 875 spin_lock_irq(&phba->hbalock); 876 list_remove_head(&phba->sli4_hba.sp_queue_event, 877 cq_event, struct lpfc_cq_event, list); 878 spin_unlock_irq(&phba->hbalock); 879 880 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 881 case CQE_CODE_COMPL_WQE: 882 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 883 cq_event); 884 lpfc_sli_release_iocbq(phba, rspiocbq); 885 break; 886 case CQE_CODE_RECEIVE: 887 case CQE_CODE_RECEIVE_V1: 888 dmabuf = container_of(cq_event, struct hbq_dmabuf, 889 cq_event); 890 lpfc_in_buf_free(phba, &dmabuf->dbuf); 891 } 892 } 893 } 894 895 /** 896 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 897 * @phba: pointer to lpfc HBA data structure. 898 * 899 * This routine will cleanup posted ELS buffers after the HBA is reset 900 * when bringing down the SLI Layer. 901 * 902 * 903 * Return codes 904 * void. 905 **/ 906 static void 907 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 908 { 909 struct lpfc_sli *psli = &phba->sli; 910 struct lpfc_sli_ring *pring; 911 struct lpfc_dmabuf *mp, *next_mp; 912 LIST_HEAD(buflist); 913 int count; 914 915 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 916 lpfc_sli_hbqbuf_free_all(phba); 917 else { 918 /* Cleanup preposted buffers on the ELS ring */ 919 pring = &psli->sli3_ring[LPFC_ELS_RING]; 920 spin_lock_irq(&phba->hbalock); 921 list_splice_init(&pring->postbufq, &buflist); 922 spin_unlock_irq(&phba->hbalock); 923 924 count = 0; 925 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 926 list_del(&mp->list); 927 count++; 928 lpfc_mbuf_free(phba, mp->virt, mp->phys); 929 kfree(mp); 930 } 931 932 spin_lock_irq(&phba->hbalock); 933 pring->postbufq_cnt -= count; 934 spin_unlock_irq(&phba->hbalock); 935 } 936 } 937 938 /** 939 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 940 * @phba: pointer to lpfc HBA data structure. 941 * 942 * This routine will cleanup the txcmplq after the HBA is reset when bringing 943 * down the SLI Layer. 944 * 945 * Return codes 946 * void 947 **/ 948 static void 949 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 950 { 951 struct lpfc_sli *psli = &phba->sli; 952 struct lpfc_queue *qp = NULL; 953 struct lpfc_sli_ring *pring; 954 LIST_HEAD(completions); 955 int i; 956 struct lpfc_iocbq *piocb, *next_iocb; 957 958 if (phba->sli_rev != LPFC_SLI_REV4) { 959 for (i = 0; i < psli->num_rings; i++) { 960 pring = &psli->sli3_ring[i]; 961 spin_lock_irq(&phba->hbalock); 962 /* At this point in time the HBA is either reset or DOA 963 * Nothing should be on txcmplq as it will 964 * NEVER complete. 
965 */ 966 list_splice_init(&pring->txcmplq, &completions); 967 pring->txcmplq_cnt = 0; 968 spin_unlock_irq(&phba->hbalock); 969 970 lpfc_sli_abort_iocb_ring(phba, pring); 971 } 972 /* Cancel all the IOCBs from the completions list */ 973 lpfc_sli_cancel_iocbs(phba, &completions, 974 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 975 return; 976 } 977 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 978 pring = qp->pring; 979 if (!pring) 980 continue; 981 spin_lock_irq(&pring->ring_lock); 982 list_for_each_entry_safe(piocb, next_iocb, 983 &pring->txcmplq, list) 984 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 985 list_splice_init(&pring->txcmplq, &completions); 986 pring->txcmplq_cnt = 0; 987 spin_unlock_irq(&pring->ring_lock); 988 lpfc_sli_abort_iocb_ring(phba, pring); 989 } 990 /* Cancel all the IOCBs from the completions list */ 991 lpfc_sli_cancel_iocbs(phba, &completions, 992 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 993 } 994 995 /** 996 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 997 * @phba: pointer to lpfc HBA data structure. 998 * 999 * This routine will do uninitialization after the HBA is reset when bring 1000 * down the SLI Layer. 1001 * 1002 * Return codes 1003 * 0 - success. 1004 * Any other value - error. 1005 **/ 1006 static int 1007 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 1008 { 1009 lpfc_hba_free_post_buf(phba); 1010 lpfc_hba_clean_txcmplq(phba); 1011 return 0; 1012 } 1013 1014 /** 1015 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 1016 * @phba: pointer to lpfc HBA data structure. 1017 * 1018 * This routine will do uninitialization after the HBA is reset when bring 1019 * down the SLI Layer. 1020 * 1021 * Return codes 1022 * 0 - success. 1023 * Any other value - error. 1024 **/ 1025 static int 1026 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1027 { 1028 struct lpfc_io_buf *psb, *psb_next; 1029 struct lpfc_async_xchg_ctx *ctxp, *ctxp_next; 1030 struct lpfc_sli4_hdw_queue *qp; 1031 LIST_HEAD(aborts); 1032 LIST_HEAD(nvme_aborts); 1033 LIST_HEAD(nvmet_aborts); 1034 struct lpfc_sglq *sglq_entry = NULL; 1035 int cnt, idx; 1036 1037 1038 lpfc_sli_hbqbuf_free_all(phba); 1039 lpfc_hba_clean_txcmplq(phba); 1040 1041 /* At this point in time the HBA is either reset or DOA. Either 1042 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1043 * on the lpfc_els_sgl_list so that it can either be freed if the 1044 * driver is unloading or reposted if the driver is restarting 1045 * the port. 1046 */ 1047 1048 /* sgl_list_lock required because worker thread uses this 1049 * list. 1050 */ 1051 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 1052 list_for_each_entry(sglq_entry, 1053 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1054 sglq_entry->state = SGL_FREED; 1055 1056 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1057 &phba->sli4_hba.lpfc_els_sgl_list); 1058 1059 1060 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 1061 1062 /* abts_xxxx_buf_list_lock required because worker thread uses this 1063 * list. 
1064 */ 1065 spin_lock_irq(&phba->hbalock); 1066 cnt = 0; 1067 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1068 qp = &phba->sli4_hba.hdwq[idx]; 1069 1070 spin_lock(&qp->abts_io_buf_list_lock); 1071 list_splice_init(&qp->lpfc_abts_io_buf_list, 1072 &aborts); 1073 1074 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1075 psb->pCmd = NULL; 1076 psb->status = IOSTAT_SUCCESS; 1077 cnt++; 1078 } 1079 spin_lock(&qp->io_buf_list_put_lock); 1080 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1081 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1082 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1083 qp->abts_scsi_io_bufs = 0; 1084 qp->abts_nvme_io_bufs = 0; 1085 spin_unlock(&qp->io_buf_list_put_lock); 1086 spin_unlock(&qp->abts_io_buf_list_lock); 1087 } 1088 spin_unlock_irq(&phba->hbalock); 1089 1090 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1091 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1092 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1093 &nvmet_aborts); 1094 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1095 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1096 ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP); 1097 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1098 } 1099 } 1100 1101 lpfc_sli4_free_sp_events(phba); 1102 return cnt; 1103 } 1104 1105 /** 1106 * lpfc_hba_down_post - Wrapper func for hba down post routine 1107 * @phba: pointer to lpfc HBA data structure. 1108 * 1109 * This routine wraps the actual SLI3 or SLI4 routine for performing 1110 * uninitialization after the HBA is reset when bring down the SLI Layer. 1111 * 1112 * Return codes 1113 * 0 - success. 1114 * Any other value - error. 1115 **/ 1116 int 1117 lpfc_hba_down_post(struct lpfc_hba *phba) 1118 { 1119 return (*phba->lpfc_hba_down_post)(phba); 1120 } 1121 1122 /** 1123 * lpfc_hb_timeout - The HBA-timer timeout handler 1124 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1125 * 1126 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1127 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1128 * work-port-events bitmap and the worker thread is notified. This timeout 1129 * event will be used by the worker thread to invoke the actual timeout 1130 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1131 * be performed in the timeout handler and the HBA timeout event bit shall 1132 * be cleared by the worker thread after it has taken the event bitmap out. 1133 **/ 1134 static void 1135 lpfc_hb_timeout(struct timer_list *t) 1136 { 1137 struct lpfc_hba *phba; 1138 uint32_t tmo_posted; 1139 unsigned long iflag; 1140 1141 phba = from_timer(phba, t, hb_tmofunc); 1142 1143 /* Check for heart beat timeout conditions */ 1144 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1145 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1146 if (!tmo_posted) 1147 phba->pport->work_port_events |= WORKER_HB_TMO; 1148 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1149 1150 /* Tell the worker thread there is work to do */ 1151 if (!tmo_posted) 1152 lpfc_worker_wake_up(phba); 1153 return; 1154 } 1155 1156 /** 1157 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1158 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1159 * 1160 * This is the RRQ-timer timeout handler registered to the lpfc driver. 
When 1161 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1162 * work-port-events bitmap and the worker thread is notified. This timeout 1163 * event will be used by the worker thread to invoke the actual timeout 1164 * handler routine, lpfc_rrq_handler. Any periodical operations will 1165 * be performed in the timeout handler and the RRQ timeout event bit shall 1166 * be cleared by the worker thread after it has taken the event bitmap out. 1167 **/ 1168 static void 1169 lpfc_rrq_timeout(struct timer_list *t) 1170 { 1171 struct lpfc_hba *phba; 1172 unsigned long iflag; 1173 1174 phba = from_timer(phba, t, rrq_tmr); 1175 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1176 if (!(phba->pport->load_flag & FC_UNLOADING)) 1177 phba->hba_flag |= HBA_RRQ_ACTIVE; 1178 else 1179 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1180 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1181 1182 if (!(phba->pport->load_flag & FC_UNLOADING)) 1183 lpfc_worker_wake_up(phba); 1184 } 1185 1186 /** 1187 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1188 * @phba: pointer to lpfc hba data structure. 1189 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1190 * 1191 * This is the callback function to the lpfc heart-beat mailbox command. 1192 * If configured, the lpfc driver issues the heart-beat mailbox command to 1193 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1194 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1195 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1196 * heart-beat outstanding state. Once the mailbox command comes back and 1197 * no error conditions detected, the heart-beat mailbox command timer is 1198 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1199 * state is cleared for the next heart-beat. If the timer expired with the 1200 * heart-beat outstanding state set, the driver will put the HBA offline. 1201 **/ 1202 static void 1203 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1204 { 1205 unsigned long drvr_flag; 1206 1207 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1208 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 1209 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1210 1211 /* Check and reset heart-beat timer if necessary */ 1212 mempool_free(pmboxq, phba->mbox_mem_pool); 1213 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1214 !(phba->link_state == LPFC_HBA_ERROR) && 1215 !(phba->pport->load_flag & FC_UNLOADING)) 1216 mod_timer(&phba->hb_tmofunc, 1217 jiffies + 1218 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1219 return; 1220 } 1221 1222 /* 1223 * lpfc_idle_stat_delay_work - idle_stat tracking 1224 * 1225 * This routine tracks per-cq idle_stat and determines polling decisions. 
1226 * 1227 * Return codes: 1228 * None 1229 **/ 1230 static void 1231 lpfc_idle_stat_delay_work(struct work_struct *work) 1232 { 1233 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1234 struct lpfc_hba, 1235 idle_stat_delay_work); 1236 struct lpfc_queue *cq; 1237 struct lpfc_sli4_hdw_queue *hdwq; 1238 struct lpfc_idle_stat *idle_stat; 1239 u32 i, idle_percent; 1240 u64 wall, wall_idle, diff_wall, diff_idle, busy_time; 1241 1242 if (phba->pport->load_flag & FC_UNLOADING) 1243 return; 1244 1245 if (phba->link_state == LPFC_HBA_ERROR || 1246 phba->pport->fc_flag & FC_OFFLINE_MODE) 1247 goto requeue; 1248 1249 for_each_present_cpu(i) { 1250 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; 1251 cq = hdwq->io_cq; 1252 1253 /* Skip if we've already handled this cq's primary CPU */ 1254 if (cq->chann != i) 1255 continue; 1256 1257 idle_stat = &phba->sli4_hba.idle_stat[i]; 1258 1259 /* get_cpu_idle_time returns values as running counters. Thus, 1260 * to know the amount for this period, the prior counter values 1261 * need to be subtracted from the current counter values. 1262 * From there, the idle time stat can be calculated as a 1263 * percentage of 100 - the sum of the other consumption times. 1264 */ 1265 wall_idle = get_cpu_idle_time(i, &wall, 1); 1266 diff_idle = wall_idle - idle_stat->prev_idle; 1267 diff_wall = wall - idle_stat->prev_wall; 1268 1269 if (diff_wall <= diff_idle) 1270 busy_time = 0; 1271 else 1272 busy_time = diff_wall - diff_idle; 1273 1274 idle_percent = div64_u64(100 * busy_time, diff_wall); 1275 idle_percent = 100 - idle_percent; 1276 1277 if (idle_percent < 15) 1278 cq->poll_mode = LPFC_QUEUE_WORK; 1279 else 1280 cq->poll_mode = LPFC_IRQ_POLL; 1281 1282 idle_stat->prev_idle = wall_idle; 1283 idle_stat->prev_wall = wall; 1284 } 1285 1286 requeue: 1287 schedule_delayed_work(&phba->idle_stat_delay_work, 1288 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); 1289 } 1290 1291 static void 1292 lpfc_hb_eq_delay_work(struct work_struct *work) 1293 { 1294 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1295 struct lpfc_hba, eq_delay_work); 1296 struct lpfc_eq_intr_info *eqi, *eqi_new; 1297 struct lpfc_queue *eq, *eq_next; 1298 unsigned char *ena_delay = NULL; 1299 uint32_t usdelay; 1300 int i; 1301 1302 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1303 return; 1304 1305 if (phba->link_state == LPFC_HBA_ERROR || 1306 phba->pport->fc_flag & FC_OFFLINE_MODE) 1307 goto requeue; 1308 1309 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1310 GFP_KERNEL); 1311 if (!ena_delay) 1312 goto requeue; 1313 1314 for (i = 0; i < phba->cfg_irq_chann; i++) { 1315 /* Get the EQ corresponding to the IRQ vector */ 1316 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1317 if (!eq) 1318 continue; 1319 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1320 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1321 ena_delay[eq->last_cpu] = 1; 1322 } 1323 } 1324 1325 for_each_present_cpu(i) { 1326 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1327 if (ena_delay[i]) { 1328 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1329 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1330 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1331 } else { 1332 usdelay = 0; 1333 } 1334 1335 eqi->icnt = 0; 1336 1337 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1338 if (unlikely(eq->last_cpu != i)) { 1339 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1340 eq->last_cpu); 1341 list_move_tail(&eq->cpu_list, &eqi_new->list); 1342 continue; 1343 } 1344 if (usdelay != eq->q_mode) 1345 
lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1346 usdelay); 1347 } 1348 } 1349 1350 kfree(ena_delay); 1351 1352 requeue: 1353 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1354 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1355 } 1356 1357 /** 1358 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1359 * @phba: pointer to lpfc hba data structure. 1360 * 1361 * For each heartbeat, this routine does some heuristic methods to adjust 1362 * XRI distribution. The goal is to fully utilize free XRIs. 1363 **/ 1364 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1365 { 1366 u32 i; 1367 u32 hwq_count; 1368 1369 hwq_count = phba->cfg_hdw_queue; 1370 for (i = 0; i < hwq_count; i++) { 1371 /* Adjust XRIs in private pool */ 1372 lpfc_adjust_pvt_pool_count(phba, i); 1373 1374 /* Adjust high watermark */ 1375 lpfc_adjust_high_watermark(phba, i); 1376 1377 #ifdef LPFC_MXP_STAT 1378 /* Snapshot pbl, pvt and busy count */ 1379 lpfc_snapshot_mxp(phba, i); 1380 #endif 1381 } 1382 } 1383 1384 /** 1385 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command 1386 * @phba: pointer to lpfc hba data structure. 1387 * 1388 * If a HB mbox is not already in progrees, this routine will allocate 1389 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command, 1390 * and issue it. The HBA_HBEAT_INP flag means the command is in progress. 1391 **/ 1392 int 1393 lpfc_issue_hb_mbox(struct lpfc_hba *phba) 1394 { 1395 LPFC_MBOXQ_t *pmboxq; 1396 int retval; 1397 1398 /* Is a Heartbeat mbox already in progress */ 1399 if (phba->hba_flag & HBA_HBEAT_INP) 1400 return 0; 1401 1402 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1403 if (!pmboxq) 1404 return -ENOMEM; 1405 1406 lpfc_heart_beat(phba, pmboxq); 1407 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1408 pmboxq->vport = phba->pport; 1409 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 1410 1411 if (retval != MBX_BUSY && retval != MBX_SUCCESS) { 1412 mempool_free(pmboxq, phba->mbox_mem_pool); 1413 return -ENXIO; 1414 } 1415 phba->hba_flag |= HBA_HBEAT_INP; 1416 1417 return 0; 1418 } 1419 1420 /** 1421 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command 1422 * @phba: pointer to lpfc hba data structure. 1423 * 1424 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO 1425 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless 1426 * of the value of lpfc_enable_hba_heartbeat. 1427 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always 1428 * try to issue a MBX_HEARTBEAT mbox command. 1429 **/ 1430 void 1431 lpfc_issue_hb_tmo(struct lpfc_hba *phba) 1432 { 1433 if (phba->cfg_enable_hba_heartbeat) 1434 return; 1435 phba->hba_flag |= HBA_HBEAT_TMO; 1436 } 1437 1438 /** 1439 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1440 * @phba: pointer to lpfc hba data structure. 1441 * 1442 * This is the actual HBA-timer timeout handler to be invoked by the worker 1443 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1444 * handler performs any periodic operations needed for the device. If such 1445 * periodic event has already been attended to either in the interrupt handler 1446 * or by processing slow-ring or fast-ring events within the HBA-timer 1447 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1448 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1449 * is configured and there is no heart-beat mailbox command outstanding, a 1450 * heart-beat mailbox is issued and timer set properly. 
Otherwise, if there 1451 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1452 * to offline. 1453 **/ 1454 void 1455 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1456 { 1457 struct lpfc_vport **vports; 1458 struct lpfc_dmabuf *buf_ptr; 1459 int retval = 0; 1460 int i, tmo; 1461 struct lpfc_sli *psli = &phba->sli; 1462 LIST_HEAD(completions); 1463 1464 if (phba->cfg_xri_rebalancing) { 1465 /* Multi-XRI pools handler */ 1466 lpfc_hb_mxp_handler(phba); 1467 } 1468 1469 vports = lpfc_create_vport_work_array(phba); 1470 if (vports != NULL) 1471 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1472 lpfc_rcv_seq_check_edtov(vports[i]); 1473 lpfc_fdmi_change_check(vports[i]); 1474 } 1475 lpfc_destroy_vport_work_array(phba, vports); 1476 1477 if ((phba->link_state == LPFC_HBA_ERROR) || 1478 (phba->pport->load_flag & FC_UNLOADING) || 1479 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1480 return; 1481 1482 if (phba->elsbuf_cnt && 1483 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1484 spin_lock_irq(&phba->hbalock); 1485 list_splice_init(&phba->elsbuf, &completions); 1486 phba->elsbuf_cnt = 0; 1487 phba->elsbuf_prev_cnt = 0; 1488 spin_unlock_irq(&phba->hbalock); 1489 1490 while (!list_empty(&completions)) { 1491 list_remove_head(&completions, buf_ptr, 1492 struct lpfc_dmabuf, list); 1493 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1494 kfree(buf_ptr); 1495 } 1496 } 1497 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1498 1499 /* If there is no heart beat outstanding, issue a heartbeat command */ 1500 if (phba->cfg_enable_hba_heartbeat) { 1501 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ 1502 spin_lock_irq(&phba->pport->work_port_lock); 1503 if (time_after(phba->last_completion_time + 1504 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1505 jiffies)) { 1506 spin_unlock_irq(&phba->pport->work_port_lock); 1507 if (phba->hba_flag & HBA_HBEAT_INP) 1508 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1509 else 1510 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1511 goto out; 1512 } 1513 spin_unlock_irq(&phba->pport->work_port_lock); 1514 1515 /* Check if a MBX_HEARTBEAT is already in progress */ 1516 if (phba->hba_flag & HBA_HBEAT_INP) { 1517 /* 1518 * If heart beat timeout called with HBA_HBEAT_INP set 1519 * we need to give the hb mailbox cmd a chance to 1520 * complete or TMO. 
1521 */ 1522 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1523 "0459 Adapter heartbeat still outstanding: " 1524 "last compl time was %d ms.\n", 1525 jiffies_to_msecs(jiffies 1526 - phba->last_completion_time)); 1527 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1528 } else { 1529 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1530 (list_empty(&psli->mboxq))) { 1531 1532 retval = lpfc_issue_hb_mbox(phba); 1533 if (retval) { 1534 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1535 goto out; 1536 } 1537 phba->skipped_hb = 0; 1538 } else if (time_before_eq(phba->last_completion_time, 1539 phba->skipped_hb)) { 1540 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1541 "2857 Last completion time not " 1542 " updated in %d ms\n", 1543 jiffies_to_msecs(jiffies 1544 - phba->last_completion_time)); 1545 } else 1546 phba->skipped_hb = jiffies; 1547 1548 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1549 goto out; 1550 } 1551 } else { 1552 /* Check to see if we want to force a MBX_HEARTBEAT */ 1553 if (phba->hba_flag & HBA_HBEAT_TMO) { 1554 retval = lpfc_issue_hb_mbox(phba); 1555 if (retval) 1556 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1557 else 1558 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1559 goto out; 1560 } 1561 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1562 } 1563 out: 1564 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); 1565 } 1566 1567 /** 1568 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1569 * @phba: pointer to lpfc hba data structure. 1570 * 1571 * This routine is called to bring the HBA offline when HBA hardware error 1572 * other than Port Error 6 has been detected. 1573 **/ 1574 static void 1575 lpfc_offline_eratt(struct lpfc_hba *phba) 1576 { 1577 struct lpfc_sli *psli = &phba->sli; 1578 1579 spin_lock_irq(&phba->hbalock); 1580 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1581 spin_unlock_irq(&phba->hbalock); 1582 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1583 1584 lpfc_offline(phba); 1585 lpfc_reset_barrier(phba); 1586 spin_lock_irq(&phba->hbalock); 1587 lpfc_sli_brdreset(phba); 1588 spin_unlock_irq(&phba->hbalock); 1589 lpfc_hba_down_post(phba); 1590 lpfc_sli_brdready(phba, HS_MBRDY); 1591 lpfc_unblock_mgmt_io(phba); 1592 phba->link_state = LPFC_HBA_ERROR; 1593 return; 1594 } 1595 1596 /** 1597 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1598 * @phba: pointer to lpfc hba data structure. 1599 * 1600 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1601 * other than Port Error 6 has been detected. 1602 **/ 1603 void 1604 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1605 { 1606 spin_lock_irq(&phba->hbalock); 1607 phba->link_state = LPFC_HBA_ERROR; 1608 spin_unlock_irq(&phba->hbalock); 1609 1610 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1611 lpfc_sli_flush_io_rings(phba); 1612 lpfc_offline(phba); 1613 lpfc_hba_down_post(phba); 1614 lpfc_unblock_mgmt_io(phba); 1615 } 1616 1617 /** 1618 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1619 * @phba: pointer to lpfc hba data structure. 1620 * 1621 * This routine is invoked to handle the deferred HBA hardware error 1622 * conditions. This type of error is indicated by HBA by setting ER1 1623 * and another ER bit in the host status register. The driver will 1624 * wait until the ER1 bit clears before handling the error condition. 
1625 **/ 1626 static void 1627 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1628 { 1629 uint32_t old_host_status = phba->work_hs; 1630 struct lpfc_sli *psli = &phba->sli; 1631 1632 /* If the pci channel is offline, ignore possible errors, 1633 * since we cannot communicate with the pci card anyway. 1634 */ 1635 if (pci_channel_offline(phba->pcidev)) { 1636 spin_lock_irq(&phba->hbalock); 1637 phba->hba_flag &= ~DEFER_ERATT; 1638 spin_unlock_irq(&phba->hbalock); 1639 return; 1640 } 1641 1642 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1643 "0479 Deferred Adapter Hardware Error " 1644 "Data: x%x x%x x%x\n", 1645 phba->work_hs, phba->work_status[0], 1646 phba->work_status[1]); 1647 1648 spin_lock_irq(&phba->hbalock); 1649 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1650 spin_unlock_irq(&phba->hbalock); 1651 1652 1653 /* 1654 * Firmware stops when it triggred erratt. That could cause the I/Os 1655 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1656 * SCSI layer retry it after re-establishing link. 1657 */ 1658 lpfc_sli_abort_fcp_rings(phba); 1659 1660 /* 1661 * There was a firmware error. Take the hba offline and then 1662 * attempt to restart it. 1663 */ 1664 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 1665 lpfc_offline(phba); 1666 1667 /* Wait for the ER1 bit to clear.*/ 1668 while (phba->work_hs & HS_FFER1) { 1669 msleep(100); 1670 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1671 phba->work_hs = UNPLUG_ERR ; 1672 break; 1673 } 1674 /* If driver is unloading let the worker thread continue */ 1675 if (phba->pport->load_flag & FC_UNLOADING) { 1676 phba->work_hs = 0; 1677 break; 1678 } 1679 } 1680 1681 /* 1682 * This is to ptrotect against a race condition in which 1683 * first write to the host attention register clear the 1684 * host status register. 1685 */ 1686 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1687 phba->work_hs = old_host_status & ~HS_FFER1; 1688 1689 spin_lock_irq(&phba->hbalock); 1690 phba->hba_flag &= ~DEFER_ERATT; 1691 spin_unlock_irq(&phba->hbalock); 1692 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1693 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1694 } 1695 1696 static void 1697 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1698 { 1699 struct lpfc_board_event_header board_event; 1700 struct Scsi_Host *shost; 1701 1702 board_event.event_type = FC_REG_BOARD_EVENT; 1703 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1704 shost = lpfc_shost_from_vport(phba->pport); 1705 fc_host_post_vendor_event(shost, fc_get_event_number(), 1706 sizeof(board_event), 1707 (char *) &board_event, 1708 LPFC_NL_VENDOR_ID); 1709 } 1710 1711 /** 1712 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1713 * @phba: pointer to lpfc hba data structure. 1714 * 1715 * This routine is invoked to handle the following HBA hardware error 1716 * conditions: 1717 * 1 - HBA error attention interrupt 1718 * 2 - DMA ring index out of range 1719 * 3 - Mailbox command came back as unknown 1720 **/ 1721 static void 1722 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1723 { 1724 struct lpfc_vport *vport = phba->pport; 1725 struct lpfc_sli *psli = &phba->sli; 1726 uint32_t event_data; 1727 unsigned long temperature; 1728 struct temp_event temp_event_data; 1729 struct Scsi_Host *shost; 1730 1731 /* If the pci channel is offline, ignore possible errors, 1732 * since we cannot communicate with the pci card anyway. 
1733 */ 1734 if (pci_channel_offline(phba->pcidev)) { 1735 spin_lock_irq(&phba->hbalock); 1736 phba->hba_flag &= ~DEFER_ERATT; 1737 spin_unlock_irq(&phba->hbalock); 1738 return; 1739 } 1740 1741 /* If resets are disabled then leave the HBA alone and return */ 1742 if (!phba->cfg_enable_hba_reset) 1743 return; 1744 1745 /* Send an internal error event to mgmt application */ 1746 lpfc_board_errevt_to_mgmt(phba); 1747 1748 if (phba->hba_flag & DEFER_ERATT) 1749 lpfc_handle_deferred_eratt(phba); 1750 1751 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1752 if (phba->work_hs & HS_FFER6) 1753 /* Re-establishing Link */ 1754 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1755 "1301 Re-establishing Link " 1756 "Data: x%x x%x x%x\n", 1757 phba->work_hs, phba->work_status[0], 1758 phba->work_status[1]); 1759 if (phba->work_hs & HS_FFER8) 1760 /* Device Zeroization */ 1761 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1762 "2861 Host Authentication device " 1763 "zeroization Data:x%x x%x x%x\n", 1764 phba->work_hs, phba->work_status[0], 1765 phba->work_status[1]); 1766 1767 spin_lock_irq(&phba->hbalock); 1768 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1769 spin_unlock_irq(&phba->hbalock); 1770 1771 /* 1772 * Firmware stops when it triggled erratt with HS_FFER6. 1773 * That could cause the I/Os dropped by the firmware. 1774 * Error iocb (I/O) on txcmplq and let the SCSI layer 1775 * retry it after re-establishing link. 1776 */ 1777 lpfc_sli_abort_fcp_rings(phba); 1778 1779 /* 1780 * There was a firmware error. Take the hba offline and then 1781 * attempt to restart it. 1782 */ 1783 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1784 lpfc_offline(phba); 1785 lpfc_sli_brdrestart(phba); 1786 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1787 lpfc_unblock_mgmt_io(phba); 1788 return; 1789 } 1790 lpfc_unblock_mgmt_io(phba); 1791 } else if (phba->work_hs & HS_CRIT_TEMP) { 1792 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1793 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1794 temp_event_data.event_code = LPFC_CRIT_TEMP; 1795 temp_event_data.data = (uint32_t)temperature; 1796 1797 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1798 "0406 Adapter maximum temperature exceeded " 1799 "(%ld), taking this port offline " 1800 "Data: x%x x%x x%x\n", 1801 temperature, phba->work_hs, 1802 phba->work_status[0], phba->work_status[1]); 1803 1804 shost = lpfc_shost_from_vport(phba->pport); 1805 fc_host_post_vendor_event(shost, fc_get_event_number(), 1806 sizeof(temp_event_data), 1807 (char *) &temp_event_data, 1808 SCSI_NL_VID_TYPE_PCI 1809 | PCI_VENDOR_ID_EMULEX); 1810 1811 spin_lock_irq(&phba->hbalock); 1812 phba->over_temp_state = HBA_OVER_TEMP; 1813 spin_unlock_irq(&phba->hbalock); 1814 lpfc_offline_eratt(phba); 1815 1816 } else { 1817 /* The if clause above forces this code path when the status 1818 * failure is a value other than FFER6. Do not call the offline 1819 * twice. This is the adapter hardware error path. 
1820 */ 1821 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1822 "0457 Adapter Hardware Error " 1823 "Data: x%x x%x x%x\n", 1824 phba->work_hs, 1825 phba->work_status[0], phba->work_status[1]); 1826 1827 event_data = FC_REG_DUMP_EVENT; 1828 shost = lpfc_shost_from_vport(vport); 1829 fc_host_post_vendor_event(shost, fc_get_event_number(), 1830 sizeof(event_data), (char *) &event_data, 1831 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1832 1833 lpfc_offline_eratt(phba); 1834 } 1835 return; 1836 } 1837 1838 /** 1839 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1840 * @phba: pointer to lpfc hba data structure. 1841 * @mbx_action: flag for mailbox shutdown action. 1842 * @en_rn_msg: send reset/port recovery message. 1843 * This routine is invoked to perform an SLI4 port PCI function reset in 1844 * response to port status register polling attention. It waits for port 1845 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1846 * During this process, interrupt vectors are freed and later requested 1847 * for handling possible port resource change. 1848 **/ 1849 static int 1850 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1851 bool en_rn_msg) 1852 { 1853 int rc; 1854 uint32_t intr_mode; 1855 LPFC_MBOXQ_t *mboxq; 1856 1857 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1858 LPFC_SLI_INTF_IF_TYPE_2) { 1859 /* 1860 * On error status condition, driver need to wait for port 1861 * ready before performing reset. 1862 */ 1863 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1864 if (rc) 1865 return rc; 1866 } 1867 1868 /* need reset: attempt for port recovery */ 1869 if (en_rn_msg) 1870 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1871 "2887 Reset Needed: Attempting Port " 1872 "Recovery...\n"); 1873 1874 /* If we are no wait, the HBA has been reset and is not 1875 * functional, thus we should clear 1876 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. 1877 */ 1878 if (mbx_action == LPFC_MBX_NO_WAIT) { 1879 spin_lock_irq(&phba->hbalock); 1880 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 1881 if (phba->sli.mbox_active) { 1882 mboxq = phba->sli.mbox_active; 1883 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 1884 __lpfc_mbox_cmpl_put(phba, mboxq); 1885 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1886 phba->sli.mbox_active = NULL; 1887 } 1888 spin_unlock_irq(&phba->hbalock); 1889 } 1890 1891 lpfc_offline_prep(phba, mbx_action); 1892 lpfc_sli_flush_io_rings(phba); 1893 lpfc_offline(phba); 1894 /* release interrupt for possible resource change */ 1895 lpfc_sli4_disable_intr(phba); 1896 rc = lpfc_sli_brdrestart(phba); 1897 if (rc) { 1898 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1899 "6309 Failed to restart board\n"); 1900 return rc; 1901 } 1902 /* request and enable interrupt */ 1903 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1904 if (intr_mode == LPFC_INTR_ERROR) { 1905 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1906 "3175 Failed to enable interrupt\n"); 1907 return -EIO; 1908 } 1909 phba->intr_mode = intr_mode; 1910 rc = lpfc_online(phba); 1911 if (rc == 0) 1912 lpfc_unblock_mgmt_io(phba); 1913 1914 return rc; 1915 } 1916 1917 /** 1918 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1919 * @phba: pointer to lpfc hba data structure. 1920 * 1921 * This routine is invoked to handle the SLI4 HBA hardware error attention 1922 * conditions. 
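 *
 * Handling is keyed off the SLI interface type read from sli4_hba.sli_intf:
 * if_type 0 ports poll the port semaphore register for a recoverable
 * unrecoverable-error (UE) indication, while if_type 2/6 ports decode the
 * STATUS/ERR1/ERR2 registers and, when HBA resets are enabled, attempt
 * recovery through lpfc_sli4_port_sta_fn_reset(). Unrecoverable cases end
 * with link_state set to LPFC_HBA_ERROR and a dump event posted to the
 * management application.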
1923 **/ 1924 static void 1925 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1926 { 1927 struct lpfc_vport *vport = phba->pport; 1928 uint32_t event_data; 1929 struct Scsi_Host *shost; 1930 uint32_t if_type; 1931 struct lpfc_register portstat_reg = {0}; 1932 uint32_t reg_err1, reg_err2; 1933 uint32_t uerrlo_reg, uemasklo_reg; 1934 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1935 bool en_rn_msg = true; 1936 struct temp_event temp_event_data; 1937 struct lpfc_register portsmphr_reg; 1938 int rc, i; 1939 1940 /* If the pci channel is offline, ignore possible errors, since 1941 * we cannot communicate with the pci card anyway. 1942 */ 1943 if (pci_channel_offline(phba->pcidev)) { 1944 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1945 "3166 pci channel is offline\n"); 1946 lpfc_sli4_offline_eratt(phba); 1947 return; 1948 } 1949 1950 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1951 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1952 switch (if_type) { 1953 case LPFC_SLI_INTF_IF_TYPE_0: 1954 pci_rd_rc1 = lpfc_readl( 1955 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1956 &uerrlo_reg); 1957 pci_rd_rc2 = lpfc_readl( 1958 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1959 &uemasklo_reg); 1960 /* consider PCI bus read error as pci_channel_offline */ 1961 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1962 return; 1963 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1964 lpfc_sli4_offline_eratt(phba); 1965 return; 1966 } 1967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1968 "7623 Checking UE recoverable"); 1969 1970 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1971 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1972 &portsmphr_reg.word0)) 1973 continue; 1974 1975 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1976 &portsmphr_reg); 1977 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1978 LPFC_PORT_SEM_UE_RECOVERABLE) 1979 break; 1980 /*Sleep for 1Sec, before checking SEMAPHORE */ 1981 msleep(1000); 1982 } 1983 1984 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1985 "4827 smphr_port_status x%x : Waited %dSec", 1986 smphr_port_status, i); 1987 1988 /* Recoverable UE, reset the HBA device */ 1989 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1990 LPFC_PORT_SEM_UE_RECOVERABLE) { 1991 for (i = 0; i < 20; i++) { 1992 msleep(1000); 1993 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1994 &portsmphr_reg.word0) && 1995 (LPFC_POST_STAGE_PORT_READY == 1996 bf_get(lpfc_port_smphr_port_status, 1997 &portsmphr_reg))) { 1998 rc = lpfc_sli4_port_sta_fn_reset(phba, 1999 LPFC_MBX_NO_WAIT, en_rn_msg); 2000 if (rc == 0) 2001 return; 2002 lpfc_printf_log(phba, KERN_ERR, 2003 LOG_TRACE_EVENT, 2004 "4215 Failed to recover UE"); 2005 break; 2006 } 2007 } 2008 } 2009 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2010 "7624 Firmware not ready: Failing UE recovery," 2011 " waited %dSec", i); 2012 phba->link_state = LPFC_HBA_ERROR; 2013 break; 2014 2015 case LPFC_SLI_INTF_IF_TYPE_2: 2016 case LPFC_SLI_INTF_IF_TYPE_6: 2017 pci_rd_rc1 = lpfc_readl( 2018 phba->sli4_hba.u.if_type2.STATUSregaddr, 2019 &portstat_reg.word0); 2020 /* consider PCI bus read error as pci_channel_offline */ 2021 if (pci_rd_rc1 == -EIO) { 2022 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2023 "3151 PCI bus read access failure: x%x\n", 2024 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 2025 lpfc_sli4_offline_eratt(phba); 2026 return; 2027 } 2028 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 2029 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 2030 if 
(bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 2031 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2032 "2889 Port Overtemperature event, " 2033 "taking port offline Data: x%x x%x\n", 2034 reg_err1, reg_err2); 2035 2036 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 2037 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 2038 temp_event_data.event_code = LPFC_CRIT_TEMP; 2039 temp_event_data.data = 0xFFFFFFFF; 2040 2041 shost = lpfc_shost_from_vport(phba->pport); 2042 fc_host_post_vendor_event(shost, fc_get_event_number(), 2043 sizeof(temp_event_data), 2044 (char *)&temp_event_data, 2045 SCSI_NL_VID_TYPE_PCI 2046 | PCI_VENDOR_ID_EMULEX); 2047 2048 spin_lock_irq(&phba->hbalock); 2049 phba->over_temp_state = HBA_OVER_TEMP; 2050 spin_unlock_irq(&phba->hbalock); 2051 lpfc_sli4_offline_eratt(phba); 2052 return; 2053 } 2054 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2055 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 2056 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2057 "3143 Port Down: Firmware Update " 2058 "Detected\n"); 2059 en_rn_msg = false; 2060 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2061 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2062 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2063 "3144 Port Down: Debug Dump\n"); 2064 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2065 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 2066 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2067 "3145 Port Down: Provisioning\n"); 2068 2069 /* If resets are disabled then leave the HBA alone and return */ 2070 if (!phba->cfg_enable_hba_reset) 2071 return; 2072 2073 /* Check port status register for function reset */ 2074 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 2075 en_rn_msg); 2076 if (rc == 0) { 2077 /* don't report event on forced debug dump */ 2078 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2079 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2080 return; 2081 else 2082 break; 2083 } 2084 /* fall through for not able to recover */ 2085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2086 "3152 Unrecoverable error\n"); 2087 phba->link_state = LPFC_HBA_ERROR; 2088 break; 2089 case LPFC_SLI_INTF_IF_TYPE_1: 2090 default: 2091 break; 2092 } 2093 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2094 "3123 Report dump event to upper layer\n"); 2095 /* Send an internal error event to mgmt application */ 2096 lpfc_board_errevt_to_mgmt(phba); 2097 2098 event_data = FC_REG_DUMP_EVENT; 2099 shost = lpfc_shost_from_vport(vport); 2100 fc_host_post_vendor_event(shost, fc_get_event_number(), 2101 sizeof(event_data), (char *) &event_data, 2102 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2103 } 2104 2105 /** 2106 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2107 * @phba: pointer to lpfc HBA data structure. 2108 * 2109 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2110 * routine from the API jump table function pointer from the lpfc_hba struct. 2111 * 2112 * Return codes 2113 * 0 - success. 2114 * Any other value - error. 2115 **/ 2116 void 2117 lpfc_handle_eratt(struct lpfc_hba *phba) 2118 { 2119 (*phba->lpfc_handle_eratt)(phba); 2120 } 2121 2122 /** 2123 * lpfc_handle_latt - The HBA link event handler 2124 * @phba: pointer to lpfc hba data structure. 2125 * 2126 * This routine is invoked from the worker thread to handle a HBA host 2127 * attention link event. SLI3 only. 
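 *
 * In outline (a simplified sketch of the body below): allocate a mailbox
 * command and DMA buffer, flush outstanding ELS commands, issue
 * READ_TOPOLOGY with lpfc_mbx_cmpl_read_topology as the completion
 * handler, and clear the HA_LATT bit in the Host Attention register.
 * On any allocation or mailbox failure the error path re-enables
 * link-attention interrupts, calls lpfc_linkdown() and marks the HBA
 * in error.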
2128 **/ 2129 void 2130 lpfc_handle_latt(struct lpfc_hba *phba) 2131 { 2132 struct lpfc_vport *vport = phba->pport; 2133 struct lpfc_sli *psli = &phba->sli; 2134 LPFC_MBOXQ_t *pmb; 2135 volatile uint32_t control; 2136 struct lpfc_dmabuf *mp; 2137 int rc = 0; 2138 2139 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2140 if (!pmb) { 2141 rc = 1; 2142 goto lpfc_handle_latt_err_exit; 2143 } 2144 2145 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2146 if (!mp) { 2147 rc = 2; 2148 goto lpfc_handle_latt_free_pmb; 2149 } 2150 2151 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2152 if (!mp->virt) { 2153 rc = 3; 2154 goto lpfc_handle_latt_free_mp; 2155 } 2156 2157 /* Cleanup any outstanding ELS commands */ 2158 lpfc_els_flush_all_cmd(phba); 2159 2160 psli->slistat.link_event++; 2161 lpfc_read_topology(phba, pmb, mp); 2162 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2163 pmb->vport = vport; 2164 /* Block ELS IOCBs until we have processed this mbox command */ 2165 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2166 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2167 if (rc == MBX_NOT_FINISHED) { 2168 rc = 4; 2169 goto lpfc_handle_latt_free_mbuf; 2170 } 2171 2172 /* Clear Link Attention in HA REG */ 2173 spin_lock_irq(&phba->hbalock); 2174 writel(HA_LATT, phba->HAregaddr); 2175 readl(phba->HAregaddr); /* flush */ 2176 spin_unlock_irq(&phba->hbalock); 2177 2178 return; 2179 2180 lpfc_handle_latt_free_mbuf: 2181 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2182 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2183 lpfc_handle_latt_free_mp: 2184 kfree(mp); 2185 lpfc_handle_latt_free_pmb: 2186 mempool_free(pmb, phba->mbox_mem_pool); 2187 lpfc_handle_latt_err_exit: 2188 /* Enable Link attention interrupts */ 2189 spin_lock_irq(&phba->hbalock); 2190 psli->sli_flag |= LPFC_PROCESS_LA; 2191 control = readl(phba->HCregaddr); 2192 control |= HC_LAINT_ENA; 2193 writel(control, phba->HCregaddr); 2194 readl(phba->HCregaddr); /* flush */ 2195 2196 /* Clear Link Attention in HA REG */ 2197 writel(HA_LATT, phba->HAregaddr); 2198 readl(phba->HAregaddr); /* flush */ 2199 spin_unlock_irq(&phba->hbalock); 2200 lpfc_linkdown(phba); 2201 phba->link_state = LPFC_HBA_ERROR; 2202 2203 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2204 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2205 2206 return; 2207 } 2208 2209 /** 2210 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2211 * @phba: pointer to lpfc hba data structure. 2212 * @vpd: pointer to the vital product data. 2213 * @len: length of the vital product data in bytes. 2214 * 2215 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2216 * an array of characters. In this routine, the ModelName, ProgramType, and 2217 * ModelDesc, etc. fields of the phba data structure will be populated. 
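 *
 * The buffer is walked as standard PCI VPD tags. For illustration only, a
 * VPD-R (0x90) resource carrying a serial number keyword could look like
 * (hypothetical bytes; the 16-bit length is stored low byte first):
 *
 *   0x90 0x0b 0x00              VPD-R tag, resource length 11
 *   'S' 'N' 0x08 "SN123456"     keyword SN, 8 bytes of data
 *   0x78                        end tag, terminates parsing
 *
 * Keywords V1, V2, V3 and V4 populate ModelDesc, ModelName, ProgramType
 * and Port respectively, and set the matching VPD_* flags in vpd_flag.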
2218 * 2219 * Return codes 2220 * 0 - pointer to the VPD passed in is NULL 2221 * 1 - success 2222 **/ 2223 int 2224 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2225 { 2226 uint8_t lenlo, lenhi; 2227 int Length; 2228 int i, j; 2229 int finished = 0; 2230 int index = 0; 2231 2232 if (!vpd) 2233 return 0; 2234 2235 /* Vital Product */ 2236 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2237 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2238 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2239 (uint32_t) vpd[3]); 2240 while (!finished && (index < (len - 4))) { 2241 switch (vpd[index]) { 2242 case 0x82: 2243 case 0x91: 2244 index += 1; 2245 lenlo = vpd[index]; 2246 index += 1; 2247 lenhi = vpd[index]; 2248 index += 1; 2249 i = ((((unsigned short)lenhi) << 8) + lenlo); 2250 index += i; 2251 break; 2252 case 0x90: 2253 index += 1; 2254 lenlo = vpd[index]; 2255 index += 1; 2256 lenhi = vpd[index]; 2257 index += 1; 2258 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2259 if (Length > len - index) 2260 Length = len - index; 2261 while (Length > 0) { 2262 /* Look for Serial Number */ 2263 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2264 index += 2; 2265 i = vpd[index]; 2266 index += 1; 2267 j = 0; 2268 Length -= (3+i); 2269 while(i--) { 2270 phba->SerialNumber[j++] = vpd[index++]; 2271 if (j == 31) 2272 break; 2273 } 2274 phba->SerialNumber[j] = 0; 2275 continue; 2276 } 2277 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2278 phba->vpd_flag |= VPD_MODEL_DESC; 2279 index += 2; 2280 i = vpd[index]; 2281 index += 1; 2282 j = 0; 2283 Length -= (3+i); 2284 while(i--) { 2285 phba->ModelDesc[j++] = vpd[index++]; 2286 if (j == 255) 2287 break; 2288 } 2289 phba->ModelDesc[j] = 0; 2290 continue; 2291 } 2292 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2293 phba->vpd_flag |= VPD_MODEL_NAME; 2294 index += 2; 2295 i = vpd[index]; 2296 index += 1; 2297 j = 0; 2298 Length -= (3+i); 2299 while(i--) { 2300 phba->ModelName[j++] = vpd[index++]; 2301 if (j == 79) 2302 break; 2303 } 2304 phba->ModelName[j] = 0; 2305 continue; 2306 } 2307 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2308 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2309 index += 2; 2310 i = vpd[index]; 2311 index += 1; 2312 j = 0; 2313 Length -= (3+i); 2314 while(i--) { 2315 phba->ProgramType[j++] = vpd[index++]; 2316 if (j == 255) 2317 break; 2318 } 2319 phba->ProgramType[j] = 0; 2320 continue; 2321 } 2322 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2323 phba->vpd_flag |= VPD_PORT; 2324 index += 2; 2325 i = vpd[index]; 2326 index += 1; 2327 j = 0; 2328 Length -= (3+i); 2329 while(i--) { 2330 if ((phba->sli_rev == LPFC_SLI_REV4) && 2331 (phba->sli4_hba.pport_name_sta == 2332 LPFC_SLI4_PPNAME_GET)) { 2333 j++; 2334 index++; 2335 } else 2336 phba->Port[j++] = vpd[index++]; 2337 if (j == 19) 2338 break; 2339 } 2340 if ((phba->sli_rev != LPFC_SLI_REV4) || 2341 (phba->sli4_hba.pport_name_sta == 2342 LPFC_SLI4_PPNAME_NON)) 2343 phba->Port[j] = 0; 2344 continue; 2345 } 2346 else { 2347 index += 2; 2348 i = vpd[index]; 2349 index += 1; 2350 index += i; 2351 Length -= (3 + i); 2352 } 2353 } 2354 finished = 0; 2355 break; 2356 case 0x78: 2357 finished = 1; 2358 break; 2359 default: 2360 index ++; 2361 break; 2362 } 2363 } 2364 2365 return(1); 2366 } 2367 2368 /** 2369 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2370 * @phba: pointer to lpfc hba data structure. 2371 * @mdp: pointer to the data structure to hold the derived model name. 
2372 * @descp: pointer to the data structure to hold the derived description. 2373 * 2374 * This routine retrieves HBA's description based on its registered PCI device 2375 * ID. The @descp passed into this function points to an array of 256 chars. It 2376 * shall be returned with the model name, maximum speed, and the host bus type. 2377 * The @mdp passed into this function points to an array of 80 chars. When the 2378 * function returns, the @mdp will be filled with the model name. 2379 **/ 2380 static void 2381 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2382 { 2383 lpfc_vpd_t *vp; 2384 uint16_t dev_id = phba->pcidev->device; 2385 int max_speed; 2386 int GE = 0; 2387 int oneConnect = 0; /* default is not a oneConnect */ 2388 struct { 2389 char *name; 2390 char *bus; 2391 char *function; 2392 } m = {"<Unknown>", "", ""}; 2393 2394 if (mdp && mdp[0] != '\0' 2395 && descp && descp[0] != '\0') 2396 return; 2397 2398 if (phba->lmt & LMT_64Gb) 2399 max_speed = 64; 2400 else if (phba->lmt & LMT_32Gb) 2401 max_speed = 32; 2402 else if (phba->lmt & LMT_16Gb) 2403 max_speed = 16; 2404 else if (phba->lmt & LMT_10Gb) 2405 max_speed = 10; 2406 else if (phba->lmt & LMT_8Gb) 2407 max_speed = 8; 2408 else if (phba->lmt & LMT_4Gb) 2409 max_speed = 4; 2410 else if (phba->lmt & LMT_2Gb) 2411 max_speed = 2; 2412 else if (phba->lmt & LMT_1Gb) 2413 max_speed = 1; 2414 else 2415 max_speed = 0; 2416 2417 vp = &phba->vpd; 2418 2419 switch (dev_id) { 2420 case PCI_DEVICE_ID_FIREFLY: 2421 m = (typeof(m)){"LP6000", "PCI", 2422 "Obsolete, Unsupported Fibre Channel Adapter"}; 2423 break; 2424 case PCI_DEVICE_ID_SUPERFLY: 2425 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2426 m = (typeof(m)){"LP7000", "PCI", ""}; 2427 else 2428 m = (typeof(m)){"LP7000E", "PCI", ""}; 2429 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2430 break; 2431 case PCI_DEVICE_ID_DRAGONFLY: 2432 m = (typeof(m)){"LP8000", "PCI", 2433 "Obsolete, Unsupported Fibre Channel Adapter"}; 2434 break; 2435 case PCI_DEVICE_ID_CENTAUR: 2436 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2437 m = (typeof(m)){"LP9002", "PCI", ""}; 2438 else 2439 m = (typeof(m)){"LP9000", "PCI", ""}; 2440 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2441 break; 2442 case PCI_DEVICE_ID_RFLY: 2443 m = (typeof(m)){"LP952", "PCI", 2444 "Obsolete, Unsupported Fibre Channel Adapter"}; 2445 break; 2446 case PCI_DEVICE_ID_PEGASUS: 2447 m = (typeof(m)){"LP9802", "PCI-X", 2448 "Obsolete, Unsupported Fibre Channel Adapter"}; 2449 break; 2450 case PCI_DEVICE_ID_THOR: 2451 m = (typeof(m)){"LP10000", "PCI-X", 2452 "Obsolete, Unsupported Fibre Channel Adapter"}; 2453 break; 2454 case PCI_DEVICE_ID_VIPER: 2455 m = (typeof(m)){"LPX1000", "PCI-X", 2456 "Obsolete, Unsupported Fibre Channel Adapter"}; 2457 break; 2458 case PCI_DEVICE_ID_PFLY: 2459 m = (typeof(m)){"LP982", "PCI-X", 2460 "Obsolete, Unsupported Fibre Channel Adapter"}; 2461 break; 2462 case PCI_DEVICE_ID_TFLY: 2463 m = (typeof(m)){"LP1050", "PCI-X", 2464 "Obsolete, Unsupported Fibre Channel Adapter"}; 2465 break; 2466 case PCI_DEVICE_ID_HELIOS: 2467 m = (typeof(m)){"LP11000", "PCI-X2", 2468 "Obsolete, Unsupported Fibre Channel Adapter"}; 2469 break; 2470 case PCI_DEVICE_ID_HELIOS_SCSP: 2471 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2472 "Obsolete, Unsupported Fibre Channel Adapter"}; 2473 break; 2474 case PCI_DEVICE_ID_HELIOS_DCSP: 2475 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2476 "Obsolete, Unsupported Fibre Channel Adapter"}; 2477 break; 2478 case 
PCI_DEVICE_ID_NEPTUNE: 2479 m = (typeof(m)){"LPe1000", "PCIe", 2480 "Obsolete, Unsupported Fibre Channel Adapter"}; 2481 break; 2482 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2483 m = (typeof(m)){"LPe1000-SP", "PCIe", 2484 "Obsolete, Unsupported Fibre Channel Adapter"}; 2485 break; 2486 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2487 m = (typeof(m)){"LPe1002-SP", "PCIe", 2488 "Obsolete, Unsupported Fibre Channel Adapter"}; 2489 break; 2490 case PCI_DEVICE_ID_BMID: 2491 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2492 break; 2493 case PCI_DEVICE_ID_BSMB: 2494 m = (typeof(m)){"LP111", "PCI-X2", 2495 "Obsolete, Unsupported Fibre Channel Adapter"}; 2496 break; 2497 case PCI_DEVICE_ID_ZEPHYR: 2498 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2499 break; 2500 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2501 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2502 break; 2503 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2504 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2505 GE = 1; 2506 break; 2507 case PCI_DEVICE_ID_ZMID: 2508 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2509 break; 2510 case PCI_DEVICE_ID_ZSMB: 2511 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2512 break; 2513 case PCI_DEVICE_ID_LP101: 2514 m = (typeof(m)){"LP101", "PCI-X", 2515 "Obsolete, Unsupported Fibre Channel Adapter"}; 2516 break; 2517 case PCI_DEVICE_ID_LP10000S: 2518 m = (typeof(m)){"LP10000-S", "PCI", 2519 "Obsolete, Unsupported Fibre Channel Adapter"}; 2520 break; 2521 case PCI_DEVICE_ID_LP11000S: 2522 m = (typeof(m)){"LP11000-S", "PCI-X2", 2523 "Obsolete, Unsupported Fibre Channel Adapter"}; 2524 break; 2525 case PCI_DEVICE_ID_LPE11000S: 2526 m = (typeof(m)){"LPe11000-S", "PCIe", 2527 "Obsolete, Unsupported Fibre Channel Adapter"}; 2528 break; 2529 case PCI_DEVICE_ID_SAT: 2530 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2531 break; 2532 case PCI_DEVICE_ID_SAT_MID: 2533 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2534 break; 2535 case PCI_DEVICE_ID_SAT_SMB: 2536 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2537 break; 2538 case PCI_DEVICE_ID_SAT_DCSP: 2539 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2540 break; 2541 case PCI_DEVICE_ID_SAT_SCSP: 2542 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2543 break; 2544 case PCI_DEVICE_ID_SAT_S: 2545 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2546 break; 2547 case PCI_DEVICE_ID_HORNET: 2548 m = (typeof(m)){"LP21000", "PCIe", 2549 "Obsolete, Unsupported FCoE Adapter"}; 2550 GE = 1; 2551 break; 2552 case PCI_DEVICE_ID_PROTEUS_VF: 2553 m = (typeof(m)){"LPev12000", "PCIe IOV", 2554 "Obsolete, Unsupported Fibre Channel Adapter"}; 2555 break; 2556 case PCI_DEVICE_ID_PROTEUS_PF: 2557 m = (typeof(m)){"LPev12000", "PCIe IOV", 2558 "Obsolete, Unsupported Fibre Channel Adapter"}; 2559 break; 2560 case PCI_DEVICE_ID_PROTEUS_S: 2561 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2562 "Obsolete, Unsupported Fibre Channel Adapter"}; 2563 break; 2564 case PCI_DEVICE_ID_TIGERSHARK: 2565 oneConnect = 1; 2566 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2567 break; 2568 case PCI_DEVICE_ID_TOMCAT: 2569 oneConnect = 1; 2570 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2571 break; 2572 case PCI_DEVICE_ID_FALCON: 2573 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2574 "EmulexSecure Fibre"}; 2575 break; 2576 case PCI_DEVICE_ID_BALIUS: 2577 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2578 "Obsolete, Unsupported Fibre Channel Adapter"}; 2579 break; 2580 
case PCI_DEVICE_ID_LANCER_FC: 2581 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2582 break; 2583 case PCI_DEVICE_ID_LANCER_FC_VF: 2584 m = (typeof(m)){"LPe16000", "PCIe", 2585 "Obsolete, Unsupported Fibre Channel Adapter"}; 2586 break; 2587 case PCI_DEVICE_ID_LANCER_FCOE: 2588 oneConnect = 1; 2589 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2590 break; 2591 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2592 oneConnect = 1; 2593 m = (typeof(m)){"OCe15100", "PCIe", 2594 "Obsolete, Unsupported FCoE"}; 2595 break; 2596 case PCI_DEVICE_ID_LANCER_G6_FC: 2597 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2598 break; 2599 case PCI_DEVICE_ID_LANCER_G7_FC: 2600 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2601 break; 2602 case PCI_DEVICE_ID_LANCER_G7P_FC: 2603 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; 2604 break; 2605 case PCI_DEVICE_ID_SKYHAWK: 2606 case PCI_DEVICE_ID_SKYHAWK_VF: 2607 oneConnect = 1; 2608 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2609 break; 2610 default: 2611 m = (typeof(m)){"Unknown", "", ""}; 2612 break; 2613 } 2614 2615 if (mdp && mdp[0] == '\0') 2616 snprintf(mdp, 79,"%s", m.name); 2617 /* 2618 * oneConnect hba requires special processing, they are all initiators 2619 * and we put the port number on the end 2620 */ 2621 if (descp && descp[0] == '\0') { 2622 if (oneConnect) 2623 snprintf(descp, 255, 2624 "Emulex OneConnect %s, %s Initiator %s", 2625 m.name, m.function, 2626 phba->Port); 2627 else if (max_speed == 0) 2628 snprintf(descp, 255, 2629 "Emulex %s %s %s", 2630 m.name, m.bus, m.function); 2631 else 2632 snprintf(descp, 255, 2633 "Emulex %s %d%s %s %s", 2634 m.name, max_speed, (GE) ? "GE" : "Gb", 2635 m.bus, m.function); 2636 } 2637 } 2638 2639 /** 2640 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2641 * @phba: pointer to lpfc hba data structure. 2642 * @pring: pointer to a IOCB ring. 2643 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2644 * 2645 * This routine posts a given number of IOCBs with the associated DMA buffer 2646 * descriptors specified by the cnt argument to the given IOCB ring. 2647 * 2648 * Return codes 2649 * The number of IOCBs NOT able to be posted to the IOCB ring. 
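 *
 * Typical usage in this file is the initial ELS ring population done by
 * lpfc_post_rcv_buf() below, e.g.:
 *
 *   lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
 *
 * Buffers that cannot be posted are accounted for in pring->missbufcnt and
 * are retried on the next call.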
2650 **/ 2651 int 2652 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2653 { 2654 IOCB_t *icmd; 2655 struct lpfc_iocbq *iocb; 2656 struct lpfc_dmabuf *mp1, *mp2; 2657 2658 cnt += pring->missbufcnt; 2659 2660 /* While there are buffers to post */ 2661 while (cnt > 0) { 2662 /* Allocate buffer for command iocb */ 2663 iocb = lpfc_sli_get_iocbq(phba); 2664 if (iocb == NULL) { 2665 pring->missbufcnt = cnt; 2666 return cnt; 2667 } 2668 icmd = &iocb->iocb; 2669 2670 /* 2 buffers can be posted per command */ 2671 /* Allocate buffer to post */ 2672 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2673 if (mp1) 2674 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2675 if (!mp1 || !mp1->virt) { 2676 kfree(mp1); 2677 lpfc_sli_release_iocbq(phba, iocb); 2678 pring->missbufcnt = cnt; 2679 return cnt; 2680 } 2681 2682 INIT_LIST_HEAD(&mp1->list); 2683 /* Allocate buffer to post */ 2684 if (cnt > 1) { 2685 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2686 if (mp2) 2687 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2688 &mp2->phys); 2689 if (!mp2 || !mp2->virt) { 2690 kfree(mp2); 2691 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2692 kfree(mp1); 2693 lpfc_sli_release_iocbq(phba, iocb); 2694 pring->missbufcnt = cnt; 2695 return cnt; 2696 } 2697 2698 INIT_LIST_HEAD(&mp2->list); 2699 } else { 2700 mp2 = NULL; 2701 } 2702 2703 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2704 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2705 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2706 icmd->ulpBdeCount = 1; 2707 cnt--; 2708 if (mp2) { 2709 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2710 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2711 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2712 cnt--; 2713 icmd->ulpBdeCount = 2; 2714 } 2715 2716 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2717 icmd->ulpLe = 1; 2718 2719 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2720 IOCB_ERROR) { 2721 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2722 kfree(mp1); 2723 cnt++; 2724 if (mp2) { 2725 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2726 kfree(mp2); 2727 cnt++; 2728 } 2729 lpfc_sli_release_iocbq(phba, iocb); 2730 pring->missbufcnt = cnt; 2731 return cnt; 2732 } 2733 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2734 if (mp2) 2735 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2736 } 2737 pring->missbufcnt = 0; 2738 return 0; 2739 } 2740 2741 /** 2742 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2743 * @phba: pointer to lpfc hba data structure. 2744 * 2745 * This routine posts initial receive IOCB buffers to the ELS ring. The 2746 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2747 * set to 64 IOCBs. SLI3 only. 2748 * 2749 * Return codes 2750 * 0 - success (currently always success) 2751 **/ 2752 static int 2753 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2754 { 2755 struct lpfc_sli *psli = &phba->sli; 2756 2757 /* Ring 0, ELS / CT buffers */ 2758 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2759 /* Ring 2 - FCP no buffers needed */ 2760 2761 return 0; 2762 } 2763 2764 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2765 2766 /** 2767 * lpfc_sha_init - Set up initial array of hash table entries 2768 * @HashResultPointer: pointer to an array as hash table. 2769 * 2770 * This routine sets up the initial values to the array of hash table entries 2771 * for the LC HBAs. 
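 *
 * The five constants loaded here are the standard SHA-1 initial hash values
 * (H0-H4), so the challenge/response computation driven by lpfc_hba_init()
 * is effectively an SHA-1 style digest over the working array derived from
 * the WWNN and the random challenge data.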
2772 **/
2773 static void
2774 lpfc_sha_init(uint32_t * HashResultPointer)
2775 {
2776 HashResultPointer[0] = 0x67452301;
2777 HashResultPointer[1] = 0xEFCDAB89;
2778 HashResultPointer[2] = 0x98BADCFE;
2779 HashResultPointer[3] = 0x10325476;
2780 HashResultPointer[4] = 0xC3D2E1F0;
2781 }
2782
2783 /**
2784 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2785 * @HashResultPointer: pointer to an initial/result hash table.
2786 * @HashWorkingPointer: pointer to a working hash table.
2787 *
2788 * This routine iterates an initial hash table pointed to by @HashResultPointer
2789 * with the values from the working hash table pointed to by @HashWorkingPointer.
2790 * The results are put back into the initial hash table and returned through
2791 * the @HashResultPointer as the result hash table.
2792 **/
2793 static void
2794 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2795 {
2796 int t;
2797 uint32_t TEMP;
2798 uint32_t A, B, C, D, E;
2799 t = 16;
2800 do {
2801 HashWorkingPointer[t] =
2802 S(1,
2803 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2804 8] ^
2805 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2806 } while (++t <= 79);
2807 t = 0;
2808 A = HashResultPointer[0];
2809 B = HashResultPointer[1];
2810 C = HashResultPointer[2];
2811 D = HashResultPointer[3];
2812 E = HashResultPointer[4];
2813
2814 do {
2815 if (t < 20) {
2816 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2817 } else if (t < 40) {
2818 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2819 } else if (t < 60) {
2820 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2821 } else {
2822 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2823 }
2824 TEMP += S(5, A) + E + HashWorkingPointer[t];
2825 E = D;
2826 D = C;
2827 C = S(30, B);
2828 B = A;
2829 A = TEMP;
2830 } while (++t <= 79);
2831
2832 HashResultPointer[0] += A;
2833 HashResultPointer[1] += B;
2834 HashResultPointer[2] += C;
2835 HashResultPointer[3] += D;
2836 HashResultPointer[4] += E;
2837
2838 }
2839
2840 /**
2841 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2842 * @RandomChallenge: pointer to the entry of host challenge random number array.
2843 * @HashWorking: pointer to the entry of the working hash array.
2844 *
2845 * This routine calculates the working hash array referred to by @HashWorking
2846 * from the challenge random numbers associated with the host, referred to by
2847 * @RandomChallenge. The result is put into the entry of the working hash
2848 * array and returned by reference through @HashWorking.
2849 **/
2850 static void
2851 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2852 {
2853 *HashWorking = (*RandomChallenge ^ *HashWorking);
2854 }
2855
2856 /**
2857 * lpfc_hba_init - Perform special handling for LC HBA initialization
2858 * @phba: pointer to lpfc hba data structure.
2859 * @hbainit: pointer to an array of unsigned 32-bit integers.
2860 *
2861 * This routine performs the special handling for LC HBA initialization.
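 *
 * A rough sketch of the computation (see the body below): the 80-entry
 * working array is seeded from the node WWN, mixed with the host random
 * challenge via lpfc_challenge_key(), and folded into @hbainit using the
 * SHA-1 style helpers:
 *
 *   HashWorking[0] = HashWorking[78] = wwnn[0];
 *   HashWorking[1] = HashWorking[79] = wwnn[1];
 *   for (t = 0; t < 7; t++)
 *           lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
 *   lpfc_sha_init(hbainit);
 *   lpfc_sha_iterate(hbainit, HashWorking);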
2862 **/ 2863 void 2864 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2865 { 2866 int t; 2867 uint32_t *HashWorking; 2868 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2869 2870 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2871 if (!HashWorking) 2872 return; 2873 2874 HashWorking[0] = HashWorking[78] = *pwwnn++; 2875 HashWorking[1] = HashWorking[79] = *pwwnn; 2876 2877 for (t = 0; t < 7; t++) 2878 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2879 2880 lpfc_sha_init(hbainit); 2881 lpfc_sha_iterate(hbainit, HashWorking); 2882 kfree(HashWorking); 2883 } 2884 2885 /** 2886 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2887 * @vport: pointer to a virtual N_Port data structure. 2888 * 2889 * This routine performs the necessary cleanups before deleting the @vport. 2890 * It invokes the discovery state machine to perform necessary state 2891 * transitions and to release the ndlps associated with the @vport. Note, 2892 * the physical port is treated as @vport 0. 2893 **/ 2894 void 2895 lpfc_cleanup(struct lpfc_vport *vport) 2896 { 2897 struct lpfc_hba *phba = vport->phba; 2898 struct lpfc_nodelist *ndlp, *next_ndlp; 2899 int i = 0; 2900 2901 if (phba->link_state > LPFC_LINK_DOWN) 2902 lpfc_port_link_failure(vport); 2903 2904 /* Clean up VMID resources */ 2905 if (lpfc_is_vmid_enabled(phba)) 2906 lpfc_vmid_vport_cleanup(vport); 2907 2908 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2909 if (vport->port_type != LPFC_PHYSICAL_PORT && 2910 ndlp->nlp_DID == Fabric_DID) { 2911 /* Just free up ndlp with Fabric_DID for vports */ 2912 lpfc_nlp_put(ndlp); 2913 continue; 2914 } 2915 2916 if (ndlp->nlp_DID == Fabric_Cntl_DID && 2917 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2918 lpfc_nlp_put(ndlp); 2919 continue; 2920 } 2921 2922 /* Fabric Ports not in UNMAPPED state are cleaned up in the 2923 * DEVICE_RM event. 2924 */ 2925 if (ndlp->nlp_type & NLP_FABRIC && 2926 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 2927 lpfc_disc_state_machine(vport, ndlp, NULL, 2928 NLP_EVT_DEVICE_RECOVERY); 2929 2930 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD))) 2931 lpfc_disc_state_machine(vport, ndlp, NULL, 2932 NLP_EVT_DEVICE_RM); 2933 } 2934 2935 /* At this point, ALL ndlp's should be gone 2936 * because of the previous NLP_EVT_DEVICE_RM. 2937 * Lets wait for this to happen, if needed. 2938 */ 2939 while (!list_empty(&vport->fc_nodes)) { 2940 if (i++ > 3000) { 2941 lpfc_printf_vlog(vport, KERN_ERR, 2942 LOG_TRACE_EVENT, 2943 "0233 Nodelist not empty\n"); 2944 list_for_each_entry_safe(ndlp, next_ndlp, 2945 &vport->fc_nodes, nlp_listp) { 2946 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2947 LOG_TRACE_EVENT, 2948 "0282 did:x%x ndlp:x%px " 2949 "refcnt:%d xflags x%x nflag x%x\n", 2950 ndlp->nlp_DID, (void *)ndlp, 2951 kref_read(&ndlp->kref), 2952 ndlp->fc4_xpt_flags, 2953 ndlp->nlp_flag); 2954 } 2955 break; 2956 } 2957 2958 /* Wait for any activity on ndlps to settle */ 2959 msleep(10); 2960 } 2961 lpfc_cleanup_vports_rrqs(vport, NULL); 2962 } 2963 2964 /** 2965 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2966 * @vport: pointer to a virtual N_Port data structure. 2967 * 2968 * This routine stops all the timers associated with a @vport. This function 2969 * is invoked before disabling or deleting a @vport. Note that the physical 2970 * port is treated as @vport 0. 
2971 **/ 2972 void 2973 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2974 { 2975 del_timer_sync(&vport->els_tmofunc); 2976 del_timer_sync(&vport->delayed_disc_tmo); 2977 lpfc_can_disctmo(vport); 2978 return; 2979 } 2980 2981 /** 2982 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2983 * @phba: pointer to lpfc hba data structure. 2984 * 2985 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2986 * caller of this routine should already hold the host lock. 2987 **/ 2988 void 2989 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2990 { 2991 /* Clear pending FCF rediscovery wait flag */ 2992 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2993 2994 /* Now, try to stop the timer */ 2995 del_timer(&phba->fcf.redisc_wait); 2996 } 2997 2998 /** 2999 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 3000 * @phba: pointer to lpfc hba data structure. 3001 * 3002 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 3003 * checks whether the FCF rediscovery wait timer is pending with the host 3004 * lock held before proceeding with disabling the timer and clearing the 3005 * wait timer pendig flag. 3006 **/ 3007 void 3008 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 3009 { 3010 spin_lock_irq(&phba->hbalock); 3011 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3012 /* FCF rediscovery timer already fired or stopped */ 3013 spin_unlock_irq(&phba->hbalock); 3014 return; 3015 } 3016 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3017 /* Clear failover in progress flags */ 3018 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 3019 spin_unlock_irq(&phba->hbalock); 3020 } 3021 3022 /** 3023 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 3024 * @phba: pointer to lpfc hba data structure. 3025 * 3026 * This routine stops all the timers associated with a HBA. This function is 3027 * invoked before either putting a HBA offline or unloading the driver. 3028 **/ 3029 void 3030 lpfc_stop_hba_timers(struct lpfc_hba *phba) 3031 { 3032 if (phba->pport) 3033 lpfc_stop_vport_timers(phba->pport); 3034 cancel_delayed_work_sync(&phba->eq_delay_work); 3035 cancel_delayed_work_sync(&phba->idle_stat_delay_work); 3036 del_timer_sync(&phba->sli.mbox_tmo); 3037 del_timer_sync(&phba->fabric_block_timer); 3038 del_timer_sync(&phba->eratt_poll); 3039 del_timer_sync(&phba->hb_tmofunc); 3040 if (phba->sli_rev == LPFC_SLI_REV4) { 3041 del_timer_sync(&phba->rrq_tmr); 3042 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 3043 } 3044 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 3045 3046 switch (phba->pci_dev_grp) { 3047 case LPFC_PCI_DEV_LP: 3048 /* Stop any LightPulse device specific driver timers */ 3049 del_timer_sync(&phba->fcp_poll_timer); 3050 break; 3051 case LPFC_PCI_DEV_OC: 3052 /* Stop any OneConnect device specific driver timers */ 3053 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3054 break; 3055 default: 3056 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3057 "0297 Invalid device group (x%x)\n", 3058 phba->pci_dev_grp); 3059 break; 3060 } 3061 return; 3062 } 3063 3064 /** 3065 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 3066 * @phba: pointer to lpfc hba data structure. 3067 * @mbx_action: flag for mailbox no wait action. 3068 * 3069 * This routine marks a HBA's management interface as blocked. 
Once the HBA's 3070 * management interface is marked as blocked, all the user space access to 3071 * the HBA, whether they are from sysfs interface or libdfc interface will 3072 * all be blocked. The HBA is set to block the management interface when the 3073 * driver prepares the HBA interface for online or offline. 3074 **/ 3075 static void 3076 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 3077 { 3078 unsigned long iflag; 3079 uint8_t actcmd = MBX_HEARTBEAT; 3080 unsigned long timeout; 3081 3082 spin_lock_irqsave(&phba->hbalock, iflag); 3083 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 3084 spin_unlock_irqrestore(&phba->hbalock, iflag); 3085 if (mbx_action == LPFC_MBX_NO_WAIT) 3086 return; 3087 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 3088 spin_lock_irqsave(&phba->hbalock, iflag); 3089 if (phba->sli.mbox_active) { 3090 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 3091 /* Determine how long we might wait for the active mailbox 3092 * command to be gracefully completed by firmware. 3093 */ 3094 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 3095 phba->sli.mbox_active) * 1000) + jiffies; 3096 } 3097 spin_unlock_irqrestore(&phba->hbalock, iflag); 3098 3099 /* Wait for the outstnading mailbox command to complete */ 3100 while (phba->sli.mbox_active) { 3101 /* Check active mailbox complete status every 2ms */ 3102 msleep(2); 3103 if (time_after(jiffies, timeout)) { 3104 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3105 "2813 Mgmt IO is Blocked %x " 3106 "- mbox cmd %x still active\n", 3107 phba->sli.sli_flag, actcmd); 3108 break; 3109 } 3110 } 3111 } 3112 3113 /** 3114 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3115 * @phba: pointer to lpfc hba data structure. 3116 * 3117 * Allocate RPIs for all active remote nodes. This is needed whenever 3118 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3119 * is to fixup the temporary rpi assignments. 3120 **/ 3121 void 3122 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3123 { 3124 struct lpfc_nodelist *ndlp, *next_ndlp; 3125 struct lpfc_vport **vports; 3126 int i, rpi; 3127 3128 if (phba->sli_rev != LPFC_SLI_REV4) 3129 return; 3130 3131 vports = lpfc_create_vport_work_array(phba); 3132 if (vports == NULL) 3133 return; 3134 3135 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3136 if (vports[i]->load_flag & FC_UNLOADING) 3137 continue; 3138 3139 list_for_each_entry_safe(ndlp, next_ndlp, 3140 &vports[i]->fc_nodes, 3141 nlp_listp) { 3142 rpi = lpfc_sli4_alloc_rpi(phba); 3143 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3144 /* TODO print log? */ 3145 continue; 3146 } 3147 ndlp->nlp_rpi = rpi; 3148 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3149 LOG_NODE | LOG_DISCOVERY, 3150 "0009 Assign RPI x%x to ndlp x%px " 3151 "DID:x%06x flg:x%x\n", 3152 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, 3153 ndlp->nlp_flag); 3154 } 3155 } 3156 lpfc_destroy_vport_work_array(phba, vports); 3157 } 3158 3159 /** 3160 * lpfc_create_expedite_pool - create expedite pool 3161 * @phba: pointer to lpfc hba data structure. 3162 * 3163 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3164 * to expedite pool. Mark them as expedite. 
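 *
 * At most XRI_BATCH buffers are reserved this way; lpfc_destroy_expedite_pool()
 * below performs the inverse move when the multi-XRI pools are torn down.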
3165 **/
3166 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3167 {
3168 struct lpfc_sli4_hdw_queue *qp;
3169 struct lpfc_io_buf *lpfc_ncmd;
3170 struct lpfc_io_buf *lpfc_ncmd_next;
3171 struct lpfc_epd_pool *epd_pool;
3172 unsigned long iflag;
3173
3174 epd_pool = &phba->epd_pool;
3175 qp = &phba->sli4_hba.hdwq[0];
3176
3177 spin_lock_init(&epd_pool->lock);
3178 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3179 spin_lock(&epd_pool->lock);
3180 INIT_LIST_HEAD(&epd_pool->list);
3181 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3182 &qp->lpfc_io_buf_list_put, list) {
3183 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3184 lpfc_ncmd->expedite = true;
3185 qp->put_io_bufs--;
3186 epd_pool->count++;
3187 if (epd_pool->count >= XRI_BATCH)
3188 break;
3189 }
3190 spin_unlock(&epd_pool->lock);
3191 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3192 }
3193
3194 /**
3195 * lpfc_destroy_expedite_pool - destroy expedite pool
3196 * @phba: pointer to lpfc hba data structure.
3197 *
3198 * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
3199 * of HWQ 0. Clear the mark.
3200 **/
3201 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3202 {
3203 struct lpfc_sli4_hdw_queue *qp;
3204 struct lpfc_io_buf *lpfc_ncmd;
3205 struct lpfc_io_buf *lpfc_ncmd_next;
3206 struct lpfc_epd_pool *epd_pool;
3207 unsigned long iflag;
3208
3209 epd_pool = &phba->epd_pool;
3210 qp = &phba->sli4_hba.hdwq[0];
3211
3212 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3213 spin_lock(&epd_pool->lock);
3214 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3215 &epd_pool->list, list) {
3216 list_move_tail(&lpfc_ncmd->list,
3217 &qp->lpfc_io_buf_list_put);
3218 lpfc_ncmd->expedite = false; /* clear the mark set by lpfc_create_expedite_pool() */
3219 qp->put_io_bufs++;
3220 epd_pool->count--;
3221 }
3222 spin_unlock(&epd_pool->lock);
3223 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3224 }
3225
3226 /**
3227 * lpfc_create_multixri_pools - create multi-XRI pools
3228 * @phba: pointer to lpfc hba data structure.
3229 *
3230 * This routine initializes the public and private pools per HWQ. Then, XRIs
3231 * are moved from lpfc_io_buf_list_put to the public pool. The high and low
3232 * watermarks are also initialized.
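 *
 * For illustration, with the values used below each HWQ pool is sized as:
 *
 *   xri_limit      = sli4_hba.io_xri_cnt / cfg_hdw_queue;
 *   high_watermark = xri_limit / 2;
 *   low_watermark  = XRI_BATCH;
 *
 * On an allocation failure the pools created so far are freed and
 * cfg_xri_rebalancing is cleared so the driver falls back to the plain
 * per-HWQ put/get lists.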
3233 **/ 3234 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3235 { 3236 u32 i, j; 3237 u32 hwq_count; 3238 u32 count_per_hwq; 3239 struct lpfc_io_buf *lpfc_ncmd; 3240 struct lpfc_io_buf *lpfc_ncmd_next; 3241 unsigned long iflag; 3242 struct lpfc_sli4_hdw_queue *qp; 3243 struct lpfc_multixri_pool *multixri_pool; 3244 struct lpfc_pbl_pool *pbl_pool; 3245 struct lpfc_pvt_pool *pvt_pool; 3246 3247 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3248 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3249 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3250 phba->sli4_hba.io_xri_cnt); 3251 3252 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3253 lpfc_create_expedite_pool(phba); 3254 3255 hwq_count = phba->cfg_hdw_queue; 3256 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3257 3258 for (i = 0; i < hwq_count; i++) { 3259 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3260 3261 if (!multixri_pool) { 3262 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3263 "1238 Failed to allocate memory for " 3264 "multixri_pool\n"); 3265 3266 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3267 lpfc_destroy_expedite_pool(phba); 3268 3269 j = 0; 3270 while (j < i) { 3271 qp = &phba->sli4_hba.hdwq[j]; 3272 kfree(qp->p_multixri_pool); 3273 j++; 3274 } 3275 phba->cfg_xri_rebalancing = 0; 3276 return; 3277 } 3278 3279 qp = &phba->sli4_hba.hdwq[i]; 3280 qp->p_multixri_pool = multixri_pool; 3281 3282 multixri_pool->xri_limit = count_per_hwq; 3283 multixri_pool->rrb_next_hwqid = i; 3284 3285 /* Deal with public free xri pool */ 3286 pbl_pool = &multixri_pool->pbl_pool; 3287 spin_lock_init(&pbl_pool->lock); 3288 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3289 spin_lock(&pbl_pool->lock); 3290 INIT_LIST_HEAD(&pbl_pool->list); 3291 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3292 &qp->lpfc_io_buf_list_put, list) { 3293 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3294 qp->put_io_bufs--; 3295 pbl_pool->count++; 3296 } 3297 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3298 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3299 pbl_pool->count, i); 3300 spin_unlock(&pbl_pool->lock); 3301 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3302 3303 /* Deal with private free xri pool */ 3304 pvt_pool = &multixri_pool->pvt_pool; 3305 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3306 pvt_pool->low_watermark = XRI_BATCH; 3307 spin_lock_init(&pvt_pool->lock); 3308 spin_lock_irqsave(&pvt_pool->lock, iflag); 3309 INIT_LIST_HEAD(&pvt_pool->list); 3310 pvt_pool->count = 0; 3311 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3312 } 3313 } 3314 3315 /** 3316 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3317 * @phba: pointer to lpfc hba data structure. 3318 * 3319 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
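 *
 * Unless the driver is already unloading, the IO rings are flushed first so
 * that in-flight buffers are returned before the per-HWQ pools are freed.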
3320 **/ 3321 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3322 { 3323 u32 i; 3324 u32 hwq_count; 3325 struct lpfc_io_buf *lpfc_ncmd; 3326 struct lpfc_io_buf *lpfc_ncmd_next; 3327 unsigned long iflag; 3328 struct lpfc_sli4_hdw_queue *qp; 3329 struct lpfc_multixri_pool *multixri_pool; 3330 struct lpfc_pbl_pool *pbl_pool; 3331 struct lpfc_pvt_pool *pvt_pool; 3332 3333 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3334 lpfc_destroy_expedite_pool(phba); 3335 3336 if (!(phba->pport->load_flag & FC_UNLOADING)) 3337 lpfc_sli_flush_io_rings(phba); 3338 3339 hwq_count = phba->cfg_hdw_queue; 3340 3341 for (i = 0; i < hwq_count; i++) { 3342 qp = &phba->sli4_hba.hdwq[i]; 3343 multixri_pool = qp->p_multixri_pool; 3344 if (!multixri_pool) 3345 continue; 3346 3347 qp->p_multixri_pool = NULL; 3348 3349 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3350 3351 /* Deal with public free xri pool */ 3352 pbl_pool = &multixri_pool->pbl_pool; 3353 spin_lock(&pbl_pool->lock); 3354 3355 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3356 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3357 pbl_pool->count, i); 3358 3359 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3360 &pbl_pool->list, list) { 3361 list_move_tail(&lpfc_ncmd->list, 3362 &qp->lpfc_io_buf_list_put); 3363 qp->put_io_bufs++; 3364 pbl_pool->count--; 3365 } 3366 3367 INIT_LIST_HEAD(&pbl_pool->list); 3368 pbl_pool->count = 0; 3369 3370 spin_unlock(&pbl_pool->lock); 3371 3372 /* Deal with private free xri pool */ 3373 pvt_pool = &multixri_pool->pvt_pool; 3374 spin_lock(&pvt_pool->lock); 3375 3376 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3377 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3378 pvt_pool->count, i); 3379 3380 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3381 &pvt_pool->list, list) { 3382 list_move_tail(&lpfc_ncmd->list, 3383 &qp->lpfc_io_buf_list_put); 3384 qp->put_io_bufs++; 3385 pvt_pool->count--; 3386 } 3387 3388 INIT_LIST_HEAD(&pvt_pool->list); 3389 pvt_pool->count = 0; 3390 3391 spin_unlock(&pvt_pool->lock); 3392 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3393 3394 kfree(multixri_pool); 3395 } 3396 } 3397 3398 /** 3399 * lpfc_online - Initialize and bring a HBA online 3400 * @phba: pointer to lpfc hba data structure. 3401 * 3402 * This routine initializes the HBA and brings a HBA online. During this 3403 * process, the management interface is blocked to prevent user space access 3404 * to the HBA interfering with the driver initialization. 3405 * 3406 * Return codes 3407 * 0 - successful 3408 * 1 - failed 3409 **/ 3410 int 3411 lpfc_online(struct lpfc_hba *phba) 3412 { 3413 struct lpfc_vport *vport; 3414 struct lpfc_vport **vports; 3415 int i, error = 0; 3416 bool vpis_cleared = false; 3417 3418 if (!phba) 3419 return 0; 3420 vport = phba->pport; 3421 3422 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3423 return 0; 3424 3425 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3426 "0458 Bring Adapter online\n"); 3427 3428 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3429 3430 if (phba->sli_rev == LPFC_SLI_REV4) { 3431 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3432 lpfc_unblock_mgmt_io(phba); 3433 return 1; 3434 } 3435 spin_lock_irq(&phba->hbalock); 3436 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3437 vpis_cleared = true; 3438 spin_unlock_irq(&phba->hbalock); 3439 3440 /* Reestablish the local initiator port. 3441 * The offline process destroyed the previous lport. 
3442 */ 3443 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3444 !phba->nvmet_support) { 3445 error = lpfc_nvme_create_localport(phba->pport); 3446 if (error) 3447 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3448 "6132 NVME restore reg failed " 3449 "on nvmei error x%x\n", error); 3450 } 3451 } else { 3452 lpfc_sli_queue_init(phba); 3453 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3454 lpfc_unblock_mgmt_io(phba); 3455 return 1; 3456 } 3457 } 3458 3459 vports = lpfc_create_vport_work_array(phba); 3460 if (vports != NULL) { 3461 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3462 struct Scsi_Host *shost; 3463 shost = lpfc_shost_from_vport(vports[i]); 3464 spin_lock_irq(shost->host_lock); 3465 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3466 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3467 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3468 if (phba->sli_rev == LPFC_SLI_REV4) { 3469 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3470 if ((vpis_cleared) && 3471 (vports[i]->port_type != 3472 LPFC_PHYSICAL_PORT)) 3473 vports[i]->vpi = 0; 3474 } 3475 spin_unlock_irq(shost->host_lock); 3476 } 3477 } 3478 lpfc_destroy_vport_work_array(phba, vports); 3479 3480 if (phba->cfg_xri_rebalancing) 3481 lpfc_create_multixri_pools(phba); 3482 3483 lpfc_cpuhp_add(phba); 3484 3485 lpfc_unblock_mgmt_io(phba); 3486 return 0; 3487 } 3488 3489 /** 3490 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3491 * @phba: pointer to lpfc hba data structure. 3492 * 3493 * This routine marks a HBA's management interface as not blocked. Once the 3494 * HBA's management interface is marked as not blocked, all the user space 3495 * access to the HBA, whether they are from sysfs interface or libdfc 3496 * interface will be allowed. The HBA is set to block the management interface 3497 * when the driver prepares the HBA interface for online or offline and then 3498 * set to unblock the management interface afterwards. 3499 **/ 3500 void 3501 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3502 { 3503 unsigned long iflag; 3504 3505 spin_lock_irqsave(&phba->hbalock, iflag); 3506 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3507 spin_unlock_irqrestore(&phba->hbalock, iflag); 3508 } 3509 3510 /** 3511 * lpfc_offline_prep - Prepare a HBA to be brought offline 3512 * @phba: pointer to lpfc hba data structure. 3513 * @mbx_action: flag for mailbox shutdown action. 3514 * 3515 * This routine is invoked to prepare a HBA to be brought offline. It performs 3516 * unregistration login to all the nodes on all vports and flushes the mailbox 3517 * queue to make it ready to be brought offline. 
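 *
 * Callers in this file pair it with lpfc_offline() and, when recovering,
 * an eventual lpfc_online(); a sketch of the pattern used by the SLI3
 * error-attention handler:
 *
 *   lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
 *   lpfc_offline(phba);
 *   lpfc_sli_brdrestart(phba);
 *   if (lpfc_online(phba) == 0)
 *           lpfc_unblock_mgmt_io(phba);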
3518 **/ 3519 void 3520 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3521 { 3522 struct lpfc_vport *vport = phba->pport; 3523 struct lpfc_nodelist *ndlp, *next_ndlp; 3524 struct lpfc_vport **vports; 3525 struct Scsi_Host *shost; 3526 int i; 3527 3528 if (vport->fc_flag & FC_OFFLINE_MODE) 3529 return; 3530 3531 lpfc_block_mgmt_io(phba, mbx_action); 3532 3533 lpfc_linkdown(phba); 3534 3535 /* Issue an unreg_login to all nodes on all vports */ 3536 vports = lpfc_create_vport_work_array(phba); 3537 if (vports != NULL) { 3538 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3539 if (vports[i]->load_flag & FC_UNLOADING) 3540 continue; 3541 shost = lpfc_shost_from_vport(vports[i]); 3542 spin_lock_irq(shost->host_lock); 3543 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3544 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3545 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3546 spin_unlock_irq(shost->host_lock); 3547 3548 shost = lpfc_shost_from_vport(vports[i]); 3549 list_for_each_entry_safe(ndlp, next_ndlp, 3550 &vports[i]->fc_nodes, 3551 nlp_listp) { 3552 3553 spin_lock_irq(&ndlp->lock); 3554 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3555 spin_unlock_irq(&ndlp->lock); 3556 3557 lpfc_unreg_rpi(vports[i], ndlp); 3558 /* 3559 * Whenever an SLI4 port goes offline, free the 3560 * RPI. Get a new RPI when the adapter port 3561 * comes back online. 3562 */ 3563 if (phba->sli_rev == LPFC_SLI_REV4) { 3564 lpfc_printf_vlog(vports[i], KERN_INFO, 3565 LOG_NODE | LOG_DISCOVERY, 3566 "0011 Free RPI x%x on " 3567 "ndlp: x%px did x%x\n", 3568 ndlp->nlp_rpi, ndlp, 3569 ndlp->nlp_DID); 3570 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3571 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3572 } 3573 3574 if (ndlp->nlp_type & NLP_FABRIC) { 3575 lpfc_disc_state_machine(vports[i], ndlp, 3576 NULL, NLP_EVT_DEVICE_RECOVERY); 3577 3578 /* Don't remove the node unless the 3579 * has been unregistered with the 3580 * transport. If so, let dev_loss 3581 * take care of the node. 3582 */ 3583 if (!(ndlp->fc4_xpt_flags & 3584 (NVME_XPT_REGD | SCSI_XPT_REGD))) 3585 lpfc_disc_state_machine 3586 (vports[i], ndlp, 3587 NULL, 3588 NLP_EVT_DEVICE_RM); 3589 } 3590 } 3591 } 3592 } 3593 lpfc_destroy_vport_work_array(phba, vports); 3594 3595 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3596 3597 if (phba->wq) 3598 flush_workqueue(phba->wq); 3599 } 3600 3601 /** 3602 * lpfc_offline - Bring a HBA offline 3603 * @phba: pointer to lpfc hba data structure. 3604 * 3605 * This routine actually brings a HBA offline. It stops all the timers 3606 * associated with the HBA, brings down the SLI layer, and eventually 3607 * marks the HBA as in offline state for the upper layer protocol. 3608 **/ 3609 void 3610 lpfc_offline(struct lpfc_hba *phba) 3611 { 3612 struct Scsi_Host *shost; 3613 struct lpfc_vport **vports; 3614 int i; 3615 3616 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3617 return; 3618 3619 /* stop port and all timers associated with this hba */ 3620 lpfc_stop_port(phba); 3621 3622 /* Tear down the local and target port registrations. The 3623 * nvme transports need to cleanup. 3624 */ 3625 lpfc_nvmet_destroy_targetport(phba); 3626 lpfc_nvme_destroy_localport(phba->pport); 3627 3628 vports = lpfc_create_vport_work_array(phba); 3629 if (vports != NULL) 3630 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3631 lpfc_stop_vport_timers(vports[i]); 3632 lpfc_destroy_vport_work_array(phba, vports); 3633 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3634 "0460 Bring Adapter offline\n"); 3635 /* Bring down the SLI Layer and cleanup. 
The HBA is offline 3636 now. */ 3637 lpfc_sli_hba_down(phba); 3638 spin_lock_irq(&phba->hbalock); 3639 phba->work_ha = 0; 3640 spin_unlock_irq(&phba->hbalock); 3641 vports = lpfc_create_vport_work_array(phba); 3642 if (vports != NULL) 3643 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3644 shost = lpfc_shost_from_vport(vports[i]); 3645 spin_lock_irq(shost->host_lock); 3646 vports[i]->work_port_events = 0; 3647 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3648 spin_unlock_irq(shost->host_lock); 3649 } 3650 lpfc_destroy_vport_work_array(phba, vports); 3651 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled 3652 * in hba_unset 3653 */ 3654 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3655 __lpfc_cpuhp_remove(phba); 3656 3657 if (phba->cfg_xri_rebalancing) 3658 lpfc_destroy_multixri_pools(phba); 3659 } 3660 3661 /** 3662 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3663 * @phba: pointer to lpfc hba data structure. 3664 * 3665 * This routine is to free all the SCSI buffers and IOCBs from the driver 3666 * list back to kernel. It is called from lpfc_pci_remove_one to free 3667 * the internal resources before the device is removed from the system. 3668 **/ 3669 static void 3670 lpfc_scsi_free(struct lpfc_hba *phba) 3671 { 3672 struct lpfc_io_buf *sb, *sb_next; 3673 3674 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3675 return; 3676 3677 spin_lock_irq(&phba->hbalock); 3678 3679 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3680 3681 spin_lock(&phba->scsi_buf_list_put_lock); 3682 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3683 list) { 3684 list_del(&sb->list); 3685 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3686 sb->dma_handle); 3687 kfree(sb); 3688 phba->total_scsi_bufs--; 3689 } 3690 spin_unlock(&phba->scsi_buf_list_put_lock); 3691 3692 spin_lock(&phba->scsi_buf_list_get_lock); 3693 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3694 list) { 3695 list_del(&sb->list); 3696 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3697 sb->dma_handle); 3698 kfree(sb); 3699 phba->total_scsi_bufs--; 3700 } 3701 spin_unlock(&phba->scsi_buf_list_get_lock); 3702 spin_unlock_irq(&phba->hbalock); 3703 } 3704 3705 /** 3706 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3707 * @phba: pointer to lpfc hba data structure. 3708 * 3709 * This routine is to free all the IO buffers and IOCBs from the driver 3710 * list back to kernel. It is called from lpfc_pci_remove_one to free 3711 * the internal resources before the device is removed from the system. 3712 **/ 3713 void 3714 lpfc_io_free(struct lpfc_hba *phba) 3715 { 3716 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3717 struct lpfc_sli4_hdw_queue *qp; 3718 int idx; 3719 3720 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3721 qp = &phba->sli4_hba.hdwq[idx]; 3722 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3723 spin_lock(&qp->io_buf_list_put_lock); 3724 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3725 &qp->lpfc_io_buf_list_put, 3726 list) { 3727 list_del(&lpfc_ncmd->list); 3728 qp->put_io_bufs--; 3729 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3730 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3731 if (phba->cfg_xpsgl && !phba->nvmet_support) 3732 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3733 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3734 kfree(lpfc_ncmd); 3735 qp->total_io_bufs--; 3736 } 3737 spin_unlock(&qp->io_buf_list_put_lock); 3738 3739 spin_lock(&qp->io_buf_list_get_lock); 3740 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3741 &qp->lpfc_io_buf_list_get, 3742 list) { 3743 list_del(&lpfc_ncmd->list); 3744 qp->get_io_bufs--; 3745 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3746 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3747 if (phba->cfg_xpsgl && !phba->nvmet_support) 3748 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3749 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3750 kfree(lpfc_ncmd); 3751 qp->total_io_bufs--; 3752 } 3753 spin_unlock(&qp->io_buf_list_get_lock); 3754 } 3755 } 3756 3757 /** 3758 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3759 * @phba: pointer to lpfc hba data structure. 3760 * 3761 * This routine first calculates the sizes of the current els and allocated 3762 * scsi sgl lists, and then goes through all sgls to updates the physical 3763 * XRIs assigned due to port function reset. During port initialization, the 3764 * current els and allocated scsi sgl lists are 0s. 3765 * 3766 * Return codes 3767 * 0 - successful (for now, it always returns 0) 3768 **/ 3769 int 3770 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3771 { 3772 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3773 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3774 LIST_HEAD(els_sgl_list); 3775 int rc; 3776 3777 /* 3778 * update on pci function's els xri-sgl list 3779 */ 3780 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3781 3782 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3783 /* els xri-sgl expanded */ 3784 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3785 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3786 "3157 ELS xri-sgl count increased from " 3787 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3788 els_xri_cnt); 3789 /* allocate the additional els sgls */ 3790 for (i = 0; i < xri_cnt; i++) { 3791 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3792 GFP_KERNEL); 3793 if (sglq_entry == NULL) { 3794 lpfc_printf_log(phba, KERN_ERR, 3795 LOG_TRACE_EVENT, 3796 "2562 Failure to allocate an " 3797 "ELS sgl entry:%d\n", i); 3798 rc = -ENOMEM; 3799 goto out_free_mem; 3800 } 3801 sglq_entry->buff_type = GEN_BUFF_TYPE; 3802 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3803 &sglq_entry->phys); 3804 if (sglq_entry->virt == NULL) { 3805 kfree(sglq_entry); 3806 lpfc_printf_log(phba, KERN_ERR, 3807 LOG_TRACE_EVENT, 3808 "2563 Failure to allocate an " 3809 "ELS mbuf:%d\n", i); 3810 rc = -ENOMEM; 3811 goto out_free_mem; 3812 } 3813 sglq_entry->sgl = sglq_entry->virt; 3814 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3815 sglq_entry->state = SGL_FREED; 3816 list_add_tail(&sglq_entry->list, &els_sgl_list); 3817 } 3818 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 3819 list_splice_init(&els_sgl_list, 3820 &phba->sli4_hba.lpfc_els_sgl_list); 3821 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 3822 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3823 /* els xri-sgl shrinked */ 3824 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3825 lpfc_printf_log(phba, KERN_INFO, 
LOG_SLI,
3826 "3158 ELS xri-sgl count decreased from "
3827 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3828 els_xri_cnt);
3829 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3830 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3831 &els_sgl_list);
3832 /* release extra els sgls from list */
3833 for (i = 0; i < xri_cnt; i++) {
3834 list_remove_head(&els_sgl_list,
3835 sglq_entry, struct lpfc_sglq, list);
3836 if (sglq_entry) {
3837 __lpfc_mbuf_free(phba, sglq_entry->virt,
3838 sglq_entry->phys);
3839 kfree(sglq_entry);
3840 }
3841 }
3842 list_splice_init(&els_sgl_list,
3843 &phba->sli4_hba.lpfc_els_sgl_list);
3844 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3845 } else
3846 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3847 "3163 ELS xri-sgl count unchanged: %d\n",
3848 els_xri_cnt);
3849 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3850
3851 /* update xris to els sgls on the list */
3852 sglq_entry = NULL;
3853 sglq_entry_next = NULL;
3854 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3855 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3856 lxri = lpfc_sli4_next_xritag(phba);
3857 if (lxri == NO_XRI) {
3858 lpfc_printf_log(phba, KERN_ERR,
3859 LOG_TRACE_EVENT,
3860 "2400 Failed to allocate xri for "
3861 "ELS sgl\n");
3862 rc = -ENOMEM;
3863 goto out_free_mem;
3864 }
3865 sglq_entry->sli4_lxritag = lxri;
3866 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3867 }
3868 return 0;
3869
3870 out_free_mem:
3871 lpfc_free_els_sgl_list(phba);
3872 return rc;
3873 }
3874
3875 /**
3876 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3877 * @phba: pointer to lpfc hba data structure.
3878 *
3879 * This routine first calculates the sizes of the current ELS and allocated
3880 * NVMET xri-sgl lists, and then goes through all sgls to update the physical
3881 * XRIs assigned due to port function reset. During port initialization, the
3882 * current ELS and allocated NVMET sgl lists are 0s.
3883 * 3884 * Return codes 3885 * 0 - successful (for now, it always returns 0) 3886 **/ 3887 int 3888 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 3889 { 3890 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3891 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3892 uint16_t nvmet_xri_cnt; 3893 LIST_HEAD(nvmet_sgl_list); 3894 int rc; 3895 3896 /* 3897 * update on pci function's nvmet xri-sgl list 3898 */ 3899 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3900 3901 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 3902 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3903 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3904 /* els xri-sgl expanded */ 3905 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 3906 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3907 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 3908 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 3909 /* allocate the additional nvmet sgls */ 3910 for (i = 0; i < xri_cnt; i++) { 3911 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3912 GFP_KERNEL); 3913 if (sglq_entry == NULL) { 3914 lpfc_printf_log(phba, KERN_ERR, 3915 LOG_TRACE_EVENT, 3916 "6303 Failure to allocate an " 3917 "NVMET sgl entry:%d\n", i); 3918 rc = -ENOMEM; 3919 goto out_free_mem; 3920 } 3921 sglq_entry->buff_type = NVMET_BUFF_TYPE; 3922 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 3923 &sglq_entry->phys); 3924 if (sglq_entry->virt == NULL) { 3925 kfree(sglq_entry); 3926 lpfc_printf_log(phba, KERN_ERR, 3927 LOG_TRACE_EVENT, 3928 "6304 Failure to allocate an " 3929 "NVMET buf:%d\n", i); 3930 rc = -ENOMEM; 3931 goto out_free_mem; 3932 } 3933 sglq_entry->sgl = sglq_entry->virt; 3934 memset(sglq_entry->sgl, 0, 3935 phba->cfg_sg_dma_buf_size); 3936 sglq_entry->state = SGL_FREED; 3937 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3938 } 3939 spin_lock_irq(&phba->hbalock); 3940 spin_lock(&phba->sli4_hba.sgl_list_lock); 3941 list_splice_init(&nvmet_sgl_list, 3942 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3943 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3944 spin_unlock_irq(&phba->hbalock); 3945 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3946 /* nvmet xri-sgl shrunk */ 3947 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3948 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3949 "6305 NVMET xri-sgl count decreased from " 3950 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3951 nvmet_xri_cnt); 3952 spin_lock_irq(&phba->hbalock); 3953 spin_lock(&phba->sli4_hba.sgl_list_lock); 3954 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3955 &nvmet_sgl_list); 3956 /* release extra nvmet sgls from list */ 3957 for (i = 0; i < xri_cnt; i++) { 3958 list_remove_head(&nvmet_sgl_list, 3959 sglq_entry, struct lpfc_sglq, list); 3960 if (sglq_entry) { 3961 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3962 sglq_entry->phys); 3963 kfree(sglq_entry); 3964 } 3965 } 3966 list_splice_init(&nvmet_sgl_list, 3967 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3968 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3969 spin_unlock_irq(&phba->hbalock); 3970 } else 3971 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3972 "6306 NVMET xri-sgl count unchanged: %d\n", 3973 nvmet_xri_cnt); 3974 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3975 3976 /* update xris to nvmet sgls on the list */ 3977 sglq_entry = NULL; 3978 sglq_entry_next = NULL; 3979 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3980 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3981 lxri = lpfc_sli4_next_xritag(phba); 3982 if (lxri == NO_XRI) { 3983 lpfc_printf_log(phba, KERN_ERR, 
3984 LOG_TRACE_EVENT, 3985 "6307 Failed to allocate xri for " 3986 "NVMET sgl\n"); 3987 rc = -ENOMEM; 3988 goto out_free_mem; 3989 } 3990 sglq_entry->sli4_lxritag = lxri; 3991 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3992 } 3993 return 0; 3994 3995 out_free_mem: 3996 lpfc_free_nvmet_sgl_list(phba); 3997 return rc; 3998 } 3999 4000 int 4001 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 4002 { 4003 LIST_HEAD(blist); 4004 struct lpfc_sli4_hdw_queue *qp; 4005 struct lpfc_io_buf *lpfc_cmd; 4006 struct lpfc_io_buf *iobufp, *prev_iobufp; 4007 int idx, cnt, xri, inserted; 4008 4009 cnt = 0; 4010 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4011 qp = &phba->sli4_hba.hdwq[idx]; 4012 spin_lock_irq(&qp->io_buf_list_get_lock); 4013 spin_lock(&qp->io_buf_list_put_lock); 4014 4015 /* Take everything off the get and put lists */ 4016 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 4017 list_splice(&qp->lpfc_io_buf_list_put, &blist); 4018 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 4019 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 4020 cnt += qp->get_io_bufs + qp->put_io_bufs; 4021 qp->get_io_bufs = 0; 4022 qp->put_io_bufs = 0; 4023 qp->total_io_bufs = 0; 4024 spin_unlock(&qp->io_buf_list_put_lock); 4025 spin_unlock_irq(&qp->io_buf_list_get_lock); 4026 } 4027 4028 /* 4029 * Take IO buffers off blist and put on cbuf sorted by XRI. 4030 * This is because POST_SGL takes a sequential range of XRIs 4031 * to post to the firmware. 4032 */ 4033 for (idx = 0; idx < cnt; idx++) { 4034 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 4035 if (!lpfc_cmd) 4036 return cnt; 4037 if (idx == 0) { 4038 list_add_tail(&lpfc_cmd->list, cbuf); 4039 continue; 4040 } 4041 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 4042 inserted = 0; 4043 prev_iobufp = NULL; 4044 list_for_each_entry(iobufp, cbuf, list) { 4045 if (xri < iobufp->cur_iocbq.sli4_xritag) { 4046 if (prev_iobufp) 4047 list_add(&lpfc_cmd->list, 4048 &prev_iobufp->list); 4049 else 4050 list_add(&lpfc_cmd->list, cbuf); 4051 inserted = 1; 4052 break; 4053 } 4054 prev_iobufp = iobufp; 4055 } 4056 if (!inserted) 4057 list_add_tail(&lpfc_cmd->list, cbuf); 4058 } 4059 return cnt; 4060 } 4061 4062 int 4063 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 4064 { 4065 struct lpfc_sli4_hdw_queue *qp; 4066 struct lpfc_io_buf *lpfc_cmd; 4067 int idx, cnt; 4068 4069 qp = phba->sli4_hba.hdwq; 4070 cnt = 0; 4071 while (!list_empty(cbuf)) { 4072 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4073 list_remove_head(cbuf, lpfc_cmd, 4074 struct lpfc_io_buf, list); 4075 if (!lpfc_cmd) 4076 return cnt; 4077 cnt++; 4078 qp = &phba->sli4_hba.hdwq[idx]; 4079 lpfc_cmd->hdwq_no = idx; 4080 lpfc_cmd->hdwq = qp; 4081 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 4082 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 4083 spin_lock(&qp->io_buf_list_put_lock); 4084 list_add_tail(&lpfc_cmd->list, 4085 &qp->lpfc_io_buf_list_put); 4086 qp->put_io_bufs++; 4087 qp->total_io_bufs++; 4088 spin_unlock(&qp->io_buf_list_put_lock); 4089 } 4090 } 4091 return cnt; 4092 } 4093 4094 /** 4095 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 4096 * @phba: pointer to lpfc hba data structure. 4097 * 4098 * This routine first calculates the sizes of the current els and allocated 4099 * scsi sgl lists, and then goes through all sgls to updates the physical 4100 * XRIs assigned due to port function reset. During port initialization, the 4101 * current els and allocated scsi sgl lists are 0s. 
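 * Buffers above the new io_xri_max limit are freed back to the DMA pool;
 * the remaining buffers are assigned fresh XRIs and handed back to the
 * hardware queues through lpfc_io_buf_replenish().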
4102 *
4103 * Return codes
4104 * 0 - successful (for now, it always returns 0)
4105 **/
4106 int
4107 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4108 {
4109 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4110 uint16_t i, lxri, els_xri_cnt;
4111 uint16_t io_xri_cnt, io_xri_max;
4112 LIST_HEAD(io_sgl_list);
4113 int rc, cnt;
4114
4115 /*
4116 * update on pci function's allocated nvme xri-sgl list
4117 */
4118
4119 /* maximum number of xris available for nvme buffers */
4120 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4121 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4122 phba->sli4_hba.io_xri_max = io_xri_max;
4123
4124 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4125 "6074 Current allocated XRI sgl count:%d, "
4126 "maximum XRI count:%d\n",
4127 phba->sli4_hba.io_xri_cnt,
4128 phba->sli4_hba.io_xri_max);
4129
4130 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4131
4132 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4133 /* max nvme xri shrunk below the allocated nvme buffers */
4134 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4135 phba->sli4_hba.io_xri_max;
4136 /* release the extra allocated nvme buffers */
4137 for (i = 0; i < io_xri_cnt; i++) {
4138 list_remove_head(&io_sgl_list, lpfc_ncmd,
4139 struct lpfc_io_buf, list);
4140 if (lpfc_ncmd) {
4141 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4142 lpfc_ncmd->data,
4143 lpfc_ncmd->dma_handle);
4144 kfree(lpfc_ncmd);
4145 }
4146 }
4147 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4148 }
4149
4150 /* update xris associated to remaining allocated nvme buffers */
4151 lpfc_ncmd = NULL;
4152 lpfc_ncmd_next = NULL;
4153 phba->sli4_hba.io_xri_cnt = cnt;
4154 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4155 &io_sgl_list, list) {
4156 lxri = lpfc_sli4_next_xritag(phba);
4157 if (lxri == NO_XRI) {
4158 lpfc_printf_log(phba, KERN_ERR,
4159 LOG_TRACE_EVENT,
4160 "6075 Failed to allocate xri for "
4161 "nvme buffer\n");
4162 rc = -ENOMEM;
4163 goto out_free_mem;
4164 }
4165 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4166 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4167 }
4168 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4169 return 0;
4170
4171 out_free_mem:
4172 lpfc_io_free(phba);
4173 return rc;
4174 }
4175
4176 /**
4177 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4178 * @phba: Pointer to lpfc hba data structure.
4179 * @num_to_alloc: The requested number of buffers to allocate.
4180 *
4181 * This routine allocates nvme buffers for a device with SLI-4 interface spec;
4182 * the nvme buffer contains all the necessary information needed to initiate
4183 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4184 * them on a list, it posts them to the port using SGL block post.
4185 *
4186 * Return codes:
4187 * int - number of IO buffers that were allocated and posted.
4188 * 0 = failure, less than num_to_alloc is a partial failure.
4189 **/
4190 int
4191 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4192 {
4193 struct lpfc_io_buf *lpfc_ncmd;
4194 struct lpfc_iocbq *pwqeq;
4195 uint16_t iotag, lxri = 0;
4196 int bcnt, num_posted;
4197 LIST_HEAD(prep_nblist);
4198 LIST_HEAD(post_nblist);
4199 LIST_HEAD(nvme_nblist);
4200
4201 phba->sli4_hba.io_xri_cnt = 0;
4202 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4203 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4204 if (!lpfc_ncmd)
4205 break;
4206 /*
4207 * Get memory from the pci pool to map the virt space to
4208 * pci bus space for an I/O.
The DMA buffer includes the 4209 * number of SGE's necessary to support the sg_tablesize. 4210 */ 4211 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4212 GFP_KERNEL, 4213 &lpfc_ncmd->dma_handle); 4214 if (!lpfc_ncmd->data) { 4215 kfree(lpfc_ncmd); 4216 break; 4217 } 4218 4219 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4220 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4221 } else { 4222 /* 4223 * 4K Page alignment is CRITICAL to BlockGuard, double 4224 * check to be sure. 4225 */ 4226 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4227 (((unsigned long)(lpfc_ncmd->data) & 4228 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4229 lpfc_printf_log(phba, KERN_ERR, 4230 LOG_TRACE_EVENT, 4231 "3369 Memory alignment err: " 4232 "addr=%lx\n", 4233 (unsigned long)lpfc_ncmd->data); 4234 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4235 lpfc_ncmd->data, 4236 lpfc_ncmd->dma_handle); 4237 kfree(lpfc_ncmd); 4238 break; 4239 } 4240 } 4241 4242 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4243 4244 lxri = lpfc_sli4_next_xritag(phba); 4245 if (lxri == NO_XRI) { 4246 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4247 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4248 kfree(lpfc_ncmd); 4249 break; 4250 } 4251 pwqeq = &lpfc_ncmd->cur_iocbq; 4252 4253 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4254 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4255 if (iotag == 0) { 4256 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4257 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4258 kfree(lpfc_ncmd); 4259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4260 "6121 Failed to allocate IOTAG for" 4261 " XRI:0x%x\n", lxri); 4262 lpfc_sli4_free_xri(phba, lxri); 4263 break; 4264 } 4265 pwqeq->sli4_lxritag = lxri; 4266 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4267 pwqeq->context1 = lpfc_ncmd; 4268 4269 /* Initialize local short-hand pointers. 
*/ 4270 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4271 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4272 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4273 spin_lock_init(&lpfc_ncmd->buf_lock); 4274 4275 /* add the nvme buffer to a post list */ 4276 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4277 phba->sli4_hba.io_xri_cnt++; 4278 } 4279 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4280 "6114 Allocate %d out of %d requested new NVME " 4281 "buffers\n", bcnt, num_to_alloc); 4282 4283 /* post the list of nvme buffer sgls to port if available */ 4284 if (!list_empty(&post_nblist)) 4285 num_posted = lpfc_sli4_post_io_sgl_list( 4286 phba, &post_nblist, bcnt); 4287 else 4288 num_posted = 0; 4289 4290 return num_posted; 4291 } 4292 4293 static uint64_t 4294 lpfc_get_wwpn(struct lpfc_hba *phba) 4295 { 4296 uint64_t wwn; 4297 int rc; 4298 LPFC_MBOXQ_t *mboxq; 4299 MAILBOX_t *mb; 4300 4301 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4302 GFP_KERNEL); 4303 if (!mboxq) 4304 return (uint64_t)-1; 4305 4306 /* First get WWN of HBA instance */ 4307 lpfc_read_nv(phba, mboxq); 4308 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4309 if (rc != MBX_SUCCESS) { 4310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4311 "6019 Mailbox failed , mbxCmd x%x " 4312 "READ_NV, mbxStatus x%x\n", 4313 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4314 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4315 mempool_free(mboxq, phba->mbox_mem_pool); 4316 return (uint64_t) -1; 4317 } 4318 mb = &mboxq->u.mb; 4319 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4320 /* wwn is WWPN of HBA instance */ 4321 mempool_free(mboxq, phba->mbox_mem_pool); 4322 if (phba->sli_rev == LPFC_SLI_REV4) 4323 return be64_to_cpu(wwn); 4324 else 4325 return rol64(wwn, 32); 4326 } 4327 4328 /** 4329 * lpfc_vmid_res_alloc - Allocates resources for VMID 4330 * @phba: pointer to lpfc hba data structure. 4331 * @vport: pointer to vport data structure 4332 * 4333 * This routine allocated the resources needed for the VMID. 4334 * 4335 * Return codes 4336 * 0 on Success 4337 * Non-0 on Failure 4338 */ 4339 static int 4340 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) 4341 { 4342 /* VMID feature is supported only on SLI4 */ 4343 if (phba->sli_rev == LPFC_SLI_REV3) { 4344 phba->cfg_vmid_app_header = 0; 4345 phba->cfg_vmid_priority_tagging = 0; 4346 } 4347 4348 if (lpfc_is_vmid_enabled(phba)) { 4349 vport->vmid = 4350 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), 4351 GFP_KERNEL); 4352 if (!vport->vmid) 4353 return -ENOMEM; 4354 4355 rwlock_init(&vport->vmid_lock); 4356 4357 /* Set the VMID parameters for the vport */ 4358 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; 4359 vport->vmid_inactivity_timeout = 4360 phba->cfg_vmid_inactivity_timeout; 4361 vport->max_vmid = phba->cfg_max_vmid; 4362 vport->cur_vmid_cnt = 0; 4363 4364 vport->vmid_priority_range = bitmap_zalloc 4365 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); 4366 4367 if (!vport->vmid_priority_range) { 4368 kfree(vport->vmid); 4369 return -ENOMEM; 4370 } 4371 4372 hash_init(vport->hash_table); 4373 } 4374 return 0; 4375 } 4376 4377 /** 4378 * lpfc_create_port - Create an FC port 4379 * @phba: pointer to lpfc hba data structure. 4380 * @instance: a unique integer ID to this FC port. 4381 * @dev: pointer to the device data structure. 4382 * 4383 * This routine creates a FC port for the upper layer protocol. The FC port 4384 * can be created on top of either a physical port or a virtual port provided 4385 * by the HBA. 
This routine also allocates a SCSI host data structure (shost) 4386 * and associates the FC port created before adding the shost into the SCSI 4387 * layer. 4388 * 4389 * Return codes 4390 * @vport - pointer to the virtual N_Port data structure. 4391 * NULL - port create failed. 4392 **/ 4393 struct lpfc_vport * 4394 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4395 { 4396 struct lpfc_vport *vport; 4397 struct Scsi_Host *shost = NULL; 4398 struct scsi_host_template *template; 4399 int error = 0; 4400 int i; 4401 uint64_t wwn; 4402 bool use_no_reset_hba = false; 4403 int rc; 4404 4405 if (lpfc_no_hba_reset_cnt) { 4406 if (phba->sli_rev < LPFC_SLI_REV4 && 4407 dev == &phba->pcidev->dev) { 4408 /* Reset the port first */ 4409 lpfc_sli_brdrestart(phba); 4410 rc = lpfc_sli_chipset_init(phba); 4411 if (rc) 4412 return NULL; 4413 } 4414 wwn = lpfc_get_wwpn(phba); 4415 } 4416 4417 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4418 if (wwn == lpfc_no_hba_reset[i]) { 4419 lpfc_printf_log(phba, KERN_ERR, 4420 LOG_TRACE_EVENT, 4421 "6020 Setting use_no_reset port=%llx\n", 4422 wwn); 4423 use_no_reset_hba = true; 4424 break; 4425 } 4426 } 4427 4428 /* Seed template for SCSI host registration */ 4429 if (dev == &phba->pcidev->dev) { 4430 template = &phba->port_template; 4431 4432 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4433 /* Seed physical port template */ 4434 memcpy(template, &lpfc_template, sizeof(*template)); 4435 4436 if (use_no_reset_hba) 4437 /* template is for a no reset SCSI Host */ 4438 template->eh_host_reset_handler = NULL; 4439 4440 /* Template for all vports this physical port creates */ 4441 memcpy(&phba->vport_template, &lpfc_template, 4442 sizeof(*template)); 4443 phba->vport_template.shost_attrs = lpfc_vport_attrs; 4444 phba->vport_template.eh_bus_reset_handler = NULL; 4445 phba->vport_template.eh_host_reset_handler = NULL; 4446 phba->vport_template.vendor_id = 0; 4447 4448 /* Initialize the host templates with updated value */ 4449 if (phba->sli_rev == LPFC_SLI_REV4) { 4450 template->sg_tablesize = phba->cfg_scsi_seg_cnt; 4451 phba->vport_template.sg_tablesize = 4452 phba->cfg_scsi_seg_cnt; 4453 } else { 4454 template->sg_tablesize = phba->cfg_sg_seg_cnt; 4455 phba->vport_template.sg_tablesize = 4456 phba->cfg_sg_seg_cnt; 4457 } 4458 4459 } else { 4460 /* NVMET is for physical port only */ 4461 memcpy(template, &lpfc_template_nvme, 4462 sizeof(*template)); 4463 } 4464 } else { 4465 template = &phba->vport_template; 4466 } 4467 4468 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4469 if (!shost) 4470 goto out; 4471 4472 vport = (struct lpfc_vport *) shost->hostdata; 4473 vport->phba = phba; 4474 vport->load_flag |= FC_LOADING; 4475 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4476 vport->fc_rscn_flush = 0; 4477 lpfc_get_vport_cfgparam(vport); 4478 4479 /* Adjust value in vport */ 4480 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4481 4482 shost->unique_id = instance; 4483 shost->max_id = LPFC_MAX_TARGET; 4484 shost->max_lun = vport->cfg_max_luns; 4485 shost->this_id = -1; 4486 shost->max_cmd_len = 16; 4487 4488 if (phba->sli_rev == LPFC_SLI_REV4) { 4489 if (!phba->cfg_fcp_mq_threshold || 4490 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4491 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4492 4493 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4494 phba->cfg_fcp_mq_threshold); 4495 4496 shost->dma_boundary = 4497 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4498 4499 if (phba->cfg_xpsgl && 
!phba->nvmet_support) 4500 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4501 else 4502 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4503 } else 4504 /* SLI-3 has a limited number of hardware queues (3), 4505 * thus there is only one for FCP processing. 4506 */ 4507 shost->nr_hw_queues = 1; 4508 4509 /* 4510 * Set initial can_queue value since 0 is no longer supported and 4511 * scsi_add_host will fail. This will be adjusted later based on the 4512 * max xri value determined in hba setup. 4513 */ 4514 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4515 if (dev != &phba->pcidev->dev) { 4516 shost->transportt = lpfc_vport_transport_template; 4517 vport->port_type = LPFC_NPIV_PORT; 4518 } else { 4519 shost->transportt = lpfc_transport_template; 4520 vport->port_type = LPFC_PHYSICAL_PORT; 4521 } 4522 4523 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4524 "9081 CreatePort TMPLATE type %x TBLsize %d " 4525 "SEGcnt %d/%d\n", 4526 vport->port_type, shost->sg_tablesize, 4527 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4528 4529 /* Allocate the resources for VMID */ 4530 rc = lpfc_vmid_res_alloc(phba, vport); 4531 4532 if (rc) 4533 goto out; 4534 4535 /* Initialize all internally managed lists. */ 4536 INIT_LIST_HEAD(&vport->fc_nodes); 4537 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4538 spin_lock_init(&vport->work_port_lock); 4539 4540 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4541 4542 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4543 4544 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4545 4546 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4547 lpfc_setup_bg(phba, shost); 4548 4549 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4550 if (error) 4551 goto out_put_shost; 4552 4553 spin_lock_irq(&phba->port_list_lock); 4554 list_add_tail(&vport->listentry, &phba->port_list); 4555 spin_unlock_irq(&phba->port_list_lock); 4556 return vport; 4557 4558 out_put_shost: 4559 kfree(vport->vmid); 4560 bitmap_free(vport->vmid_priority_range); 4561 scsi_host_put(shost); 4562 out: 4563 return NULL; 4564 } 4565 4566 /** 4567 * destroy_port - destroy an FC port 4568 * @vport: pointer to an lpfc virtual N_Port data structure. 4569 * 4570 * This routine destroys a FC port from the upper layer protocol. All the 4571 * resources associated with the port are released. 4572 **/ 4573 void 4574 destroy_port(struct lpfc_vport *vport) 4575 { 4576 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4577 struct lpfc_hba *phba = vport->phba; 4578 4579 lpfc_debugfs_terminate(vport); 4580 fc_remove_host(shost); 4581 scsi_remove_host(shost); 4582 4583 spin_lock_irq(&phba->port_list_lock); 4584 list_del_init(&vport->listentry); 4585 spin_unlock_irq(&phba->port_list_lock); 4586 4587 lpfc_cleanup(vport); 4588 return; 4589 } 4590 4591 /** 4592 * lpfc_get_instance - Get a unique integer ID 4593 * 4594 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4595 * uses the kernel idr facility to perform the task. 4596 * 4597 * Return codes: 4598 * instance - a unique integer ID allocated as the new instance. 4599 * -1 - lpfc get instance failed. 4600 **/ 4601 int 4602 lpfc_get_instance(void) 4603 { 4604 int ret; 4605 4606 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4607 return ret < 0 ? -1 : ret; 4608 } 4609 4610 /** 4611 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4612 * @shost: pointer to SCSI host data structure. 4613 * @time: elapsed time of the scan in jiffies. 
4614 * 4615 * This routine is called by the SCSI layer with a SCSI host to determine 4616 * whether the scan host is finished. 4617 * 4618 * Note: there is no scan_start function as adapter initialization will have 4619 * asynchronously kicked off the link initialization. 4620 * 4621 * Return codes 4622 * 0 - SCSI host scan is not over yet. 4623 * 1 - SCSI host scan is over. 4624 **/ 4625 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4626 { 4627 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4628 struct lpfc_hba *phba = vport->phba; 4629 int stat = 0; 4630 4631 spin_lock_irq(shost->host_lock); 4632 4633 if (vport->load_flag & FC_UNLOADING) { 4634 stat = 1; 4635 goto finished; 4636 } 4637 if (time >= msecs_to_jiffies(30 * 1000)) { 4638 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4639 "0461 Scanning longer than 30 " 4640 "seconds. Continuing initialization\n"); 4641 stat = 1; 4642 goto finished; 4643 } 4644 if (time >= msecs_to_jiffies(15 * 1000) && 4645 phba->link_state <= LPFC_LINK_DOWN) { 4646 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4647 "0465 Link down longer than 15 " 4648 "seconds. Continuing initialization\n"); 4649 stat = 1; 4650 goto finished; 4651 } 4652 4653 if (vport->port_state != LPFC_VPORT_READY) 4654 goto finished; 4655 if (vport->num_disc_nodes || vport->fc_prli_sent) 4656 goto finished; 4657 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4658 goto finished; 4659 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4660 goto finished; 4661 4662 stat = 1; 4663 4664 finished: 4665 spin_unlock_irq(shost->host_lock); 4666 return stat; 4667 } 4668 4669 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4670 { 4671 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4672 struct lpfc_hba *phba = vport->phba; 4673 4674 fc_host_supported_speeds(shost) = 0; 4675 /* 4676 * Avoid reporting supported link speed for FCoE as it can't be 4677 * controlled via FCoE. 4678 */ 4679 if (phba->hba_flag & HBA_FCOE_MODE) 4680 return; 4681 4682 if (phba->lmt & LMT_256Gb) 4683 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; 4684 if (phba->lmt & LMT_128Gb) 4685 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4686 if (phba->lmt & LMT_64Gb) 4687 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4688 if (phba->lmt & LMT_32Gb) 4689 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4690 if (phba->lmt & LMT_16Gb) 4691 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4692 if (phba->lmt & LMT_10Gb) 4693 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4694 if (phba->lmt & LMT_8Gb) 4695 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4696 if (phba->lmt & LMT_4Gb) 4697 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4698 if (phba->lmt & LMT_2Gb) 4699 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4700 if (phba->lmt & LMT_1Gb) 4701 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4702 } 4703 4704 /** 4705 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4706 * @shost: pointer to SCSI host data structure. 4707 * 4708 * This routine initializes a given SCSI host attributes on a FC port. The 4709 * SCSI host can be either on top of a physical port or a virtual port. 4710 **/ 4711 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4712 { 4713 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4714 struct lpfc_hba *phba = vport->phba; 4715 /* 4716 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
4717 */ 4718 4719 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4720 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4721 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4722 4723 memset(fc_host_supported_fc4s(shost), 0, 4724 sizeof(fc_host_supported_fc4s(shost))); 4725 fc_host_supported_fc4s(shost)[2] = 1; 4726 fc_host_supported_fc4s(shost)[7] = 1; 4727 4728 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4729 sizeof fc_host_symbolic_name(shost)); 4730 4731 lpfc_host_supported_speeds_set(shost); 4732 4733 fc_host_maxframe_size(shost) = 4734 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4735 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4736 4737 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4738 4739 /* This value is also unchanging */ 4740 memset(fc_host_active_fc4s(shost), 0, 4741 sizeof(fc_host_active_fc4s(shost))); 4742 fc_host_active_fc4s(shost)[2] = 1; 4743 fc_host_active_fc4s(shost)[7] = 1; 4744 4745 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4746 spin_lock_irq(shost->host_lock); 4747 vport->load_flag &= ~FC_LOADING; 4748 spin_unlock_irq(shost->host_lock); 4749 } 4750 4751 /** 4752 * lpfc_stop_port_s3 - Stop SLI3 device port 4753 * @phba: pointer to lpfc hba data structure. 4754 * 4755 * This routine is invoked to stop an SLI3 device port, it stops the device 4756 * from generating interrupts and stops the device driver's timers for the 4757 * device. 4758 **/ 4759 static void 4760 lpfc_stop_port_s3(struct lpfc_hba *phba) 4761 { 4762 /* Clear all interrupt enable conditions */ 4763 writel(0, phba->HCregaddr); 4764 readl(phba->HCregaddr); /* flush */ 4765 /* Clear all pending interrupts */ 4766 writel(0xffffffff, phba->HAregaddr); 4767 readl(phba->HAregaddr); /* flush */ 4768 4769 /* Reset some HBA SLI setup states */ 4770 lpfc_stop_hba_timers(phba); 4771 phba->pport->work_port_events = 0; 4772 } 4773 4774 /** 4775 * lpfc_stop_port_s4 - Stop SLI4 device port 4776 * @phba: pointer to lpfc hba data structure. 4777 * 4778 * This routine is invoked to stop an SLI4 device port, it stops the device 4779 * from generating interrupts and stops the device driver's timers for the 4780 * device. 4781 **/ 4782 static void 4783 lpfc_stop_port_s4(struct lpfc_hba *phba) 4784 { 4785 /* Reset some HBA SLI4 setup states */ 4786 lpfc_stop_hba_timers(phba); 4787 if (phba->pport) 4788 phba->pport->work_port_events = 0; 4789 phba->sli4_hba.intr_enable = 0; 4790 } 4791 4792 /** 4793 * lpfc_stop_port - Wrapper function for stopping hba port 4794 * @phba: Pointer to HBA context object. 4795 * 4796 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4797 * the API jump table function pointer from the lpfc_hba struct. 4798 **/ 4799 void 4800 lpfc_stop_port(struct lpfc_hba *phba) 4801 { 4802 phba->lpfc_stop_port(phba); 4803 4804 if (phba->wq) 4805 flush_workqueue(phba->wq); 4806 } 4807 4808 /** 4809 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4810 * @phba: Pointer to hba for which this call is being executed. 4811 * 4812 * This routine starts the timer waiting for the FCF rediscovery to complete. 
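 *
 * The timer is armed for LPFC_FCF_REDISCOVER_WAIT_TMO milliseconds; when it
 * expires, lpfc_sli4_fcf_redisc_wait_tmo() posts an FCF rediscovery event
 * and wakes the worker thread.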
4813 **/ 4814 void 4815 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 4816 { 4817 unsigned long fcf_redisc_wait_tmo = 4818 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 4819 /* Start fcf rediscovery wait period timer */ 4820 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 4821 spin_lock_irq(&phba->hbalock); 4822 /* Allow action to new fcf asynchronous event */ 4823 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 4824 /* Mark the FCF rediscovery pending state */ 4825 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 4826 spin_unlock_irq(&phba->hbalock); 4827 } 4828 4829 /** 4830 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 4831 * @t: Timer context used to obtain the pointer to lpfc hba data structure. 4832 * 4833 * This routine is invoked when waiting for FCF table rediscover has been 4834 * timed out. If new FCF record(s) has (have) been discovered during the 4835 * wait period, a new FCF event shall be added to the FCOE async event 4836 * list, and then worker thread shall be waked up for processing from the 4837 * worker thread context. 4838 **/ 4839 static void 4840 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4841 { 4842 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4843 4844 /* Don't send FCF rediscovery event if timer cancelled */ 4845 spin_lock_irq(&phba->hbalock); 4846 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4847 spin_unlock_irq(&phba->hbalock); 4848 return; 4849 } 4850 /* Clear FCF rediscovery timer pending flag */ 4851 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4852 /* FCF rediscovery event to worker thread */ 4853 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4854 spin_unlock_irq(&phba->hbalock); 4855 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4856 "2776 FCF rediscover quiescent timer expired\n"); 4857 /* wake up worker thread */ 4858 lpfc_worker_wake_up(phba); 4859 } 4860 4861 /** 4862 * lpfc_vmid_poll - VMID timeout detection 4863 * @t: Timer context used to obtain the pointer to lpfc hba data structure. 4864 * 4865 * This routine is invoked when there is no I/O on by a VM for the specified 4866 * amount of time. When this situation is detected, the VMID has to be 4867 * deregistered from the switch and all the local resources freed. The VMID 4868 * will be reassigned to the VM once the I/O begins. 4869 **/ 4870 static void 4871 lpfc_vmid_poll(struct timer_list *t) 4872 { 4873 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll); 4874 u32 wake_up = 0; 4875 4876 /* check if there is a need to issue QFPA */ 4877 if (phba->pport->vmid_priority_tagging) { 4878 wake_up = 1; 4879 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; 4880 } 4881 4882 /* Is the vmid inactivity timer enabled */ 4883 if (phba->pport->vmid_inactivity_timeout || 4884 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) { 4885 wake_up = 1; 4886 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID; 4887 } 4888 4889 if (wake_up) 4890 lpfc_worker_wake_up(phba); 4891 4892 /* restart the timer for the next iteration */ 4893 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * 4894 LPFC_VMID_TIMER)); 4895 } 4896 4897 /** 4898 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4899 * @phba: pointer to lpfc hba data structure. 4900 * @acqe_link: pointer to the async link completion queue entry. 4901 * 4902 * This routine is to parse the SLI4 link-attention link fault code. 
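 *
 * Recognized fault codes (none, local, remote, LR_LRR) are accepted
 * silently; any other value is logged as an unknown link fault.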
4903 **/ 4904 static void 4905 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4906 struct lpfc_acqe_link *acqe_link) 4907 { 4908 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4909 case LPFC_ASYNC_LINK_FAULT_NONE: 4910 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4911 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4912 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 4913 break; 4914 default: 4915 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4916 "0398 Unknown link fault code: x%x\n", 4917 bf_get(lpfc_acqe_link_fault, acqe_link)); 4918 break; 4919 } 4920 } 4921 4922 /** 4923 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4924 * @phba: pointer to lpfc hba data structure. 4925 * @acqe_link: pointer to the async link completion queue entry. 4926 * 4927 * This routine is to parse the SLI4 link attention type and translate it 4928 * into the base driver's link attention type coding. 4929 * 4930 * Return: Link attention type in terms of base driver's coding. 4931 **/ 4932 static uint8_t 4933 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4934 struct lpfc_acqe_link *acqe_link) 4935 { 4936 uint8_t att_type; 4937 4938 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4939 case LPFC_ASYNC_LINK_STATUS_DOWN: 4940 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4941 att_type = LPFC_ATT_LINK_DOWN; 4942 break; 4943 case LPFC_ASYNC_LINK_STATUS_UP: 4944 /* Ignore physical link up events - wait for logical link up */ 4945 att_type = LPFC_ATT_RESERVED; 4946 break; 4947 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4948 att_type = LPFC_ATT_LINK_UP; 4949 break; 4950 default: 4951 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4952 "0399 Invalid link attention type: x%x\n", 4953 bf_get(lpfc_acqe_link_status, acqe_link)); 4954 att_type = LPFC_ATT_RESERVED; 4955 break; 4956 } 4957 return att_type; 4958 } 4959 4960 /** 4961 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4962 * @phba: pointer to lpfc hba data structure. 4963 * 4964 * This routine is to get an SLI3 FC port's link speed in Mbps. 4965 * 4966 * Return: link speed in terms of Mbps. 4967 **/ 4968 uint32_t 4969 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4970 { 4971 uint32_t link_speed; 4972 4973 if (!lpfc_is_link_up(phba)) 4974 return 0; 4975 4976 if (phba->sli_rev <= LPFC_SLI_REV3) { 4977 switch (phba->fc_linkspeed) { 4978 case LPFC_LINK_SPEED_1GHZ: 4979 link_speed = 1000; 4980 break; 4981 case LPFC_LINK_SPEED_2GHZ: 4982 link_speed = 2000; 4983 break; 4984 case LPFC_LINK_SPEED_4GHZ: 4985 link_speed = 4000; 4986 break; 4987 case LPFC_LINK_SPEED_8GHZ: 4988 link_speed = 8000; 4989 break; 4990 case LPFC_LINK_SPEED_10GHZ: 4991 link_speed = 10000; 4992 break; 4993 case LPFC_LINK_SPEED_16GHZ: 4994 link_speed = 16000; 4995 break; 4996 default: 4997 link_speed = 0; 4998 } 4999 } else { 5000 if (phba->sli4_hba.link_state.logical_speed) 5001 link_speed = 5002 phba->sli4_hba.link_state.logical_speed; 5003 else 5004 link_speed = phba->sli4_hba.link_state.speed; 5005 } 5006 return link_speed; 5007 } 5008 5009 /** 5010 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 5011 * @phba: pointer to lpfc hba data structure. 5012 * @evt_code: asynchronous event code. 5013 * @speed_code: asynchronous event link speed code. 5014 * 5015 * This routine is to parse the giving SLI4 async event link speed code into 5016 * value of Mbps for the link speed. 5017 * 5018 * Return: link speed in terms of Mbps. 
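 *
 * For example, an LPFC_TRAILER_CODE_FC event carrying LPFC_FC_LA_SPEED_32G
 * is reported as 32000 Mbps; unrecognized speed or event codes are
 * reported as 0.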
5019 **/ 5020 static uint32_t 5021 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 5022 uint8_t speed_code) 5023 { 5024 uint32_t port_speed; 5025 5026 switch (evt_code) { 5027 case LPFC_TRAILER_CODE_LINK: 5028 switch (speed_code) { 5029 case LPFC_ASYNC_LINK_SPEED_ZERO: 5030 port_speed = 0; 5031 break; 5032 case LPFC_ASYNC_LINK_SPEED_10MBPS: 5033 port_speed = 10; 5034 break; 5035 case LPFC_ASYNC_LINK_SPEED_100MBPS: 5036 port_speed = 100; 5037 break; 5038 case LPFC_ASYNC_LINK_SPEED_1GBPS: 5039 port_speed = 1000; 5040 break; 5041 case LPFC_ASYNC_LINK_SPEED_10GBPS: 5042 port_speed = 10000; 5043 break; 5044 case LPFC_ASYNC_LINK_SPEED_20GBPS: 5045 port_speed = 20000; 5046 break; 5047 case LPFC_ASYNC_LINK_SPEED_25GBPS: 5048 port_speed = 25000; 5049 break; 5050 case LPFC_ASYNC_LINK_SPEED_40GBPS: 5051 port_speed = 40000; 5052 break; 5053 case LPFC_ASYNC_LINK_SPEED_100GBPS: 5054 port_speed = 100000; 5055 break; 5056 default: 5057 port_speed = 0; 5058 } 5059 break; 5060 case LPFC_TRAILER_CODE_FC: 5061 switch (speed_code) { 5062 case LPFC_FC_LA_SPEED_UNKNOWN: 5063 port_speed = 0; 5064 break; 5065 case LPFC_FC_LA_SPEED_1G: 5066 port_speed = 1000; 5067 break; 5068 case LPFC_FC_LA_SPEED_2G: 5069 port_speed = 2000; 5070 break; 5071 case LPFC_FC_LA_SPEED_4G: 5072 port_speed = 4000; 5073 break; 5074 case LPFC_FC_LA_SPEED_8G: 5075 port_speed = 8000; 5076 break; 5077 case LPFC_FC_LA_SPEED_10G: 5078 port_speed = 10000; 5079 break; 5080 case LPFC_FC_LA_SPEED_16G: 5081 port_speed = 16000; 5082 break; 5083 case LPFC_FC_LA_SPEED_32G: 5084 port_speed = 32000; 5085 break; 5086 case LPFC_FC_LA_SPEED_64G: 5087 port_speed = 64000; 5088 break; 5089 case LPFC_FC_LA_SPEED_128G: 5090 port_speed = 128000; 5091 break; 5092 case LPFC_FC_LA_SPEED_256G: 5093 port_speed = 256000; 5094 break; 5095 default: 5096 port_speed = 0; 5097 } 5098 break; 5099 default: 5100 port_speed = 0; 5101 } 5102 return port_speed; 5103 } 5104 5105 /** 5106 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 5107 * @phba: pointer to lpfc hba data structure. 5108 * @acqe_link: pointer to the async link completion queue entry. 5109 * 5110 * This routine is to handle the SLI4 asynchronous FCoE link event. 
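 *
 * For a port in FC mode the READ_TOPOLOGY mailbox command is actually
 * issued to the firmware; for a port in FCoE mode the mailbox completion
 * is synthesized from the ACQE contents and the READ_TOPOLOGY completion
 * handler is invoked directly.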
5111 **/ 5112 static void 5113 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 5114 struct lpfc_acqe_link *acqe_link) 5115 { 5116 struct lpfc_dmabuf *mp; 5117 LPFC_MBOXQ_t *pmb; 5118 MAILBOX_t *mb; 5119 struct lpfc_mbx_read_top *la; 5120 uint8_t att_type; 5121 int rc; 5122 5123 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 5124 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 5125 return; 5126 phba->fcoe_eventtag = acqe_link->event_tag; 5127 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5128 if (!pmb) { 5129 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5130 "0395 The mboxq allocation failed\n"); 5131 return; 5132 } 5133 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5134 if (!mp) { 5135 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5136 "0396 The lpfc_dmabuf allocation failed\n"); 5137 goto out_free_pmb; 5138 } 5139 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5140 if (!mp->virt) { 5141 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5142 "0397 The mbuf allocation failed\n"); 5143 goto out_free_dmabuf; 5144 } 5145 5146 /* Cleanup any outstanding ELS commands */ 5147 lpfc_els_flush_all_cmd(phba); 5148 5149 /* Block ELS IOCBs until we have done process link event */ 5150 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5151 5152 /* Update link event statistics */ 5153 phba->sli.slistat.link_event++; 5154 5155 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5156 lpfc_read_topology(phba, pmb, mp); 5157 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5158 pmb->vport = phba->pport; 5159 5160 /* Keep the link status for extra SLI4 state machine reference */ 5161 phba->sli4_hba.link_state.speed = 5162 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 5163 bf_get(lpfc_acqe_link_speed, acqe_link)); 5164 phba->sli4_hba.link_state.duplex = 5165 bf_get(lpfc_acqe_link_duplex, acqe_link); 5166 phba->sli4_hba.link_state.status = 5167 bf_get(lpfc_acqe_link_status, acqe_link); 5168 phba->sli4_hba.link_state.type = 5169 bf_get(lpfc_acqe_link_type, acqe_link); 5170 phba->sli4_hba.link_state.number = 5171 bf_get(lpfc_acqe_link_number, acqe_link); 5172 phba->sli4_hba.link_state.fault = 5173 bf_get(lpfc_acqe_link_fault, acqe_link); 5174 phba->sli4_hba.link_state.logical_speed = 5175 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 5176 5177 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5178 "2900 Async FC/FCoE Link event - Speed:%dGBit " 5179 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 5180 "Logical speed:%dMbps Fault:%d\n", 5181 phba->sli4_hba.link_state.speed, 5182 phba->sli4_hba.link_state.topology, 5183 phba->sli4_hba.link_state.status, 5184 phba->sli4_hba.link_state.type, 5185 phba->sli4_hba.link_state.number, 5186 phba->sli4_hba.link_state.logical_speed, 5187 phba->sli4_hba.link_state.fault); 5188 /* 5189 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 5190 * topology info. Note: Optional for non FC-AL ports. 5191 */ 5192 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5193 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5194 if (rc == MBX_NOT_FINISHED) 5195 goto out_free_dmabuf; 5196 return; 5197 } 5198 /* 5199 * For FCoE Mode: fill in all the topology information we need and call 5200 * the READ_TOPOLOGY completion routine to continue without actually 5201 * sending the READ_TOPOLOGY mailbox command to the port. 
5202 */
5203 /* Initialize completion status */
5204 mb = &pmb->u.mb;
5205 mb->mbxStatus = MBX_SUCCESS;
5206
5207 /* Parse port fault information field */
5208 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5209
5210 /* Parse and translate link attention fields */
5211 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5212 la->eventTag = acqe_link->event_tag;
5213 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5214 bf_set(lpfc_mbx_read_top_link_spd, la,
5215 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5216
5217 /* Fake the following irrelevant fields */
5218 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5219 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5220 bf_set(lpfc_mbx_read_top_il, la, 0);
5221 bf_set(lpfc_mbx_read_top_pb, la, 0);
5222 bf_set(lpfc_mbx_read_top_fa, la, 0);
5223 bf_set(lpfc_mbx_read_top_mm, la, 0);
5224
5225 /* Invoke the lpfc_handle_latt mailbox command callback function */
5226 lpfc_mbx_cmpl_read_topology(phba, pmb);
5227
5228 return;
5229
5230 out_free_dmabuf:
5231 kfree(mp);
5232 out_free_pmb:
5233 mempool_free(pmb, phba->mbox_mem_pool);
5234 }
5235
5236 /**
5237 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5238 * topology.
5239 * @phba: pointer to lpfc hba data structure.
5240 * @speed_code: asynchronous event link speed code.
5241 *
5242 * This routine is to parse the given SLI4 async event link speed code into
5243 * value of Read topology link speed.
5244 *
5245 * Return: link speed in terms of Read topology.
5246 **/
5247 static uint8_t
5248 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5249 {
5250 uint8_t port_speed;
5251
5252 switch (speed_code) {
5253 case LPFC_FC_LA_SPEED_1G:
5254 port_speed = LPFC_LINK_SPEED_1GHZ;
5255 break;
5256 case LPFC_FC_LA_SPEED_2G:
5257 port_speed = LPFC_LINK_SPEED_2GHZ;
5258 break;
5259 case LPFC_FC_LA_SPEED_4G:
5260 port_speed = LPFC_LINK_SPEED_4GHZ;
5261 break;
5262 case LPFC_FC_LA_SPEED_8G:
5263 port_speed = LPFC_LINK_SPEED_8GHZ;
5264 break;
5265 case LPFC_FC_LA_SPEED_16G:
5266 port_speed = LPFC_LINK_SPEED_16GHZ;
5267 break;
5268 case LPFC_FC_LA_SPEED_32G:
5269 port_speed = LPFC_LINK_SPEED_32GHZ;
5270 break;
5271 case LPFC_FC_LA_SPEED_64G:
5272 port_speed = LPFC_LINK_SPEED_64GHZ;
5273 break;
5274 case LPFC_FC_LA_SPEED_128G:
5275 port_speed = LPFC_LINK_SPEED_128GHZ;
5276 break;
5277 case LPFC_FC_LA_SPEED_256G:
5278 port_speed = LPFC_LINK_SPEED_256GHZ;
5279 break;
5280 default:
5281 port_speed = 0;
5282 break;
5283 }
5284
5285 return port_speed;
5286 }
5287
5288 #define trunk_link_status(__idx)\
5289 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5290 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5291 "Link up" : "Link down") : "NA"
5292 /* Did port __idx report an error */
5293 #define trunk_port_fault(__idx)\
5294 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5295 (port_fault & (1 << __idx) ?
"YES" : "NO") : "NA" 5296 5297 static void 5298 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 5299 struct lpfc_acqe_fc_la *acqe_fc) 5300 { 5301 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 5302 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 5303 5304 phba->sli4_hba.link_state.speed = 5305 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5306 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5307 5308 phba->sli4_hba.link_state.logical_speed = 5309 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5310 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 5311 phba->fc_linkspeed = 5312 lpfc_async_link_speed_to_read_top( 5313 phba, 5314 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5315 5316 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 5317 phba->trunk_link.link0.state = 5318 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 5319 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5320 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 5321 } 5322 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 5323 phba->trunk_link.link1.state = 5324 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 5325 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5326 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 5327 } 5328 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 5329 phba->trunk_link.link2.state = 5330 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 5331 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5332 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 5333 } 5334 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 5335 phba->trunk_link.link3.state = 5336 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 5337 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5338 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 5339 } 5340 5341 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5342 "2910 Async FC Trunking Event - Speed:%d\n" 5343 "\tLogical speed:%d " 5344 "port0: %s port1: %s port2: %s port3: %s\n", 5345 phba->sli4_hba.link_state.speed, 5346 phba->sli4_hba.link_state.logical_speed, 5347 trunk_link_status(0), trunk_link_status(1), 5348 trunk_link_status(2), trunk_link_status(3)); 5349 5350 if (port_fault) 5351 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5352 "3202 trunk error:0x%x (%s) seen on port0:%s " 5353 /* 5354 * SLI-4: We have only 0xA error codes 5355 * defined as of now. print an appropriate 5356 * message in case driver needs to be updated. 5357 */ 5358 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 5359 "UNDEFINED. update driver." : trunk_errmsg[err], 5360 trunk_port_fault(0), trunk_port_fault(1), 5361 trunk_port_fault(2), trunk_port_fault(3)); 5362 } 5363 5364 5365 /** 5366 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 5367 * @phba: pointer to lpfc hba data structure. 5368 * @acqe_fc: pointer to the async fc completion queue entry. 5369 * 5370 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 5371 * that the event was received and then issue a read_topology mailbox command so 5372 * that the rest of the driver will treat it the same as SLI3. 
5373 **/ 5374 static void 5375 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 5376 { 5377 struct lpfc_dmabuf *mp; 5378 LPFC_MBOXQ_t *pmb; 5379 MAILBOX_t *mb; 5380 struct lpfc_mbx_read_top *la; 5381 int rc; 5382 5383 if (bf_get(lpfc_trailer_type, acqe_fc) != 5384 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 5385 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5386 "2895 Non FC link Event detected.(%d)\n", 5387 bf_get(lpfc_trailer_type, acqe_fc)); 5388 return; 5389 } 5390 5391 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5392 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 5393 lpfc_update_trunk_link_status(phba, acqe_fc); 5394 return; 5395 } 5396 5397 /* Keep the link status for extra SLI4 state machine reference */ 5398 phba->sli4_hba.link_state.speed = 5399 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5400 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5401 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 5402 phba->sli4_hba.link_state.topology = 5403 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 5404 phba->sli4_hba.link_state.status = 5405 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 5406 phba->sli4_hba.link_state.type = 5407 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 5408 phba->sli4_hba.link_state.number = 5409 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 5410 phba->sli4_hba.link_state.fault = 5411 bf_get(lpfc_acqe_link_fault, acqe_fc); 5412 5413 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5414 LPFC_FC_LA_TYPE_LINK_DOWN) 5415 phba->sli4_hba.link_state.logical_speed = 0; 5416 else if (!phba->sli4_hba.conf_trunk) 5417 phba->sli4_hba.link_state.logical_speed = 5418 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5419 5420 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5421 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 5422 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 5423 "%dMbps Fault:%d\n", 5424 phba->sli4_hba.link_state.speed, 5425 phba->sli4_hba.link_state.topology, 5426 phba->sli4_hba.link_state.status, 5427 phba->sli4_hba.link_state.type, 5428 phba->sli4_hba.link_state.number, 5429 phba->sli4_hba.link_state.logical_speed, 5430 phba->sli4_hba.link_state.fault); 5431 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5432 if (!pmb) { 5433 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5434 "2897 The mboxq allocation failed\n"); 5435 return; 5436 } 5437 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5438 if (!mp) { 5439 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5440 "2898 The lpfc_dmabuf allocation failed\n"); 5441 goto out_free_pmb; 5442 } 5443 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5444 if (!mp->virt) { 5445 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5446 "2899 The mbuf allocation failed\n"); 5447 goto out_free_dmabuf; 5448 } 5449 5450 /* Cleanup any outstanding ELS commands */ 5451 lpfc_els_flush_all_cmd(phba); 5452 5453 /* Block ELS IOCBs until we have done process link event */ 5454 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5455 5456 /* Update link event statistics */ 5457 phba->sli.slistat.link_event++; 5458 5459 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5460 lpfc_read_topology(phba, pmb, mp); 5461 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5462 pmb->vport = phba->pport; 5463 5464 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 5465 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 5466 5467 switch (phba->sli4_hba.link_state.status) { 5468 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 5469 phba->link_flag |= LS_MDS_LINK_DOWN; 5470 
break; 5471 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 5472 phba->link_flag |= LS_MDS_LOOPBACK; 5473 break; 5474 default: 5475 break; 5476 } 5477 5478 /* Initialize completion status */ 5479 mb = &pmb->u.mb; 5480 mb->mbxStatus = MBX_SUCCESS; 5481 5482 /* Parse port fault information field */ 5483 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 5484 5485 /* Parse and translate link attention fields */ 5486 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 5487 la->eventTag = acqe_fc->event_tag; 5488 5489 if (phba->sli4_hba.link_state.status == 5490 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 5491 bf_set(lpfc_mbx_read_top_att_type, la, 5492 LPFC_FC_LA_TYPE_UNEXP_WWPN); 5493 } else { 5494 bf_set(lpfc_mbx_read_top_att_type, la, 5495 LPFC_FC_LA_TYPE_LINK_DOWN); 5496 } 5497 /* Invoke the mailbox command callback function */ 5498 lpfc_mbx_cmpl_read_topology(phba, pmb); 5499 5500 return; 5501 } 5502 5503 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5504 if (rc == MBX_NOT_FINISHED) 5505 goto out_free_dmabuf; 5506 return; 5507 5508 out_free_dmabuf: 5509 kfree(mp); 5510 out_free_pmb: 5511 mempool_free(pmb, phba->mbox_mem_pool); 5512 } 5513 5514 /** 5515 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 5516 * @phba: pointer to lpfc hba data structure. 5517 * @acqe_sli: pointer to the async SLI completion queue entry. 5518 * 5519 * This routine is to handle the SLI4 asynchronous SLI events. 5520 **/ 5521 static void 5522 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 5523 { 5524 char port_name; 5525 char message[128]; 5526 uint8_t status; 5527 uint8_t evt_type; 5528 uint8_t operational = 0; 5529 struct temp_event temp_event_data; 5530 struct lpfc_acqe_misconfigured_event *misconfigured; 5531 struct Scsi_Host *shost; 5532 struct lpfc_vport **vports; 5533 int rc, i; 5534 5535 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 5536 5537 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5538 "2901 Async SLI event - Type:%d, Event Data: x%08x " 5539 "x%08x x%08x x%08x\n", evt_type, 5540 acqe_sli->event_data1, acqe_sli->event_data2, 5541 acqe_sli->reserved, acqe_sli->trailer); 5542 5543 port_name = phba->Port[0]; 5544 if (port_name == 0x00) 5545 port_name = '?'; /* get port name is empty */ 5546 5547 switch (evt_type) { 5548 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 5549 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5550 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 5551 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5552 5553 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5554 "3190 Over Temperature:%d Celsius- Port Name %c\n", 5555 acqe_sli->event_data1, port_name); 5556 5557 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 5558 shost = lpfc_shost_from_vport(phba->pport); 5559 fc_host_post_vendor_event(shost, fc_get_event_number(), 5560 sizeof(temp_event_data), 5561 (char *)&temp_event_data, 5562 SCSI_NL_VID_TYPE_PCI 5563 | PCI_VENDOR_ID_EMULEX); 5564 break; 5565 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 5566 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5567 temp_event_data.event_code = LPFC_NORMAL_TEMP; 5568 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5569 5570 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5571 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 5572 acqe_sli->event_data1, port_name); 5573 5574 shost = lpfc_shost_from_vport(phba->pport); 5575 fc_host_post_vendor_event(shost, fc_get_event_number(), 5576 sizeof(temp_event_data), 5577 (char *)&temp_event_data, 5578 SCSI_NL_VID_TYPE_PCI 5579 | PCI_VENDOR_ID_EMULEX); 5580 
break; 5581 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 5582 misconfigured = (struct lpfc_acqe_misconfigured_event *) 5583 &acqe_sli->event_data1; 5584 5585 /* fetch the status for this port */ 5586 switch (phba->sli4_hba.lnk_info.lnk_no) { 5587 case LPFC_LINK_NUMBER_0: 5588 status = bf_get(lpfc_sli_misconfigured_port0_state, 5589 &misconfigured->theEvent); 5590 operational = bf_get(lpfc_sli_misconfigured_port0_op, 5591 &misconfigured->theEvent); 5592 break; 5593 case LPFC_LINK_NUMBER_1: 5594 status = bf_get(lpfc_sli_misconfigured_port1_state, 5595 &misconfigured->theEvent); 5596 operational = bf_get(lpfc_sli_misconfigured_port1_op, 5597 &misconfigured->theEvent); 5598 break; 5599 case LPFC_LINK_NUMBER_2: 5600 status = bf_get(lpfc_sli_misconfigured_port2_state, 5601 &misconfigured->theEvent); 5602 operational = bf_get(lpfc_sli_misconfigured_port2_op, 5603 &misconfigured->theEvent); 5604 break; 5605 case LPFC_LINK_NUMBER_3: 5606 status = bf_get(lpfc_sli_misconfigured_port3_state, 5607 &misconfigured->theEvent); 5608 operational = bf_get(lpfc_sli_misconfigured_port3_op, 5609 &misconfigured->theEvent); 5610 break; 5611 default: 5612 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5613 "3296 " 5614 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 5615 "event: Invalid link %d", 5616 phba->sli4_hba.lnk_info.lnk_no); 5617 return; 5618 } 5619 5620 /* Skip if optic state unchanged */ 5621 if (phba->sli4_hba.lnk_info.optic_state == status) 5622 return; 5623 5624 switch (status) { 5625 case LPFC_SLI_EVENT_STATUS_VALID: 5626 sprintf(message, "Physical Link is functional"); 5627 break; 5628 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 5629 sprintf(message, "Optics faulted/incorrectly " 5630 "installed/not installed - Reseat optics, " 5631 "if issue not resolved, replace."); 5632 break; 5633 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 5634 sprintf(message, 5635 "Optics of two types installed - Remove one " 5636 "optic or install matching pair of optics."); 5637 break; 5638 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 5639 sprintf(message, "Incompatible optics - Replace with " 5640 "compatible optics for card to function."); 5641 break; 5642 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 5643 sprintf(message, "Unqualified optics - Replace with " 5644 "Avago optics for Warranty and Technical " 5645 "Support - Link is%s operational", 5646 (operational) ? " not" : ""); 5647 break; 5648 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 5649 sprintf(message, "Uncertified optics - Replace with " 5650 "Avago-certified optics to enable link " 5651 "operation - Link is%s operational", 5652 (operational) ? 
" not" : ""); 5653 break; 5654 default: 5655 /* firmware is reporting a status we don't know about */ 5656 sprintf(message, "Unknown event status x%02x", status); 5657 break; 5658 } 5659 5660 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 5661 rc = lpfc_sli4_read_config(phba); 5662 if (rc) { 5663 phba->lmt = 0; 5664 lpfc_printf_log(phba, KERN_ERR, 5665 LOG_TRACE_EVENT, 5666 "3194 Unable to retrieve supported " 5667 "speeds, rc = 0x%x\n", rc); 5668 } 5669 vports = lpfc_create_vport_work_array(phba); 5670 if (vports != NULL) { 5671 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5672 i++) { 5673 shost = lpfc_shost_from_vport(vports[i]); 5674 lpfc_host_supported_speeds_set(shost); 5675 } 5676 } 5677 lpfc_destroy_vport_work_array(phba, vports); 5678 5679 phba->sli4_hba.lnk_info.optic_state = status; 5680 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5681 "3176 Port Name %c %s\n", port_name, message); 5682 break; 5683 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 5684 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5685 "3192 Remote DPort Test Initiated - " 5686 "Event Data1:x%08x Event Data2: x%08x\n", 5687 acqe_sli->event_data1, acqe_sli->event_data2); 5688 break; 5689 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 5690 /* Misconfigured WWN. Reports that the SLI Port is configured 5691 * to use FA-WWN, but the attached device doesn’t support it. 5692 * No driver action is required. 5693 * Event Data1 - N.A, Event Data2 - N.A 5694 */ 5695 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, 5696 "2699 Misconfigured FA-WWN - Attached device does " 5697 "not support FA-WWN\n"); 5698 break; 5699 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 5700 /* EEPROM failure. No driver action is required */ 5701 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5702 "2518 EEPROM failure - " 5703 "Event Data1: x%08x Event Data2: x%08x\n", 5704 acqe_sli->event_data1, acqe_sli->event_data2); 5705 break; 5706 default: 5707 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5708 "3193 Unrecognized SLI event, type: 0x%x", 5709 evt_type); 5710 break; 5711 } 5712 } 5713 5714 /** 5715 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 5716 * @vport: pointer to vport data structure. 5717 * 5718 * This routine is to perform Clear Virtual Link (CVL) on a vport in 5719 * response to a CVL event. 5720 * 5721 * Return the pointer to the ndlp with the vport if successful, otherwise 5722 * return NULL. 
5723 **/ 5724 static struct lpfc_nodelist * 5725 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 5726 { 5727 struct lpfc_nodelist *ndlp; 5728 struct Scsi_Host *shost; 5729 struct lpfc_hba *phba; 5730 5731 if (!vport) 5732 return NULL; 5733 phba = vport->phba; 5734 if (!phba) 5735 return NULL; 5736 ndlp = lpfc_findnode_did(vport, Fabric_DID); 5737 if (!ndlp) { 5738 /* Cannot find existing Fabric ndlp, so allocate a new one */ 5739 ndlp = lpfc_nlp_init(vport, Fabric_DID); 5740 if (!ndlp) 5741 return 0; 5742 /* Set the node type */ 5743 ndlp->nlp_type |= NLP_FABRIC; 5744 /* Put ndlp onto node list */ 5745 lpfc_enqueue_node(vport, ndlp); 5746 } 5747 if ((phba->pport->port_state < LPFC_FLOGI) && 5748 (phba->pport->port_state != LPFC_VPORT_FAILED)) 5749 return NULL; 5750 /* If virtual link is not yet instantiated ignore CVL */ 5751 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 5752 && (vport->port_state != LPFC_VPORT_FAILED)) 5753 return NULL; 5754 shost = lpfc_shost_from_vport(vport); 5755 if (!shost) 5756 return NULL; 5757 lpfc_linkdown_port(vport); 5758 lpfc_cleanup_pending_mbox(vport); 5759 spin_lock_irq(shost->host_lock); 5760 vport->fc_flag |= FC_VPORT_CVL_RCVD; 5761 spin_unlock_irq(shost->host_lock); 5762 5763 return ndlp; 5764 } 5765 5766 /** 5767 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 5768 * @phba: pointer to lpfc hba data structure. 5769 * 5770 * This routine is to perform Clear Virtual Link (CVL) on all vports in 5771 * response to a FCF dead event. 5772 **/ 5773 static void 5774 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 5775 { 5776 struct lpfc_vport **vports; 5777 int i; 5778 5779 vports = lpfc_create_vport_work_array(phba); 5780 if (vports) 5781 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 5782 lpfc_sli4_perform_vport_cvl(vports[i]); 5783 lpfc_destroy_vport_work_array(phba, vports); 5784 } 5785 5786 /** 5787 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 5788 * @phba: pointer to lpfc hba data structure. 5789 * @acqe_fip: pointer to the async fcoe completion queue entry. 5790 * 5791 * This routine is to handle the SLI4 asynchronous fcoe event. 5792 **/ 5793 static void 5794 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 5795 struct lpfc_acqe_fip *acqe_fip) 5796 { 5797 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 5798 int rc; 5799 struct lpfc_vport *vport; 5800 struct lpfc_nodelist *ndlp; 5801 int active_vlink_present; 5802 struct lpfc_vport **vports; 5803 int i; 5804 5805 phba->fc_eventTag = acqe_fip->event_tag; 5806 phba->fcoe_eventtag = acqe_fip->event_tag; 5807 switch (event_type) { 5808 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5809 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5810 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5811 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5812 "2546 New FCF event, evt_tag:x%x, " 5813 "index:x%x\n", 5814 acqe_fip->event_tag, 5815 acqe_fip->index); 5816 else 5817 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 5818 LOG_DISCOVERY, 5819 "2788 FCF param modified event, " 5820 "evt_tag:x%x, index:x%x\n", 5821 acqe_fip->event_tag, 5822 acqe_fip->index); 5823 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5824 /* 5825 * During period of FCF discovery, read the FCF 5826 * table record indexed by the event to update 5827 * FCF roundrobin failover eligible FCF bmask. 
5828 */ 5829 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5830 LOG_DISCOVERY, 5831 "2779 Read FCF (x%x) for updating " 5832 "roundrobin FCF failover bmask\n", 5833 acqe_fip->index); 5834 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5835 } 5836 5837 /* If the FCF discovery is in progress, do nothing. */ 5838 spin_lock_irq(&phba->hbalock); 5839 if (phba->hba_flag & FCF_TS_INPROG) { 5840 spin_unlock_irq(&phba->hbalock); 5841 break; 5842 } 5843 /* If fast FCF failover rescan event is pending, do nothing */ 5844 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 5845 spin_unlock_irq(&phba->hbalock); 5846 break; 5847 } 5848 5849 /* If the FCF has been in discovered state, do nothing. */ 5850 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5851 spin_unlock_irq(&phba->hbalock); 5852 break; 5853 } 5854 spin_unlock_irq(&phba->hbalock); 5855 5856 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5857 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5858 "2770 Start FCF table scan per async FCF " 5859 "event, evt_tag:x%x, index:x%x\n", 5860 acqe_fip->event_tag, acqe_fip->index); 5861 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5862 LPFC_FCOE_FCF_GET_FIRST); 5863 if (rc) 5864 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5865 "2547 Issue FCF scan read FCF mailbox " 5866 "command failed (x%x)\n", rc); 5867 break; 5868 5869 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5870 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5871 "2548 FCF Table full count 0x%x tag 0x%x\n", 5872 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5873 acqe_fip->event_tag); 5874 break; 5875 5876 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5877 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5878 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5879 "2549 FCF (x%x) disconnected from network, " 5880 "tag:x%x\n", acqe_fip->index, 5881 acqe_fip->event_tag); 5882 /* 5883 * If we are in the middle of FCF failover process, clear 5884 * the corresponding FCF bit in the roundrobin bitmap. 5885 */ 5886 spin_lock_irq(&phba->hbalock); 5887 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 5888 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 5889 spin_unlock_irq(&phba->hbalock); 5890 /* Update FLOGI FCF failover eligible FCF bmask */ 5891 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 5892 break; 5893 } 5894 spin_unlock_irq(&phba->hbalock); 5895 5896 /* If the event is not for currently used fcf do nothing */ 5897 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 5898 break; 5899 5900 /* 5901 * Otherwise, request the port to rediscover the entire FCF 5902 * table for a fast recovery from case that the current FCF 5903 * is no longer valid as we are not in the middle of FCF 5904 * failover process already. 
5905 */ 5906 spin_lock_irq(&phba->hbalock); 5907 /* Mark the fast failover process in progress */ 5908 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 5909 spin_unlock_irq(&phba->hbalock); 5910 5911 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5912 "2771 Start FCF fast failover process due to " 5913 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 5914 "\n", acqe_fip->event_tag, acqe_fip->index); 5915 rc = lpfc_sli4_redisc_fcf_table(phba); 5916 if (rc) { 5917 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5918 LOG_TRACE_EVENT, 5919 "2772 Issue FCF rediscover mailbox " 5920 "command failed, fail through to FCF " 5921 "dead event\n"); 5922 spin_lock_irq(&phba->hbalock); 5923 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 5924 spin_unlock_irq(&phba->hbalock); 5925 /* 5926 * Last resort will fail over by treating this 5927 * as a link down to FCF registration. 5928 */ 5929 lpfc_sli4_fcf_dead_failthrough(phba); 5930 } else { 5931 /* Reset FCF roundrobin bmask for new discovery */ 5932 lpfc_sli4_clear_fcf_rr_bmask(phba); 5933 /* 5934 * Handling fast FCF failover to a DEAD FCF event is 5935 * considered equalivant to receiving CVL to all vports. 5936 */ 5937 lpfc_sli4_perform_all_vport_cvl(phba); 5938 } 5939 break; 5940 case LPFC_FIP_EVENT_TYPE_CVL: 5941 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5942 lpfc_printf_log(phba, KERN_ERR, 5943 LOG_TRACE_EVENT, 5944 "2718 Clear Virtual Link Received for VPI 0x%x" 5945 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5946 5947 vport = lpfc_find_vport_by_vpid(phba, 5948 acqe_fip->index); 5949 ndlp = lpfc_sli4_perform_vport_cvl(vport); 5950 if (!ndlp) 5951 break; 5952 active_vlink_present = 0; 5953 5954 vports = lpfc_create_vport_work_array(phba); 5955 if (vports) { 5956 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5957 i++) { 5958 if ((!(vports[i]->fc_flag & 5959 FC_VPORT_CVL_RCVD)) && 5960 (vports[i]->port_state > LPFC_FDISC)) { 5961 active_vlink_present = 1; 5962 break; 5963 } 5964 } 5965 lpfc_destroy_vport_work_array(phba, vports); 5966 } 5967 5968 /* 5969 * Don't re-instantiate if vport is marked for deletion. 5970 * If we are here first then vport_delete is going to wait 5971 * for discovery to complete. 5972 */ 5973 if (!(vport->load_flag & FC_UNLOADING) && 5974 active_vlink_present) { 5975 /* 5976 * If there are other active VLinks present, 5977 * re-instantiate the Vlink using FDISC. 5978 */ 5979 mod_timer(&ndlp->nlp_delayfunc, 5980 jiffies + msecs_to_jiffies(1000)); 5981 spin_lock_irq(&ndlp->lock); 5982 ndlp->nlp_flag |= NLP_DELAY_TMO; 5983 spin_unlock_irq(&ndlp->lock); 5984 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 5985 vport->port_state = LPFC_FDISC; 5986 } else { 5987 /* 5988 * Otherwise, we request port to rediscover 5989 * the entire FCF table for a fast recovery 5990 * from possible case that the current FCF 5991 * is no longer valid if we are not already 5992 * in the FCF failover process. 
5993 */ 5994 spin_lock_irq(&phba->hbalock); 5995 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5996 spin_unlock_irq(&phba->hbalock); 5997 break; 5998 } 5999 /* Mark the fast failover process in progress */ 6000 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 6001 spin_unlock_irq(&phba->hbalock); 6002 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6003 LOG_DISCOVERY, 6004 "2773 Start FCF failover per CVL, " 6005 "evt_tag:x%x\n", acqe_fip->event_tag); 6006 rc = lpfc_sli4_redisc_fcf_table(phba); 6007 if (rc) { 6008 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6009 LOG_TRACE_EVENT, 6010 "2774 Issue FCF rediscover " 6011 "mailbox command failed, " 6012 "through to CVL event\n"); 6013 spin_lock_irq(&phba->hbalock); 6014 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 6015 spin_unlock_irq(&phba->hbalock); 6016 /* 6017 * Last resort will be re-try on the 6018 * the current registered FCF entry. 6019 */ 6020 lpfc_retry_pport_discovery(phba); 6021 } else 6022 /* 6023 * Reset FCF roundrobin bmask for new 6024 * discovery. 6025 */ 6026 lpfc_sli4_clear_fcf_rr_bmask(phba); 6027 } 6028 break; 6029 default: 6030 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6031 "0288 Unknown FCoE event type 0x%x event tag " 6032 "0x%x\n", event_type, acqe_fip->event_tag); 6033 break; 6034 } 6035 } 6036 6037 /** 6038 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 6039 * @phba: pointer to lpfc hba data structure. 6040 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 6041 * 6042 * This routine is to handle the SLI4 asynchronous dcbx event. 6043 **/ 6044 static void 6045 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 6046 struct lpfc_acqe_dcbx *acqe_dcbx) 6047 { 6048 phba->fc_eventTag = acqe_dcbx->event_tag; 6049 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6050 "0290 The SLI4 DCBX asynchronous event is not " 6051 "handled yet\n"); 6052 } 6053 6054 /** 6055 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 6056 * @phba: pointer to lpfc hba data structure. 6057 * @acqe_grp5: pointer to the async grp5 completion queue entry. 6058 * 6059 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 6060 * is an asynchronous notified of a logical link speed change. The Port 6061 * reports the logical link speed in units of 10Mbps. 6062 **/ 6063 static void 6064 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 6065 struct lpfc_acqe_grp5 *acqe_grp5) 6066 { 6067 uint16_t prev_ll_spd; 6068 6069 phba->fc_eventTag = acqe_grp5->event_tag; 6070 phba->fcoe_eventtag = acqe_grp5->event_tag; 6071 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 6072 phba->sli4_hba.link_state.logical_speed = 6073 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 6074 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6075 "2789 GRP5 Async Event: Updating logical link speed " 6076 "from %dMbps to %dMbps\n", prev_ll_spd, 6077 phba->sli4_hba.link_state.logical_speed); 6078 } 6079 6080 /** 6081 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 6082 * @phba: pointer to lpfc hba data structure. 6083 * 6084 * This routine is invoked by the worker thread to process all the pending 6085 * SLI4 asynchronous events. 
6086 **/ 6087 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 6088 { 6089 struct lpfc_cq_event *cq_event; 6090 unsigned long iflags; 6091 6092 /* First, declare the async event has been handled */ 6093 spin_lock_irqsave(&phba->hbalock, iflags); 6094 phba->hba_flag &= ~ASYNC_EVENT; 6095 spin_unlock_irqrestore(&phba->hbalock, iflags); 6096 6097 /* Now, handle all the async events */ 6098 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 6099 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 6100 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 6101 cq_event, struct lpfc_cq_event, list); 6102 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, 6103 iflags); 6104 6105 /* Process the asynchronous event */ 6106 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 6107 case LPFC_TRAILER_CODE_LINK: 6108 lpfc_sli4_async_link_evt(phba, 6109 &cq_event->cqe.acqe_link); 6110 break; 6111 case LPFC_TRAILER_CODE_FCOE: 6112 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 6113 break; 6114 case LPFC_TRAILER_CODE_DCBX: 6115 lpfc_sli4_async_dcbx_evt(phba, 6116 &cq_event->cqe.acqe_dcbx); 6117 break; 6118 case LPFC_TRAILER_CODE_GRP5: 6119 lpfc_sli4_async_grp5_evt(phba, 6120 &cq_event->cqe.acqe_grp5); 6121 break; 6122 case LPFC_TRAILER_CODE_FC: 6123 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 6124 break; 6125 case LPFC_TRAILER_CODE_SLI: 6126 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 6127 break; 6128 default: 6129 lpfc_printf_log(phba, KERN_ERR, 6130 LOG_TRACE_EVENT, 6131 "1804 Invalid asynchronous event code: " 6132 "x%x\n", bf_get(lpfc_trailer_code, 6133 &cq_event->cqe.mcqe_cmpl)); 6134 break; 6135 } 6136 6137 /* Free the completion event processed to the free pool */ 6138 lpfc_sli4_cq_event_release(phba, cq_event); 6139 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 6140 } 6141 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 6142 } 6143 6144 /** 6145 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 6146 * @phba: pointer to lpfc hba data structure. 6147 * 6148 * This routine is invoked by the worker thread to process FCF table 6149 * rediscovery pending completion event. 6150 **/ 6151 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 6152 { 6153 int rc; 6154 6155 spin_lock_irq(&phba->hbalock); 6156 /* Clear FCF rediscovery timeout event */ 6157 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 6158 /* Clear driver fast failover FCF record flag */ 6159 phba->fcf.failover_rec.flag = 0; 6160 /* Set state for FCF fast failover */ 6161 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 6162 spin_unlock_irq(&phba->hbalock); 6163 6164 /* Scan FCF table from the first entry to re-discover SAN */ 6165 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6166 "2777 Start post-quiescent FCF table scan\n"); 6167 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 6168 if (rc) 6169 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6170 "2747 Issue FCF scan read FCF mailbox " 6171 "command failed 0x%x\n", rc); 6172 } 6173 6174 /** 6175 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 6176 * @phba: pointer to lpfc hba data structure. 6177 * @dev_grp: The HBA PCI-Device group number. 6178 * 6179 * This routine is invoked to set up the per HBA PCI-Device group function 6180 * API jump table entries. 
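* The INIT, SCSI, SLI and MBOX API jump tables are installed in that order; a failure in any of them is reported as -ENODEV.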
6181 * 6182 * Return: 0 if success, otherwise -ENODEV 6183 **/ 6184 int 6185 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6186 { 6187 int rc; 6188 6189 /* Set up lpfc PCI-device group */ 6190 phba->pci_dev_grp = dev_grp; 6191 6192 /* The LPFC_PCI_DEV_OC uses SLI4 */ 6193 if (dev_grp == LPFC_PCI_DEV_OC) 6194 phba->sli_rev = LPFC_SLI_REV4; 6195 6196 /* Set up device INIT API function jump table */ 6197 rc = lpfc_init_api_table_setup(phba, dev_grp); 6198 if (rc) 6199 return -ENODEV; 6200 /* Set up SCSI API function jump table */ 6201 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 6202 if (rc) 6203 return -ENODEV; 6204 /* Set up SLI API function jump table */ 6205 rc = lpfc_sli_api_table_setup(phba, dev_grp); 6206 if (rc) 6207 return -ENODEV; 6208 /* Set up MBOX API function jump table */ 6209 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 6210 if (rc) 6211 return -ENODEV; 6212 6213 return 0; 6214 } 6215 6216 /** 6217 * lpfc_log_intr_mode - Log the active interrupt mode 6218 * @phba: pointer to lpfc hba data structure. 6219 * @intr_mode: active interrupt mode adopted. 6220 * 6221 * This routine it invoked to log the currently used active interrupt mode 6222 * to the device. 6223 **/ 6224 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 6225 { 6226 switch (intr_mode) { 6227 case 0: 6228 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6229 "0470 Enable INTx interrupt mode.\n"); 6230 break; 6231 case 1: 6232 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6233 "0481 Enabled MSI interrupt mode.\n"); 6234 break; 6235 case 2: 6236 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6237 "0480 Enabled MSI-X interrupt mode.\n"); 6238 break; 6239 default: 6240 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6241 "0482 Illegal interrupt mode.\n"); 6242 break; 6243 } 6244 return; 6245 } 6246 6247 /** 6248 * lpfc_enable_pci_dev - Enable a generic PCI device. 6249 * @phba: pointer to lpfc hba data structure. 6250 * 6251 * This routine is invoked to enable the PCI device that is common to all 6252 * PCI devices. 6253 * 6254 * Return codes 6255 * 0 - successful 6256 * other values - error 6257 **/ 6258 static int 6259 lpfc_enable_pci_dev(struct lpfc_hba *phba) 6260 { 6261 struct pci_dev *pdev; 6262 6263 /* Obtain PCI device reference */ 6264 if (!phba->pcidev) 6265 goto out_error; 6266 else 6267 pdev = phba->pcidev; 6268 /* Enable PCI device */ 6269 if (pci_enable_device_mem(pdev)) 6270 goto out_error; 6271 /* Request PCI resource for the device */ 6272 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 6273 goto out_disable_device; 6274 /* Set up device as PCI master and save state for EEH */ 6275 pci_set_master(pdev); 6276 pci_try_set_mwi(pdev); 6277 pci_save_state(pdev); 6278 6279 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 6280 if (pci_is_pcie(pdev)) 6281 pdev->needs_freset = 1; 6282 6283 return 0; 6284 6285 out_disable_device: 6286 pci_disable_device(pdev); 6287 out_error: 6288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6289 "1401 Failed to enable pci device\n"); 6290 return -ENODEV; 6291 } 6292 6293 /** 6294 * lpfc_disable_pci_dev - Disable a generic PCI device. 6295 * @phba: pointer to lpfc hba data structure. 6296 * 6297 * This routine is invoked to disable the PCI device that is common to all 6298 * PCI devices. 
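* The PCI memory regions requested in lpfc_enable_pci_dev() are released before the device itself is disabled.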
6299 **/ 6300 static void 6301 lpfc_disable_pci_dev(struct lpfc_hba *phba) 6302 { 6303 struct pci_dev *pdev; 6304 6305 /* Obtain PCI device reference */ 6306 if (!phba->pcidev) 6307 return; 6308 else 6309 pdev = phba->pcidev; 6310 /* Release PCI resource and disable PCI device */ 6311 pci_release_mem_regions(pdev); 6312 pci_disable_device(pdev); 6313 6314 return; 6315 } 6316 6317 /** 6318 * lpfc_reset_hba - Reset a hba 6319 * @phba: pointer to lpfc hba data structure. 6320 * 6321 * This routine is invoked to reset a hba device. It brings the HBA 6322 * offline, performs a board restart, and then brings the board back 6323 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 6324 * any outstanding mailbox commands. 6325 **/ 6326 void 6327 lpfc_reset_hba(struct lpfc_hba *phba) 6328 { 6329 /* If resets are disabled then set error state and return. */ 6330 if (!phba->cfg_enable_hba_reset) { 6331 phba->link_state = LPFC_HBA_ERROR; 6332 return; 6333 } 6334 6335 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */ 6336 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) { 6337 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6338 } else { 6339 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 6340 lpfc_sli_flush_io_rings(phba); 6341 } 6342 lpfc_offline(phba); 6343 lpfc_sli_brdrestart(phba); 6344 lpfc_online(phba); 6345 lpfc_unblock_mgmt_io(phba); 6346 } 6347 6348 /** 6349 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 6350 * @phba: pointer to lpfc hba data structure. 6351 * 6352 * This function reads the PCI SR-IOV extended capability of the physical 6353 * function and returns the maximum number of virtual functions the device 6354 * supports. If the SR-IOV capability is not present, 0 is returned. As 6355 * not all devices support SR-IOV, a return value of 0 is not considered 6356 * an error condition for most devices. 6357 **/ 6358 uint16_t 6359 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 6360 { 6361 struct pci_dev *pdev = phba->pcidev; 6362 uint16_t nr_virtfn; 6363 int pos; 6364 6365 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 6366 if (pos == 0) 6367 return 0; 6368 6369 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 6370 return nr_virtfn; 6371 } 6372 6373 /** 6374 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 6375 * @phba: pointer to lpfc hba data structure. 6376 * @nr_vfn: number of virtual functions to be enabled. 6377 * 6378 * This function enables PCI SR-IOV virtual functions on a physical 6379 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 6380 * enable the number of virtual functions to the physical function. As 6381 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 6382 * API call is not considered an error condition for most devices.
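* Return: 0 if the virtual functions were enabled, -EINVAL if @nr_vfn exceeds the maximum reported by the SR-IOV capability, otherwise the pci_enable_sriov() return code.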
6383 **/ 6384 int 6385 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 6386 { 6387 struct pci_dev *pdev = phba->pcidev; 6388 uint16_t max_nr_vfn; 6389 int rc; 6390 6391 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 6392 if (nr_vfn > max_nr_vfn) { 6393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6394 "3057 Requested vfs (%d) greater than " 6395 "supported vfs (%d)", nr_vfn, max_nr_vfn); 6396 return -EINVAL; 6397 } 6398 6399 rc = pci_enable_sriov(pdev, nr_vfn); 6400 if (rc) { 6401 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6402 "2806 Failed to enable sriov on this device " 6403 "with vfn number nr_vf:%d, rc:%d\n", 6404 nr_vfn, rc); 6405 } else 6406 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6407 "2807 Successful enable sriov on this device " 6408 "with vfn number nr_vf:%d\n", nr_vfn); 6409 return rc; 6410 } 6411 6412 /** 6413 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. 6414 * @phba: pointer to lpfc hba data structure. 6415 * 6416 * This routine is invoked to set up the driver internal resources before the 6417 * device specific resource setup to support the HBA device it attached to. 6418 * 6419 * Return codes 6420 * 0 - successful 6421 * other values - error 6422 **/ 6423 static int 6424 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 6425 { 6426 struct lpfc_sli *psli = &phba->sli; 6427 6428 /* 6429 * Driver resources common to all SLI revisions 6430 */ 6431 atomic_set(&phba->fast_event_count, 0); 6432 atomic_set(&phba->dbg_log_idx, 0); 6433 atomic_set(&phba->dbg_log_cnt, 0); 6434 atomic_set(&phba->dbg_log_dmping, 0); 6435 spin_lock_init(&phba->hbalock); 6436 6437 /* Initialize port_list spinlock */ 6438 spin_lock_init(&phba->port_list_lock); 6439 INIT_LIST_HEAD(&phba->port_list); 6440 6441 INIT_LIST_HEAD(&phba->work_list); 6442 init_waitqueue_head(&phba->wait_4_mlo_m_q); 6443 6444 /* Initialize the wait queue head for the kernel thread */ 6445 init_waitqueue_head(&phba->work_waitq); 6446 6447 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6448 "1403 Protocols supported %s %s %s\n", 6449 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 6450 "SCSI" : " "), 6451 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 6452 "NVME" : " "), 6453 (phba->nvmet_support ?
"NVMET" : " ")); 6454 6455 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 6456 spin_lock_init(&phba->scsi_buf_list_get_lock); 6457 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 6458 spin_lock_init(&phba->scsi_buf_list_put_lock); 6459 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 6460 6461 /* Initialize the fabric iocb list */ 6462 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6463 6464 /* Initialize list to save ELS buffers */ 6465 INIT_LIST_HEAD(&phba->elsbuf); 6466 6467 /* Initialize FCF connection rec list */ 6468 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 6469 6470 /* Initialize OAS configuration list */ 6471 spin_lock_init(&phba->devicelock); 6472 INIT_LIST_HEAD(&phba->luns); 6473 6474 /* MBOX heartbeat timer */ 6475 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 6476 /* Fabric block timer */ 6477 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 6478 /* EA polling mode timer */ 6479 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 6480 /* Heartbeat timer */ 6481 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 6482 6483 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 6484 6485 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, 6486 lpfc_idle_stat_delay_work); 6487 6488 return 0; 6489 } 6490 6491 /** 6492 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 6493 * @phba: pointer to lpfc hba data structure. 6494 * 6495 * This routine is invoked to set up the driver internal resources specific to 6496 * support the SLI-3 HBA device it attached to. 6497 * 6498 * Return codes 6499 * 0 - successful 6500 * other values - error 6501 **/ 6502 static int 6503 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 6504 { 6505 int rc, entry_sz; 6506 6507 /* 6508 * Initialize timers used by driver 6509 */ 6510 6511 /* FCP polling mode timer */ 6512 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 6513 6514 /* Host attention work mask setup */ 6515 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6516 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6517 6518 /* Get all the module params for configuring this host */ 6519 lpfc_get_cfgparam(phba); 6520 /* Set up phase-1 common device driver resources */ 6521 6522 rc = lpfc_setup_driver_resource_phase1(phba); 6523 if (rc) 6524 return -ENODEV; 6525 6526 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 6527 phba->menlo_flag |= HBA_MENLO_SUPPORT; 6528 /* check for menlo minimum sg count */ 6529 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 6530 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 6531 } 6532 6533 if (!phba->sli.sli3_ring) 6534 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 6535 sizeof(struct lpfc_sli_ring), 6536 GFP_KERNEL); 6537 if (!phba->sli.sli3_ring) 6538 return -ENOMEM; 6539 6540 /* 6541 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 6542 * used to create the sg_dma_buf_pool must be dynamically calculated. 6543 */ 6544 6545 if (phba->sli_rev == LPFC_SLI_REV4) 6546 entry_sz = sizeof(struct sli4_sge); 6547 else 6548 entry_sz = sizeof(struct ulp_bde64); 6549 6550 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 6551 if (phba->cfg_enable_bg) { 6552 /* 6553 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 6554 * the FCP rsp, and a BDE for each. 
Since we have no control 6555 * over how many protection data segments the SCSI Layer 6556 * will hand us (ie: there could be one for every block 6557 * in the IO), we just allocate enough BDEs to accommodate 6558 * our max amount and we need to limit lpfc_sg_seg_cnt to 6559 * minimize the risk of running out. 6560 */ 6561 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6562 sizeof(struct fcp_rsp) + 6563 (LPFC_MAX_SG_SEG_CNT * entry_sz); 6564 6565 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 6566 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 6567 6568 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 6569 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 6570 } else { 6571 /* 6572 * The scsi_buf for a regular I/O will hold the FCP cmnd, 6573 * the FCP rsp, a BDE for each, and a BDE for up to 6574 * cfg_sg_seg_cnt data segments. 6575 */ 6576 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6577 sizeof(struct fcp_rsp) + 6578 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 6579 6580 /* Total BDEs in BPL for scsi_sg_list */ 6581 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 6582 } 6583 6584 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6585 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 6586 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6587 phba->cfg_total_seg_cnt); 6588 6589 phba->max_vpi = LPFC_MAX_VPI; 6590 /* This will be set to correct value after config_port mbox */ 6591 phba->max_vports = 0; 6592 6593 /* 6594 * Initialize the SLI Layer to run with lpfc HBAs. 6595 */ 6596 lpfc_sli_setup(phba); 6597 lpfc_sli_queue_init(phba); 6598 6599 /* Allocate device driver memory */ 6600 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 6601 return -ENOMEM; 6602 6603 phba->lpfc_sg_dma_buf_pool = 6604 dma_pool_create("lpfc_sg_dma_buf_pool", 6605 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, 6606 BPL_ALIGN_SZ, 0); 6607 6608 if (!phba->lpfc_sg_dma_buf_pool) 6609 goto fail_free_mem; 6610 6611 phba->lpfc_cmd_rsp_buf_pool = 6612 dma_pool_create("lpfc_cmd_rsp_buf_pool", 6613 &phba->pcidev->dev, 6614 sizeof(struct fcp_cmnd) + 6615 sizeof(struct fcp_rsp), 6616 BPL_ALIGN_SZ, 0); 6617 6618 if (!phba->lpfc_cmd_rsp_buf_pool) 6619 goto fail_free_dma_buf_pool; 6620 6621 /* 6622 * Enable sr-iov virtual functions if supported and configured 6623 * through the module parameter. 6624 */ 6625 if (phba->cfg_sriov_nr_virtfn > 0) { 6626 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6627 phba->cfg_sriov_nr_virtfn); 6628 if (rc) { 6629 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6630 "2808 Requested number of SR-IOV " 6631 "virtual functions (%d) is not " 6632 "supported\n", 6633 phba->cfg_sriov_nr_virtfn); 6634 phba->cfg_sriov_nr_virtfn = 0; 6635 } 6636 } 6637 6638 return 0; 6639 6640 fail_free_dma_buf_pool: 6641 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 6642 phba->lpfc_sg_dma_buf_pool = NULL; 6643 fail_free_mem: 6644 lpfc_mem_free(phba); 6645 return -ENOMEM; 6646 } 6647 6648 /** 6649 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 6650 * @phba: pointer to lpfc hba data structure. 6651 * 6652 * This routine is invoked to unset the driver internal resources set up 6653 * specific for supporting the SLI-3 HBA device it attached to.
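* All driver memory allocated by lpfc_sli_driver_resource_setup() is released here through lpfc_mem_free_all().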
6654 **/ 6655 static void 6656 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 6657 { 6658 /* Free device driver memory allocated */ 6659 lpfc_mem_free_all(phba); 6660 6661 return; 6662 } 6663 6664 /** 6665 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 6666 * @phba: pointer to lpfc hba data structure. 6667 * 6668 * This routine is invoked to set up the driver internal resources specific to 6669 * support the SLI-4 HBA device it attached to. 6670 * 6671 * Return codes 6672 * 0 - successful 6673 * other values - error 6674 **/ 6675 static int 6676 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 6677 { 6678 LPFC_MBOXQ_t *mboxq; 6679 MAILBOX_t *mb; 6680 int rc, i, max_buf_size; 6681 int longs; 6682 int extra; 6683 uint64_t wwn; 6684 u32 if_type; 6685 u32 if_fam; 6686 6687 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 6688 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 6689 phba->sli4_hba.curr_disp_cpu = 0; 6690 6691 /* Get all the module params for configuring this host */ 6692 lpfc_get_cfgparam(phba); 6693 6694 /* Set up phase-1 common device driver resources */ 6695 rc = lpfc_setup_driver_resource_phase1(phba); 6696 if (rc) 6697 return -ENODEV; 6698 6699 /* Before proceed, wait for POST done and device ready */ 6700 rc = lpfc_sli4_post_status_check(phba); 6701 if (rc) 6702 return -ENODEV; 6703 6704 /* Allocate all driver workqueues here */ 6705 6706 /* The lpfc_wq workqueue for deferred irq use */ 6707 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6708 6709 /* 6710 * Initialize timers used by driver 6711 */ 6712 6713 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 6714 6715 /* FCF rediscover timer */ 6716 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 6717 6718 /* 6719 * Control structure for handling external multi-buffer mailbox 6720 * command pass-through. 6721 */ 6722 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 6723 sizeof(struct lpfc_mbox_ext_buf_ctx)); 6724 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 6725 6726 phba->max_vpi = LPFC_MAX_VPI; 6727 6728 /* This will be set to correct value after the read_config mbox */ 6729 phba->max_vports = 0; 6730 6731 /* Program the default value of vlan_id and fc_map */ 6732 phba->valid_vlan = 0; 6733 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 6734 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 6735 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 6736 6737 /* 6738 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 6739 * we will associate a new ring, for each EQ/CQ/WQ tuple. 6740 * The WQ create will allocate the ring. 6741 */ 6742 6743 /* Initialize buffer queue management fields */ 6744 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6745 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 6746 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 6747 6748 /* for VMID idle timeout if VMID is enabled */ 6749 if (lpfc_is_vmid_enabled(phba)) 6750 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); 6751 6752 /* 6753 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
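* Abort buffer lists, the slow-path CQ event queues, the extent block lists and the mailbox queues are initialized below before any driver memory pools are allocated.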
6754 */ 6755 /* Initialize the Abort buffer list used by driver */ 6756 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 6757 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 6758 6759 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6760 /* Initialize the Abort nvme buffer list used by driver */ 6761 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 6762 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6763 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 6764 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 6765 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 6766 } 6767 6768 /* This abort list used by worker thread */ 6769 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 6770 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 6771 spin_lock_init(&phba->sli4_hba.asynce_list_lock); 6772 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); 6773 6774 /* 6775 * Initialize driver internal slow-path work queues 6776 */ 6777 6778 /* Driver internel slow-path CQ Event pool */ 6779 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 6780 /* Response IOCB work queue list */ 6781 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 6782 /* Asynchronous event CQ Event work queue list */ 6783 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 6784 /* Slow-path XRI aborted CQ Event work queue list */ 6785 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 6786 /* Receive queue CQ Event work queue list */ 6787 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 6788 6789 /* Initialize extent block lists. */ 6790 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 6791 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 6792 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6793 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6794 6795 /* Initialize mboxq lists. If the early init routines fail 6796 * these lists need to be correctly initialized. 6797 */ 6798 INIT_LIST_HEAD(&phba->sli.mboxq); 6799 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6800 6801 /* initialize optic_state to 0xFF */ 6802 phba->sli4_hba.lnk_info.optic_state = 0xff; 6803 6804 /* Allocate device driver memory */ 6805 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6806 if (rc) 6807 return -ENOMEM; 6808 6809 /* IF Type 2 ports get initialized now. */ 6810 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 6811 LPFC_SLI_INTF_IF_TYPE_2) { 6812 rc = lpfc_pci_function_reset(phba); 6813 if (unlikely(rc)) { 6814 rc = -ENODEV; 6815 goto out_free_mem; 6816 } 6817 phba->temp_sensor_support = 1; 6818 } 6819 6820 /* Create the bootstrap mailbox command */ 6821 rc = lpfc_create_bootstrap_mbox(phba); 6822 if (unlikely(rc)) 6823 goto out_free_mem; 6824 6825 /* Set up the host's endian order with the device. */ 6826 rc = lpfc_setup_endian_order(phba); 6827 if (unlikely(rc)) 6828 goto out_free_bsmbx; 6829 6830 /* Set up the hba's configuration parameters. */ 6831 rc = lpfc_sli4_read_config(phba); 6832 if (unlikely(rc)) 6833 goto out_free_bsmbx; 6834 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6835 if (unlikely(rc)) 6836 goto out_free_bsmbx; 6837 6838 /* IF Type 0 ports get initialized now. 
*/ 6839 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6840 LPFC_SLI_INTF_IF_TYPE_0) { 6841 rc = lpfc_pci_function_reset(phba); 6842 if (unlikely(rc)) 6843 goto out_free_bsmbx; 6844 } 6845 6846 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6847 GFP_KERNEL); 6848 if (!mboxq) { 6849 rc = -ENOMEM; 6850 goto out_free_bsmbx; 6851 } 6852 6853 /* Check for NVMET being configured */ 6854 phba->nvmet_support = 0; 6855 if (lpfc_enable_nvmet_cnt) { 6856 6857 /* First get WWN of HBA instance */ 6858 lpfc_read_nv(phba, mboxq); 6859 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6860 if (rc != MBX_SUCCESS) { 6861 lpfc_printf_log(phba, KERN_ERR, 6862 LOG_TRACE_EVENT, 6863 "6016 Mailbox failed , mbxCmd x%x " 6864 "READ_NV, mbxStatus x%x\n", 6865 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6866 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6867 mempool_free(mboxq, phba->mbox_mem_pool); 6868 rc = -EIO; 6869 goto out_free_bsmbx; 6870 } 6871 mb = &mboxq->u.mb; 6872 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6873 sizeof(uint64_t)); 6874 wwn = cpu_to_be64(wwn); 6875 phba->sli4_hba.wwnn.u.name = wwn; 6876 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6877 sizeof(uint64_t)); 6878 /* wwn is WWPN of HBA instance */ 6879 wwn = cpu_to_be64(wwn); 6880 phba->sli4_hba.wwpn.u.name = wwn; 6881 6882 /* Check to see if it matches any module parameter */ 6883 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6884 if (wwn == lpfc_enable_nvmet[i]) { 6885 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6886 if (lpfc_nvmet_mem_alloc(phba)) 6887 break; 6888 6889 phba->nvmet_support = 1; /* a match */ 6890 6891 lpfc_printf_log(phba, KERN_ERR, 6892 LOG_TRACE_EVENT, 6893 "6017 NVME Target %016llx\n", 6894 wwn); 6895 #else 6896 lpfc_printf_log(phba, KERN_ERR, 6897 LOG_TRACE_EVENT, 6898 "6021 Can't enable NVME Target." 6899 " NVME_TARGET_FC infrastructure" 6900 " is not in kernel\n"); 6901 #endif 6902 /* Not supported for NVMET */ 6903 phba->cfg_xri_rebalancing = 0; 6904 if (phba->irq_chann_mode == NHT_MODE) { 6905 phba->cfg_irq_chann = 6906 phba->sli4_hba.num_present_cpu; 6907 phba->cfg_hdw_queue = 6908 phba->sli4_hba.num_present_cpu; 6909 phba->irq_chann_mode = NORMAL_MODE; 6910 } 6911 break; 6912 } 6913 } 6914 } 6915 6916 lpfc_nvme_mod_param_dep(phba); 6917 6918 /* 6919 * Get sli4 parameters that override parameters from Port capabilities. 6920 * If this call fails, it isn't critical unless the SLI4 parameters come 6921 * back in conflict. 6922 */ 6923 rc = lpfc_get_sli4_parameters(phba, mboxq); 6924 if (rc) { 6925 if_type = bf_get(lpfc_sli_intf_if_type, 6926 &phba->sli4_hba.sli_intf); 6927 if_fam = bf_get(lpfc_sli_intf_sli_family, 6928 &phba->sli4_hba.sli_intf); 6929 if (phba->sli4_hba.extents_in_use && 6930 phba->sli4_hba.rpi_hdrs_in_use) { 6931 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6932 "2999 Unsupported SLI4 Parameters " 6933 "Extents and RPI headers enabled.\n"); 6934 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6935 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6936 mempool_free(mboxq, phba->mbox_mem_pool); 6937 rc = -EIO; 6938 goto out_free_bsmbx; 6939 } 6940 } 6941 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6942 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 6943 mempool_free(mboxq, phba->mbox_mem_pool); 6944 rc = -EIO; 6945 goto out_free_bsmbx; 6946 } 6947 } 6948 6949 /* 6950 * 1 for cmd, 1 for rsp, NVME adds an extra one 6951 * for boundary conditions in its max_sgl_segment template. 
6952 */ 6953 extra = 2; 6954 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 6955 extra++; 6956 6957 /* 6958 * It doesn't matter what family our adapter is in, we are 6959 * limited to 2 Pages, 512 SGEs, for our SGL. 6960 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 6961 */ 6962 max_buf_size = (2 * SLI4_PAGE_SIZE); 6963 6964 /* 6965 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 6966 * used to create the sg_dma_buf_pool must be calculated. 6967 */ 6968 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 6969 /* Both cfg_enable_bg and cfg_external_dif code paths */ 6970 6971 /* 6972 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 6973 * the FCP rsp, and a SGE. Sice we have no control 6974 * over how many protection segments the SCSI Layer 6975 * will hand us (ie: there could be one for every block 6976 * in the IO), just allocate enough SGEs to accomidate 6977 * our max amount and we need to limit lpfc_sg_seg_cnt 6978 * to minimize the risk of running out. 6979 */ 6980 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6981 sizeof(struct fcp_rsp) + max_buf_size; 6982 6983 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 6984 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 6985 6986 /* 6987 * If supporting DIF, reduce the seg count for scsi to 6988 * allow room for the DIF sges. 6989 */ 6990 if (phba->cfg_enable_bg && 6991 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 6992 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 6993 else 6994 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6995 6996 } else { 6997 /* 6998 * The scsi_buf for a regular I/O holds the FCP cmnd, 6999 * the FCP rsp, a SGE for each, and a SGE for up to 7000 * cfg_sg_seg_cnt data segments. 7001 */ 7002 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7003 sizeof(struct fcp_rsp) + 7004 ((phba->cfg_sg_seg_cnt + extra) * 7005 sizeof(struct sli4_sge)); 7006 7007 /* Total SGEs for scsi_sg_list */ 7008 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 7009 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 7010 7011 /* 7012 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 7013 * need to post 1 page for the SGL. 7014 */ 7015 } 7016 7017 if (phba->cfg_xpsgl && !phba->nvmet_support) 7018 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 7019 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 7020 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 7021 else 7022 phba->cfg_sg_dma_buf_size = 7023 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 7024 7025 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 7026 sizeof(struct sli4_sge); 7027 7028 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. 
*/ 7029 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 7030 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 7031 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 7032 "6300 Reducing NVME sg segment " 7033 "cnt to %d\n", 7034 LPFC_MAX_NVME_SEG_CNT); 7035 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 7036 } else 7037 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 7038 } 7039 7040 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 7041 "9087 sg_seg_cnt:%d dmabuf_size:%d " 7042 "total:%d scsi:%d nvme:%d\n", 7043 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 7044 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 7045 phba->cfg_nvme_seg_cnt); 7046 7047 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 7048 i = phba->cfg_sg_dma_buf_size; 7049 else 7050 i = SLI4_PAGE_SIZE; 7051 7052 phba->lpfc_sg_dma_buf_pool = 7053 dma_pool_create("lpfc_sg_dma_buf_pool", 7054 &phba->pcidev->dev, 7055 phba->cfg_sg_dma_buf_size, 7056 i, 0); 7057 if (!phba->lpfc_sg_dma_buf_pool) 7058 goto out_free_bsmbx; 7059 7060 phba->lpfc_cmd_rsp_buf_pool = 7061 dma_pool_create("lpfc_cmd_rsp_buf_pool", 7062 &phba->pcidev->dev, 7063 sizeof(struct fcp_cmnd) + 7064 sizeof(struct fcp_rsp), 7065 i, 0); 7066 if (!phba->lpfc_cmd_rsp_buf_pool) 7067 goto out_free_sg_dma_buf; 7068 7069 mempool_free(mboxq, phba->mbox_mem_pool); 7070 7071 /* Verify OAS is supported */ 7072 lpfc_sli4_oas_verify(phba); 7073 7074 /* Verify RAS support on adapter */ 7075 lpfc_sli4_ras_init(phba); 7076 7077 /* Verify all the SLI4 queues */ 7078 rc = lpfc_sli4_queue_verify(phba); 7079 if (rc) 7080 goto out_free_cmd_rsp_buf; 7081 7082 /* Create driver internal CQE event pool */ 7083 rc = lpfc_sli4_cq_event_pool_create(phba); 7084 if (rc) 7085 goto out_free_cmd_rsp_buf; 7086 7087 /* Initialize sgl lists per host */ 7088 lpfc_init_sgl_list(phba); 7089 7090 /* Allocate and initialize active sgl array */ 7091 rc = lpfc_init_active_sgl_array(phba); 7092 if (rc) { 7093 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7094 "1430 Failed to initialize sgl list.\n"); 7095 goto out_destroy_cq_event_pool; 7096 } 7097 rc = lpfc_sli4_init_rpi_hdrs(phba); 7098 if (rc) { 7099 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7100 "1432 Failed to initialize rpi headers.\n"); 7101 goto out_free_active_sgl; 7102 } 7103 7104 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 7105 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 7106 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 7107 GFP_KERNEL); 7108 if (!phba->fcf.fcf_rr_bmask) { 7109 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7110 "2759 Failed allocate memory for FCF round " 7111 "robin failover bmask\n"); 7112 rc = -ENOMEM; 7113 goto out_remove_rpi_hdrs; 7114 } 7115 7116 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 7117 sizeof(struct lpfc_hba_eq_hdl), 7118 GFP_KERNEL); 7119 if (!phba->sli4_hba.hba_eq_hdl) { 7120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7121 "2572 Failed allocate memory for " 7122 "fast-path per-EQ handle array\n"); 7123 rc = -ENOMEM; 7124 goto out_free_fcf_rr_bmask; 7125 } 7126 7127 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 7128 sizeof(struct lpfc_vector_map_info), 7129 GFP_KERNEL); 7130 if (!phba->sli4_hba.cpu_map) { 7131 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7132 "3327 Failed allocate memory for msi-x " 7133 "interrupt vector mapping\n"); 7134 rc = -ENOMEM; 7135 goto out_free_hba_eq_hdl; 7136 } 7137 7138 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 7139 if 
(!phba->sli4_hba.eq_info) { 7140 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7141 "3321 Failed allocation for per_cpu stats\n"); 7142 rc = -ENOMEM; 7143 goto out_free_hba_cpu_map; 7144 } 7145 7146 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, 7147 sizeof(*phba->sli4_hba.idle_stat), 7148 GFP_KERNEL); 7149 if (!phba->sli4_hba.idle_stat) { 7150 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7151 "3390 Failed allocation for idle_stat\n"); 7152 rc = -ENOMEM; 7153 goto out_free_hba_eq_info; 7154 } 7155 7156 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 7157 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 7158 if (!phba->sli4_hba.c_stat) { 7159 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7160 "3332 Failed allocating per cpu hdwq stats\n"); 7161 rc = -ENOMEM; 7162 goto out_free_hba_idle_stat; 7163 } 7164 #endif 7165 7166 /* 7167 * Enable sr-iov virtual functions if supported and configured 7168 * through the module parameter. 7169 */ 7170 if (phba->cfg_sriov_nr_virtfn > 0) { 7171 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 7172 phba->cfg_sriov_nr_virtfn); 7173 if (rc) { 7174 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7175 "3020 Requested number of SR-IOV " 7176 "virtual functions (%d) is not " 7177 "supported\n", 7178 phba->cfg_sriov_nr_virtfn); 7179 phba->cfg_sriov_nr_virtfn = 0; 7180 } 7181 } 7182 7183 return 0; 7184 7185 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 7186 out_free_hba_idle_stat: 7187 kfree(phba->sli4_hba.idle_stat); 7188 #endif 7189 out_free_hba_eq_info: 7190 free_percpu(phba->sli4_hba.eq_info); 7191 out_free_hba_cpu_map: 7192 kfree(phba->sli4_hba.cpu_map); 7193 out_free_hba_eq_hdl: 7194 kfree(phba->sli4_hba.hba_eq_hdl); 7195 out_free_fcf_rr_bmask: 7196 kfree(phba->fcf.fcf_rr_bmask); 7197 out_remove_rpi_hdrs: 7198 lpfc_sli4_remove_rpi_hdrs(phba); 7199 out_free_active_sgl: 7200 lpfc_free_active_sgl(phba); 7201 out_destroy_cq_event_pool: 7202 lpfc_sli4_cq_event_pool_destroy(phba); 7203 out_free_cmd_rsp_buf: 7204 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 7205 phba->lpfc_cmd_rsp_buf_pool = NULL; 7206 out_free_sg_dma_buf: 7207 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 7208 phba->lpfc_sg_dma_buf_pool = NULL; 7209 out_free_bsmbx: 7210 lpfc_destroy_bootstrap_mbox(phba); 7211 out_free_mem: 7212 lpfc_mem_free(phba); 7213 return rc; 7214 } 7215 7216 /** 7217 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 7218 * @phba: pointer to lpfc hba data structure. 7219 * 7220 * This routine is invoked to unset the driver internal resources set up 7221 * specific for supporting the SLI-4 HBA device it attached to. 7222 **/ 7223 static void 7224 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 7225 { 7226 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 7227 7228 free_percpu(phba->sli4_hba.eq_info); 7229 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 7230 free_percpu(phba->sli4_hba.c_stat); 7231 #endif 7232 kfree(phba->sli4_hba.idle_stat); 7233 7234 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 7235 kfree(phba->sli4_hba.cpu_map); 7236 phba->sli4_hba.num_possible_cpu = 0; 7237 phba->sli4_hba.num_present_cpu = 0; 7238 phba->sli4_hba.curr_disp_cpu = 0; 7239 cpumask_clear(&phba->sli4_hba.irq_aff_mask); 7240 7241 /* Free memory allocated for fast-path work queue handles */ 7242 kfree(phba->sli4_hba.hba_eq_hdl); 7243 7244 /* Free the allocated rpi headers. 
*/ 7245 lpfc_sli4_remove_rpi_hdrs(phba); 7246 lpfc_sli4_remove_rpis(phba); 7247 7248 /* Free eligible FCF index bmask */ 7249 kfree(phba->fcf.fcf_rr_bmask); 7250 7251 /* Free the ELS sgl list */ 7252 lpfc_free_active_sgl(phba); 7253 lpfc_free_els_sgl_list(phba); 7254 lpfc_free_nvmet_sgl_list(phba); 7255 7256 /* Free the completion queue EQ event pool */ 7257 lpfc_sli4_cq_event_release_all(phba); 7258 lpfc_sli4_cq_event_pool_destroy(phba); 7259 7260 /* Release resource identifiers. */ 7261 lpfc_sli4_dealloc_resource_identifiers(phba); 7262 7263 /* Free the bsmbx region. */ 7264 lpfc_destroy_bootstrap_mbox(phba); 7265 7266 /* Free the SLI Layer memory with SLI4 HBAs */ 7267 lpfc_mem_free_all(phba); 7268 7269 /* Free the current connect table */ 7270 list_for_each_entry_safe(conn_entry, next_conn_entry, 7271 &phba->fcf_conn_rec_list, list) { 7272 list_del_init(&conn_entry->list); 7273 kfree(conn_entry); 7274 } 7275 7276 return; 7277 } 7278 7279 /** 7280 * lpfc_init_api_table_setup - Set up init api function jump table 7281 * @phba: The hba struct for which this call is being executed. 7282 * @dev_grp: The HBA PCI-Device group number. 7283 * 7284 * This routine sets up the device INIT interface API function jump table 7285 * in @phba struct. 7286 * 7287 * Returns: 0 - success, -ENODEV - failure. 7288 **/ 7289 int 7290 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7291 { 7292 phba->lpfc_hba_init_link = lpfc_hba_init_link; 7293 phba->lpfc_hba_down_link = lpfc_hba_down_link; 7294 phba->lpfc_selective_reset = lpfc_selective_reset; 7295 switch (dev_grp) { 7296 case LPFC_PCI_DEV_LP: 7297 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 7298 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 7299 phba->lpfc_stop_port = lpfc_stop_port_s3; 7300 break; 7301 case LPFC_PCI_DEV_OC: 7302 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 7303 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 7304 phba->lpfc_stop_port = lpfc_stop_port_s4; 7305 break; 7306 default: 7307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7308 "1431 Invalid HBA PCI-device group: 0x%x\n", 7309 dev_grp); 7310 return -ENODEV; 7311 } 7312 return 0; 7313 } 7314 7315 /** 7316 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 7317 * @phba: pointer to lpfc hba data structure. 7318 * 7319 * This routine is invoked to set up the driver internal resources after the 7320 * device specific resource setup to support the HBA device it attached to. 7321 * 7322 * Return codes 7323 * 0 - successful 7324 * other values - error 7325 **/ 7326 static int 7327 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 7328 { 7329 int error; 7330 7331 /* Startup the kernel thread for this host adapter. */ 7332 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7333 "lpfc_worker_%d", phba->brd_no); 7334 if (IS_ERR(phba->worker_thread)) { 7335 error = PTR_ERR(phba->worker_thread); 7336 return error; 7337 } 7338 7339 return 0; 7340 } 7341 7342 /** 7343 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 7344 * @phba: pointer to lpfc hba data structure. 7345 * 7346 * This routine is invoked to unset the driver internal resources set up after 7347 * the device specific resource setup for supporting the HBA device it 7348 * attached to. 
7349 **/ 7350 static void 7351 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 7352 { 7353 if (phba->wq) { 7354 flush_workqueue(phba->wq); 7355 destroy_workqueue(phba->wq); 7356 phba->wq = NULL; 7357 } 7358 7359 /* Stop kernel worker thread */ 7360 if (phba->worker_thread) 7361 kthread_stop(phba->worker_thread); 7362 } 7363 7364 /** 7365 * lpfc_free_iocb_list - Free iocb list. 7366 * @phba: pointer to lpfc hba data structure. 7367 * 7368 * This routine is invoked to free the driver's IOCB list and memory. 7369 **/ 7370 void 7371 lpfc_free_iocb_list(struct lpfc_hba *phba) 7372 { 7373 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 7374 7375 spin_lock_irq(&phba->hbalock); 7376 list_for_each_entry_safe(iocbq_entry, iocbq_next, 7377 &phba->lpfc_iocb_list, list) { 7378 list_del(&iocbq_entry->list); 7379 kfree(iocbq_entry); 7380 phba->total_iocbq_bufs--; 7381 } 7382 spin_unlock_irq(&phba->hbalock); 7383 7384 return; 7385 } 7386 7387 /** 7388 * lpfc_init_iocb_list - Allocate and initialize iocb list. 7389 * @phba: pointer to lpfc hba data structure. 7390 * @iocb_count: number of requested iocbs 7391 * 7392 * This routine is invoked to allocate and initizlize the driver's IOCB 7393 * list and set up the IOCB tag array accordingly. 7394 * 7395 * Return codes 7396 * 0 - successful 7397 * other values - error 7398 **/ 7399 int 7400 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 7401 { 7402 struct lpfc_iocbq *iocbq_entry = NULL; 7403 uint16_t iotag; 7404 int i; 7405 7406 /* Initialize and populate the iocb list per host. */ 7407 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 7408 for (i = 0; i < iocb_count; i++) { 7409 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 7410 if (iocbq_entry == NULL) { 7411 printk(KERN_ERR "%s: only allocated %d iocbs of " 7412 "expected %d count. Unloading driver.\n", 7413 __func__, i, iocb_count); 7414 goto out_free_iocbq; 7415 } 7416 7417 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 7418 if (iotag == 0) { 7419 kfree(iocbq_entry); 7420 printk(KERN_ERR "%s: failed to allocate IOTAG. " 7421 "Unloading driver.\n", __func__); 7422 goto out_free_iocbq; 7423 } 7424 iocbq_entry->sli4_lxritag = NO_XRI; 7425 iocbq_entry->sli4_xritag = NO_XRI; 7426 7427 spin_lock_irq(&phba->hbalock); 7428 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 7429 phba->total_iocbq_bufs++; 7430 spin_unlock_irq(&phba->hbalock); 7431 } 7432 7433 return 0; 7434 7435 out_free_iocbq: 7436 lpfc_free_iocb_list(phba); 7437 7438 return -ENOMEM; 7439 } 7440 7441 /** 7442 * lpfc_free_sgl_list - Free a given sgl list. 7443 * @phba: pointer to lpfc hba data structure. 7444 * @sglq_list: pointer to the head of sgl list. 7445 * 7446 * This routine is invoked to free a give sgl list and memory. 7447 **/ 7448 void 7449 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 7450 { 7451 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7452 7453 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 7454 list_del(&sglq_entry->list); 7455 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 7456 kfree(sglq_entry); 7457 } 7458 } 7459 7460 /** 7461 * lpfc_free_els_sgl_list - Free els sgl list. 7462 * @phba: pointer to lpfc hba data structure. 7463 * 7464 * This routine is invoked to free the driver's els sgl list and memory. 
7465 **/ 7466 static void 7467 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 7468 { 7469 LIST_HEAD(sglq_list); 7470 7471 /* Retrieve all els sgls from driver list */ 7472 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 7473 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 7474 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 7475 7476 /* Now free the sgl list */ 7477 lpfc_free_sgl_list(phba, &sglq_list); 7478 } 7479 7480 /** 7481 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 7482 * @phba: pointer to lpfc hba data structure. 7483 * 7484 * This routine is invoked to free the driver's nvmet sgl list and memory. 7485 **/ 7486 static void 7487 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 7488 { 7489 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7490 LIST_HEAD(sglq_list); 7491 7492 /* Retrieve all nvmet sgls from driver list */ 7493 spin_lock_irq(&phba->hbalock); 7494 spin_lock(&phba->sli4_hba.sgl_list_lock); 7495 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 7496 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7497 spin_unlock_irq(&phba->hbalock); 7498 7499 /* Now free the sgl list */ 7500 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 7501 list_del(&sglq_entry->list); 7502 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 7503 kfree(sglq_entry); 7504 } 7505 7506 /* Update the nvmet_xri_cnt to reflect no current sgls. 7507 * The next initialization cycle sets the count and allocates 7508 * the sgls over again. 7509 */ 7510 phba->sli4_hba.nvmet_xri_cnt = 0; 7511 } 7512 7513 /** 7514 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 7515 * @phba: pointer to lpfc hba data structure. 7516 * 7517 * This routine is invoked to allocate the driver's active sgl memory. 7518 * This array will hold the sglq_entry's for active IOs. 7519 **/ 7520 static int 7521 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 7522 { 7523 int size; 7524 size = sizeof(struct lpfc_sglq *); 7525 size *= phba->sli4_hba.max_cfg_param.max_xri; 7526 7527 phba->sli4_hba.lpfc_sglq_active_list = 7528 kzalloc(size, GFP_KERNEL); 7529 if (!phba->sli4_hba.lpfc_sglq_active_list) 7530 return -ENOMEM; 7531 return 0; 7532 } 7533 7534 /** 7535 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 7536 * @phba: pointer to lpfc hba data structure. 7537 * 7538 * This routine is invoked to walk through the array of active sglq entries 7539 * and free all of the resources. 7540 * This is just a place holder for now. 7541 **/ 7542 static void 7543 lpfc_free_active_sgl(struct lpfc_hba *phba) 7544 { 7545 kfree(phba->sli4_hba.lpfc_sglq_active_list); 7546 } 7547 7548 /** 7549 * lpfc_init_sgl_list - Allocate and initialize sgl list. 7550 * @phba: pointer to lpfc hba data structure. 7551 * 7552 * This routine is invoked to allocate and initizlize the driver's sgl 7553 * list and set up the sgl xritag tag array accordingly. 7554 * 7555 **/ 7556 static void 7557 lpfc_init_sgl_list(struct lpfc_hba *phba) 7558 { 7559 /* Initialize and populate the sglq list per host/VF. 
*/ 7560 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 7561 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7562 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 7563 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7564 7565 /* els xri-sgl book keeping */ 7566 phba->sli4_hba.els_xri_cnt = 0; 7567 7568 /* nvme xri-buffer book keeping */ 7569 phba->sli4_hba.io_xri_cnt = 0; 7570 } 7571 7572 /** 7573 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 7574 * @phba: pointer to lpfc hba data structure. 7575 * 7576 * This routine is invoked to post rpi header templates to the 7577 * port for those SLI4 ports that do not support extents. This routine 7578 * posts a PAGE_SIZE memory region to the port to hold up to 7579 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 7580 * and should be called only when interrupts are disabled. 7581 * 7582 * Return codes 7583 * 0 - successful 7584 * -ERROR - otherwise. 7585 **/ 7586 int 7587 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 7588 { 7589 int rc = 0; 7590 struct lpfc_rpi_hdr *rpi_hdr; 7591 7592 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 7593 if (!phba->sli4_hba.rpi_hdrs_in_use) 7594 return rc; 7595 if (phba->sli4_hba.extents_in_use) 7596 return -EIO; 7597 7598 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 7599 if (!rpi_hdr) { 7600 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7601 "0391 Error during rpi post operation\n"); 7602 lpfc_sli4_remove_rpis(phba); 7603 rc = -ENODEV; 7604 } 7605 7606 return rc; 7607 } 7608 7609 /** 7610 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 7611 * @phba: pointer to lpfc hba data structure. 7612 * 7613 * This routine is invoked to allocate a single 4KB memory region to 7614 * support rpis and stores them in the phba. This single region 7615 * provides support for up to 64 rpis. The region is used globally 7616 * by the device. 7617 * 7618 * Returns: 7619 * A valid rpi hdr on success. 7620 * A NULL pointer on any failure. 7621 **/ 7622 struct lpfc_rpi_hdr * 7623 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 7624 { 7625 uint16_t rpi_limit, curr_rpi_range; 7626 struct lpfc_dmabuf *dmabuf; 7627 struct lpfc_rpi_hdr *rpi_hdr; 7628 7629 /* 7630 * If the SLI4 port supports extents, posting the rpi header isn't 7631 * required. Set the expected maximum count and let the actual value 7632 * get set when extents are fully allocated. 7633 */ 7634 if (!phba->sli4_hba.rpi_hdrs_in_use) 7635 return NULL; 7636 if (phba->sli4_hba.extents_in_use) 7637 return NULL; 7638 7639 /* The limit on the logical index is just the max_rpi count. */ 7640 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 7641 7642 spin_lock_irq(&phba->hbalock); 7643 /* 7644 * Establish the starting RPI in this header block. The starting 7645 * rpi is normalized to a zero base because the physical rpi is 7646 * port based. 7647 */ 7648 curr_rpi_range = phba->sli4_hba.next_rpi; 7649 spin_unlock_irq(&phba->hbalock); 7650 7651 /* Reached full RPI range */ 7652 if (curr_rpi_range == rpi_limit) 7653 return NULL; 7654 7655 /* 7656 * First allocate the protocol header region for the port. The 7657 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
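	 *
	 * Illustrative sizing note (documentation only, not driver code):
	 * one posted header page covers LPFC_RPI_HDR_COUNT (64) RPI
	 * contexts, so the bookkeeping below advances in fixed steps:
	 *
	 *   start_rpi = phba->sli4_hba.next_rpi;          (zero-based index)
	 *   next_rpi  = start_rpi + LPFC_RPI_HDR_COUNT;   (64 more RPIs)
	 *
	 * and posting stops once next_rpi reaches max_cfg_param.max_rpi.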
7658 */ 7659 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 7660 if (!dmabuf) 7661 return NULL; 7662 7663 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 7664 LPFC_HDR_TEMPLATE_SIZE, 7665 &dmabuf->phys, GFP_KERNEL); 7666 if (!dmabuf->virt) { 7667 rpi_hdr = NULL; 7668 goto err_free_dmabuf; 7669 } 7670 7671 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 7672 rpi_hdr = NULL; 7673 goto err_free_coherent; 7674 } 7675 7676 /* Save the rpi header data for cleanup later. */ 7677 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 7678 if (!rpi_hdr) 7679 goto err_free_coherent; 7680 7681 rpi_hdr->dmabuf = dmabuf; 7682 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 7683 rpi_hdr->page_count = 1; 7684 spin_lock_irq(&phba->hbalock); 7685 7686 /* The rpi_hdr stores the logical index only. */ 7687 rpi_hdr->start_rpi = curr_rpi_range; 7688 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 7689 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 7690 7691 spin_unlock_irq(&phba->hbalock); 7692 return rpi_hdr; 7693 7694 err_free_coherent: 7695 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 7696 dmabuf->virt, dmabuf->phys); 7697 err_free_dmabuf: 7698 kfree(dmabuf); 7699 return NULL; 7700 } 7701 7702 /** 7703 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 7704 * @phba: pointer to lpfc hba data structure. 7705 * 7706 * This routine is invoked to remove all memory resources allocated 7707 * to support rpis for SLI4 ports not supporting extents. This routine 7708 * presumes the caller has released all rpis consumed by fabric or port 7709 * logins and is prepared to have the header pages removed. 7710 **/ 7711 void 7712 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 7713 { 7714 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 7715 7716 if (!phba->sli4_hba.rpi_hdrs_in_use) 7717 goto exit; 7718 7719 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 7720 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 7721 list_del(&rpi_hdr->list); 7722 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 7723 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 7724 kfree(rpi_hdr->dmabuf); 7725 kfree(rpi_hdr); 7726 } 7727 exit: 7728 /* There are no rpis available to the port now. */ 7729 phba->sli4_hba.next_rpi = 0; 7730 } 7731 7732 /** 7733 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 7734 * @pdev: pointer to pci device data structure. 7735 * 7736 * This routine is invoked to allocate the driver hba data structure for an 7737 * HBA device. If the allocation is successful, the phba reference to the 7738 * PCI device data structure is set. 
7739 * 7740 * Return codes 7741 * pointer to @phba - successful 7742 * NULL - error 7743 **/ 7744 static struct lpfc_hba * 7745 lpfc_hba_alloc(struct pci_dev *pdev) 7746 { 7747 struct lpfc_hba *phba; 7748 7749 /* Allocate memory for HBA structure */ 7750 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 7751 if (!phba) { 7752 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 7753 return NULL; 7754 } 7755 7756 /* Set reference to PCI device in HBA structure */ 7757 phba->pcidev = pdev; 7758 7759 /* Assign an unused board number */ 7760 phba->brd_no = lpfc_get_instance(); 7761 if (phba->brd_no < 0) { 7762 kfree(phba); 7763 return NULL; 7764 } 7765 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 7766 7767 spin_lock_init(&phba->ct_ev_lock); 7768 INIT_LIST_HEAD(&phba->ct_ev_waiters); 7769 7770 return phba; 7771 } 7772 7773 /** 7774 * lpfc_hba_free - Free driver hba data structure with a device. 7775 * @phba: pointer to lpfc hba data structure. 7776 * 7777 * This routine is invoked to free the driver hba data structure with an 7778 * HBA device. 7779 **/ 7780 static void 7781 lpfc_hba_free(struct lpfc_hba *phba) 7782 { 7783 if (phba->sli_rev == LPFC_SLI_REV4) 7784 kfree(phba->sli4_hba.hdwq); 7785 7786 /* Release the driver assigned board number */ 7787 idr_remove(&lpfc_hba_index, phba->brd_no); 7788 7789 /* Free memory allocated with sli3 rings */ 7790 kfree(phba->sli.sli3_ring); 7791 phba->sli.sli3_ring = NULL; 7792 7793 kfree(phba); 7794 return; 7795 } 7796 7797 /** 7798 * lpfc_create_shost - Create hba physical port with associated scsi host. 7799 * @phba: pointer to lpfc hba data structure. 7800 * 7801 * This routine is invoked to create HBA physical port and associate a SCSI 7802 * host with it. 7803 * 7804 * Return codes 7805 * 0 - successful 7806 * other values - error 7807 **/ 7808 static int 7809 lpfc_create_shost(struct lpfc_hba *phba) 7810 { 7811 struct lpfc_vport *vport; 7812 struct Scsi_Host *shost; 7813 7814 /* Initialize HBA FC structure */ 7815 phba->fc_edtov = FF_DEF_EDTOV; 7816 phba->fc_ratov = FF_DEF_RATOV; 7817 phba->fc_altov = FF_DEF_ALTOV; 7818 phba->fc_arbtov = FF_DEF_ARBTOV; 7819 7820 atomic_set(&phba->sdev_cnt, 0); 7821 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 7822 if (!vport) 7823 return -ENODEV; 7824 7825 shost = lpfc_shost_from_vport(vport); 7826 phba->pport = vport; 7827 7828 if (phba->nvmet_support) { 7829 /* Only 1 vport (pport) will support NVME target */ 7830 phba->targetport = NULL; 7831 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 7832 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 7833 "6076 NVME Target Found\n"); 7834 } 7835 7836 lpfc_debugfs_initialize(vport); 7837 /* Put reference to SCSI host to driver's device private data */ 7838 pci_set_drvdata(phba->pcidev, shost); 7839 7840 /* 7841 * At this point we are fully registered with PSA. In addition, 7842 * any initial discovery should be completed. 7843 */ 7844 vport->load_flag |= FC_ALLOW_FDMI; 7845 if (phba->cfg_enable_SmartSAN || 7846 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 7847 7848 /* Setup appropriate attribute masks */ 7849 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 7850 if (phba->cfg_enable_SmartSAN) 7851 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 7852 else 7853 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 7854 } 7855 return 0; 7856 } 7857 7858 /** 7859 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 7860 * @phba: pointer to lpfc hba data structure. 
7861 * 7862 * This routine is invoked to destroy HBA physical port and the associated 7863 * SCSI host. 7864 **/ 7865 static void 7866 lpfc_destroy_shost(struct lpfc_hba *phba) 7867 { 7868 struct lpfc_vport *vport = phba->pport; 7869 7870 /* Destroy physical port that associated with the SCSI host */ 7871 destroy_port(vport); 7872 7873 return; 7874 } 7875 7876 /** 7877 * lpfc_setup_bg - Setup Block guard structures and debug areas. 7878 * @phba: pointer to lpfc hba data structure. 7879 * @shost: the shost to be used to detect Block guard settings. 7880 * 7881 * This routine sets up the local Block guard protocol settings for @shost. 7882 * This routine also allocates memory for debugging bg buffers. 7883 **/ 7884 static void 7885 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 7886 { 7887 uint32_t old_mask; 7888 uint32_t old_guard; 7889 7890 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7891 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7892 "1478 Registering BlockGuard with the " 7893 "SCSI layer\n"); 7894 7895 old_mask = phba->cfg_prot_mask; 7896 old_guard = phba->cfg_prot_guard; 7897 7898 /* Only allow supported values */ 7899 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 7900 SHOST_DIX_TYPE0_PROTECTION | 7901 SHOST_DIX_TYPE1_PROTECTION); 7902 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 7903 SHOST_DIX_GUARD_CRC); 7904 7905 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 7906 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 7907 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 7908 7909 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7910 if ((old_mask != phba->cfg_prot_mask) || 7911 (old_guard != phba->cfg_prot_guard)) 7912 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7913 "1475 Registering BlockGuard with the " 7914 "SCSI layer: mask %d guard %d\n", 7915 phba->cfg_prot_mask, 7916 phba->cfg_prot_guard); 7917 7918 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7919 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7920 } else 7921 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7922 "1479 Not Registering BlockGuard with the SCSI " 7923 "layer, Bad protection parameters: %d %d\n", 7924 old_mask, old_guard); 7925 } 7926 } 7927 7928 /** 7929 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7930 * @phba: pointer to lpfc hba data structure. 7931 * 7932 * This routine is invoked to perform all the necessary post initialization 7933 * setup for the device. 7934 **/ 7935 static void 7936 lpfc_post_init_setup(struct lpfc_hba *phba) 7937 { 7938 struct Scsi_Host *shost; 7939 struct lpfc_adapter_event_header adapter_event; 7940 7941 /* Get the default values for Model Name and Description */ 7942 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7943 7944 /* 7945 * hba setup may have changed the hba_queue_depth so we need to 7946 * adjust the value of can_queue. 
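	 *
	 * A small worked example (the queue depth value is illustrative
	 * only): if hba setup left cfg_hba_queue_depth at 8192, the
	 * assignment below yields
	 *
	 *   shost->can_queue = 8192 - 10 = 8182
	 *
	 * presumably keeping a few command slots in reserve for driver
	 * internal use.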
7947 */ 7948 shost = pci_get_drvdata(phba->pcidev); 7949 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7950 7951 lpfc_host_attrib_init(shost); 7952 7953 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7954 spin_lock_irq(shost->host_lock); 7955 lpfc_poll_start_timer(phba); 7956 spin_unlock_irq(shost->host_lock); 7957 } 7958 7959 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7960 "0428 Perform SCSI scan\n"); 7961 /* Send board arrival event to upper layer */ 7962 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7963 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7964 fc_host_post_vendor_event(shost, fc_get_event_number(), 7965 sizeof(adapter_event), 7966 (char *) &adapter_event, 7967 LPFC_NL_VENDOR_ID); 7968 return; 7969 } 7970 7971 /** 7972 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7973 * @phba: pointer to lpfc hba data structure. 7974 * 7975 * This routine is invoked to set up the PCI device memory space for device 7976 * with SLI-3 interface spec. 7977 * 7978 * Return codes 7979 * 0 - successful 7980 * other values - error 7981 **/ 7982 static int 7983 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7984 { 7985 struct pci_dev *pdev = phba->pcidev; 7986 unsigned long bar0map_len, bar2map_len; 7987 int i, hbq_count; 7988 void *ptr; 7989 int error; 7990 7991 if (!pdev) 7992 return -ENODEV; 7993 7994 /* Set the device DMA mask size */ 7995 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7996 if (error) 7997 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7998 if (error) 7999 return error; 8000 error = -ENODEV; 8001 8002 /* Get the bus address of Bar0 and Bar2 and the number of bytes 8003 * required by each mapping. 8004 */ 8005 phba->pci_bar0_map = pci_resource_start(pdev, 0); 8006 bar0map_len = pci_resource_len(pdev, 0); 8007 8008 phba->pci_bar2_map = pci_resource_start(pdev, 2); 8009 bar2map_len = pci_resource_len(pdev, 2); 8010 8011 /* Map HBA SLIM to a kernel virtual address. */ 8012 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 8013 if (!phba->slim_memmap_p) { 8014 dev_printk(KERN_ERR, &pdev->dev, 8015 "ioremap failed for SLIM memory.\n"); 8016 goto out; 8017 } 8018 8019 /* Map HBA Control Registers to a kernel virtual address. 
*/ 8020 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 8021 if (!phba->ctrl_regs_memmap_p) { 8022 dev_printk(KERN_ERR, &pdev->dev, 8023 "ioremap failed for HBA control registers.\n"); 8024 goto out_iounmap_slim; 8025 } 8026 8027 /* Allocate memory for SLI-2 structures */ 8028 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 8029 &phba->slim2p.phys, GFP_KERNEL); 8030 if (!phba->slim2p.virt) 8031 goto out_iounmap; 8032 8033 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 8034 phba->mbox_ext = (phba->slim2p.virt + 8035 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 8036 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 8037 phba->IOCBs = (phba->slim2p.virt + 8038 offsetof(struct lpfc_sli2_slim, IOCBs)); 8039 8040 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 8041 lpfc_sli_hbq_size(), 8042 &phba->hbqslimp.phys, 8043 GFP_KERNEL); 8044 if (!phba->hbqslimp.virt) 8045 goto out_free_slim; 8046 8047 hbq_count = lpfc_sli_hbq_count(); 8048 ptr = phba->hbqslimp.virt; 8049 for (i = 0; i < hbq_count; ++i) { 8050 phba->hbqs[i].hbq_virt = ptr; 8051 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 8052 ptr += (lpfc_hbq_defs[i]->entry_count * 8053 sizeof(struct lpfc_hbq_entry)); 8054 } 8055 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 8056 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 8057 8058 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 8059 8060 phba->MBslimaddr = phba->slim_memmap_p; 8061 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 8062 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 8063 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 8064 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 8065 8066 return 0; 8067 8068 out_free_slim: 8069 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 8070 phba->slim2p.virt, phba->slim2p.phys); 8071 out_iounmap: 8072 iounmap(phba->ctrl_regs_memmap_p); 8073 out_iounmap_slim: 8074 iounmap(phba->slim_memmap_p); 8075 out: 8076 return error; 8077 } 8078 8079 /** 8080 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 8081 * @phba: pointer to lpfc hba data structure. 8082 * 8083 * This routine is invoked to unset the PCI device memory space for device 8084 * with SLI-3 interface spec. 8085 **/ 8086 static void 8087 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 8088 { 8089 struct pci_dev *pdev; 8090 8091 /* Obtain PCI device reference */ 8092 if (!phba->pcidev) 8093 return; 8094 else 8095 pdev = phba->pcidev; 8096 8097 /* Free coherent DMA memory allocated */ 8098 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 8099 phba->hbqslimp.virt, phba->hbqslimp.phys); 8100 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 8101 phba->slim2p.virt, phba->slim2p.phys); 8102 8103 /* I/O memory unmap */ 8104 iounmap(phba->ctrl_regs_memmap_p); 8105 iounmap(phba->slim_memmap_p); 8106 8107 return; 8108 } 8109 8110 /** 8111 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 8112 * @phba: pointer to lpfc hba data structure. 8113 * 8114 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 8115 * done and check status. 8116 * 8117 * Return 0 if successful, otherwise -ENODEV. 
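 *
 * Timing sketch based on the poll loop below: the port semaphore register
 * is read up to 3000 times with an msleep(10) between attempts, so the
 * routine gives POST roughly
 *
 *   3000 iterations * 10 ms = ~30 seconds
 *
 * to either report LPFC_POST_STAGE_PORT_READY or a fatal error before the
 * device is failed with -ENODEV.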
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			       &portsmphr_reg.word0) ||
		    (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid. Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process. Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			    readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			    readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			    readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			    readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks. The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				       &reg_data.word0) ||
			    (bf_get(lpfc_sliport_status_err, &reg_data) &&
			     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
							ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
							ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: sli if type to operate on.
 *
 * This routine is invoked to set up SLI4 BAR1 register memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.ctrl_regs_memmap_p +
				LPFC_SLIPORT_IF0_SMPHR;
		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISR0;
		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_IMR0;
		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISCR0;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_CQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_EQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_MQ_DOORBELL;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_err(&phba->pcidev->dev,
			"FATAL - unsupported SLI4 interface type - %d\n",
			if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
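 *
 * Layout sketch mirroring the assignments below: each virtual function is
 * given its own LPFC_VFR_PAGE_SIZE page of doorbells within BAR2, so a
 * doorbell address is formed as, for example,
 *
 *   RQDBregaddr = drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE +
 *                 LPFC_ULP0_RQ_DOORBELL;
 *
 * with the WQ, EQ/CQ, MQ and BMBX doorbells at their own offsets in the
 * same per-VF page.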
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later. The mailbox dma address is required
	 * to be 16-byte aligned. Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now. The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses. The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
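	 *
	 * Worked example of the split done below (the address is made up):
	 * for aphys = 0x0000001234567890, which is 16-byte aligned,
	 *
	 *   addr_hi = ((aphys >> 34) & 0x3fffffff) << 2 | LPFC_BMBX_BIT1_ADDR_HI
	 *   addr_lo = ((aphys >> 4)  & 0x3fffffff) << 2 | LPFC_BMBX_BIT1_ADDR_LO
	 *
	 * so the port can rebuild the 64-bit address from the two 30-bit
	 * words, using a low-order flag bit in each word to mark which half
	 * it carries.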
8478 */ 8479 dma_address = &phba->sli4_hba.bmbx.dma_address; 8480 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 8481 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 8482 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 8483 LPFC_BMBX_BIT1_ADDR_HI); 8484 8485 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 8486 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 8487 LPFC_BMBX_BIT1_ADDR_LO); 8488 return 0; 8489 } 8490 8491 /** 8492 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 8493 * @phba: pointer to lpfc hba data structure. 8494 * 8495 * This routine is invoked to teardown the bootstrap mailbox 8496 * region and release all host resources. This routine requires 8497 * the caller to ensure all mailbox commands recovered, no 8498 * additional mailbox comands are sent, and interrupts are disabled 8499 * before calling this routine. 8500 * 8501 **/ 8502 static void 8503 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 8504 { 8505 dma_free_coherent(&phba->pcidev->dev, 8506 phba->sli4_hba.bmbx.bmbx_size, 8507 phba->sli4_hba.bmbx.dmabuf->virt, 8508 phba->sli4_hba.bmbx.dmabuf->phys); 8509 8510 kfree(phba->sli4_hba.bmbx.dmabuf); 8511 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 8512 } 8513 8514 static const char * const lpfc_topo_to_str[] = { 8515 "Loop then P2P", 8516 "Loopback", 8517 "P2P Only", 8518 "Unsupported", 8519 "Loop Only", 8520 "Unsupported", 8521 "P2P then Loop", 8522 }; 8523 8524 #define LINK_FLAGS_DEF 0x0 8525 #define LINK_FLAGS_P2P 0x1 8526 #define LINK_FLAGS_LOOP 0x2 8527 /** 8528 * lpfc_map_topology - Map the topology read from READ_CONFIG 8529 * @phba: pointer to lpfc hba data structure. 8530 * @rd_config: pointer to read config data 8531 * 8532 * This routine is invoked to map the topology values as read 8533 * from the read config mailbox command. If the persistent 8534 * topology feature is supported, the firmware will provide the 8535 * saved topology information to be used in INIT_LINK 8536 **/ 8537 static void 8538 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 8539 { 8540 u8 ptv, tf, pt; 8541 8542 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 8543 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 8544 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 8545 8546 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8547 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 8548 ptv, tf, pt); 8549 if (!ptv) { 8550 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8551 "2019 FW does not support persistent topology " 8552 "Using driver parameter defined value [%s]", 8553 lpfc_topo_to_str[phba->cfg_topology]); 8554 return; 8555 } 8556 /* FW supports persistent topology - override module parameter value */ 8557 phba->hba_flag |= HBA_PERSISTENT_TOPO; 8558 8559 /* if ASIC_GEN_NUM >= 0xC) */ 8560 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 8561 LPFC_SLI_INTF_IF_TYPE_6) || 8562 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 8563 LPFC_SLI_INTF_FAMILY_G6)) { 8564 if (!tf) { 8565 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 8566 ? FLAGS_TOPOLOGY_MODE_LOOP 8567 : FLAGS_TOPOLOGY_MODE_PT_PT); 8568 } else { 8569 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 8570 } 8571 } else { /* G5 */ 8572 if (tf) { 8573 /* If topology failover set - pt is '0' or '1' */ 8574 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 8575 FLAGS_TOPOLOGY_MODE_LOOP_PT); 8576 } else { 8577 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 8578 ? 
FLAGS_TOPOLOGY_MODE_PT_PT 8579 : FLAGS_TOPOLOGY_MODE_LOOP); 8580 } 8581 } 8582 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 8583 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8584 "2020 Using persistent topology value [%s]", 8585 lpfc_topo_to_str[phba->cfg_topology]); 8586 } else { 8587 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8588 "2021 Invalid topology values from FW " 8589 "Using driver parameter defined value [%s]", 8590 lpfc_topo_to_str[phba->cfg_topology]); 8591 } 8592 } 8593 8594 /** 8595 * lpfc_sli4_read_config - Get the config parameters. 8596 * @phba: pointer to lpfc hba data structure. 8597 * 8598 * This routine is invoked to read the configuration parameters from the HBA. 8599 * The configuration parameters are used to set the base and maximum values 8600 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 8601 * allocation for the port. 8602 * 8603 * Return codes 8604 * 0 - successful 8605 * -ENOMEM - No available memory 8606 * -EIO - The mailbox failed to complete successfully. 8607 **/ 8608 int 8609 lpfc_sli4_read_config(struct lpfc_hba *phba) 8610 { 8611 LPFC_MBOXQ_t *pmb; 8612 struct lpfc_mbx_read_config *rd_config; 8613 union lpfc_sli4_cfg_shdr *shdr; 8614 uint32_t shdr_status, shdr_add_status; 8615 struct lpfc_mbx_get_func_cfg *get_func_cfg; 8616 struct lpfc_rsrc_desc_fcfcoe *desc; 8617 char *pdesc_0; 8618 uint16_t forced_link_speed; 8619 uint32_t if_type, qmin; 8620 int length, i, rc = 0, rc2; 8621 8622 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8623 if (!pmb) { 8624 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8625 "2011 Unable to allocate memory for issuing " 8626 "SLI_CONFIG_SPECIAL mailbox command\n"); 8627 return -ENOMEM; 8628 } 8629 8630 lpfc_read_config(phba, pmb); 8631 8632 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8633 if (rc != MBX_SUCCESS) { 8634 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8635 "2012 Mailbox failed , mbxCmd x%x " 8636 "READ_CONFIG, mbxStatus x%x\n", 8637 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8638 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8639 rc = -EIO; 8640 } else { 8641 rd_config = &pmb->u.mqe.un.rd_config; 8642 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 8643 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 8644 phba->sli4_hba.lnk_info.lnk_tp = 8645 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 8646 phba->sli4_hba.lnk_info.lnk_no = 8647 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 8648 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8649 "3081 lnk_type:%d, lnk_numb:%d\n", 8650 phba->sli4_hba.lnk_info.lnk_tp, 8651 phba->sli4_hba.lnk_info.lnk_no); 8652 } else 8653 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8654 "3082 Mailbox (x%x) returned ldv:x0\n", 8655 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 8656 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 8657 phba->bbcredit_support = 1; 8658 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 8659 } 8660 8661 phba->sli4_hba.conf_trunk = 8662 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 8663 phba->sli4_hba.extents_in_use = 8664 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 8665 phba->sli4_hba.max_cfg_param.max_xri = 8666 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 8667 /* Reduce resource usage in kdump environment */ 8668 if (is_kdump_kernel() && 8669 phba->sli4_hba.max_cfg_param.max_xri > 512) 8670 phba->sli4_hba.max_cfg_param.max_xri = 512; 8671 phba->sli4_hba.max_cfg_param.xri_base = 8672 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 8673 phba->sli4_hba.max_cfg_param.max_vpi = 8674 
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		/* Limit the max we support */
		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_map_topology(phba, rd_config);
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq,
				phba->lmt);

		/*
		 * Calculate queue resources based on how
		 * many WQ/CQ/EQs are available.
		 */
		qmin = phba->sli4_hba.max_cfg_param.max_wq;
		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_cq;
		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_eq;
		/*
		 * What's left after this can go toward NVME / FCP.
		 * The minus 4 accounts for ELS, NVME LS, MBOX
		 * plus one extra. When configured for
		 * NVMET, FCP io channel WQs are not created.
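		 *
		 * Worked example with made-up READ_CONFIG values: if the
		 * port reports max_wq = 256, max_cq = 128 and max_eq = 64,
		 * then
		 *
		 *   qmin = min(256, 128, 64) - 4 = 60
		 *
		 * and both cfg_irq_chann and cfg_hdw_queue are clipped to 60
		 * by the check that follows.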
8743 */ 8744 qmin -= 4; 8745 8746 /* Check to see if there is enough for NVME */ 8747 if ((phba->cfg_irq_chann > qmin) || 8748 (phba->cfg_hdw_queue > qmin)) { 8749 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8750 "2005 Reducing Queues - " 8751 "FW resource limitation: " 8752 "WQ %d CQ %d EQ %d: min %d: " 8753 "IRQ %d HDWQ %d\n", 8754 phba->sli4_hba.max_cfg_param.max_wq, 8755 phba->sli4_hba.max_cfg_param.max_cq, 8756 phba->sli4_hba.max_cfg_param.max_eq, 8757 qmin, phba->cfg_irq_chann, 8758 phba->cfg_hdw_queue); 8759 8760 if (phba->cfg_irq_chann > qmin) 8761 phba->cfg_irq_chann = qmin; 8762 if (phba->cfg_hdw_queue > qmin) 8763 phba->cfg_hdw_queue = qmin; 8764 } 8765 } 8766 8767 if (rc) 8768 goto read_cfg_out; 8769 8770 /* Update link speed if forced link speed is supported */ 8771 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8772 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 8773 forced_link_speed = 8774 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 8775 if (forced_link_speed) { 8776 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 8777 8778 switch (forced_link_speed) { 8779 case LINK_SPEED_1G: 8780 phba->cfg_link_speed = 8781 LPFC_USER_LINK_SPEED_1G; 8782 break; 8783 case LINK_SPEED_2G: 8784 phba->cfg_link_speed = 8785 LPFC_USER_LINK_SPEED_2G; 8786 break; 8787 case LINK_SPEED_4G: 8788 phba->cfg_link_speed = 8789 LPFC_USER_LINK_SPEED_4G; 8790 break; 8791 case LINK_SPEED_8G: 8792 phba->cfg_link_speed = 8793 LPFC_USER_LINK_SPEED_8G; 8794 break; 8795 case LINK_SPEED_10G: 8796 phba->cfg_link_speed = 8797 LPFC_USER_LINK_SPEED_10G; 8798 break; 8799 case LINK_SPEED_16G: 8800 phba->cfg_link_speed = 8801 LPFC_USER_LINK_SPEED_16G; 8802 break; 8803 case LINK_SPEED_32G: 8804 phba->cfg_link_speed = 8805 LPFC_USER_LINK_SPEED_32G; 8806 break; 8807 case LINK_SPEED_64G: 8808 phba->cfg_link_speed = 8809 LPFC_USER_LINK_SPEED_64G; 8810 break; 8811 case 0xffff: 8812 phba->cfg_link_speed = 8813 LPFC_USER_LINK_SPEED_AUTO; 8814 break; 8815 default: 8816 lpfc_printf_log(phba, KERN_ERR, 8817 LOG_TRACE_EVENT, 8818 "0047 Unrecognized link " 8819 "speed : %d\n", 8820 forced_link_speed); 8821 phba->cfg_link_speed = 8822 LPFC_USER_LINK_SPEED_AUTO; 8823 } 8824 } 8825 } 8826 8827 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 8828 length = phba->sli4_hba.max_cfg_param.max_xri - 8829 lpfc_sli4_get_els_iocb_cnt(phba); 8830 if (phba->cfg_hba_queue_depth > length) { 8831 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8832 "3361 HBA queue depth changed from %d to %d\n", 8833 phba->cfg_hba_queue_depth, length); 8834 phba->cfg_hba_queue_depth = length; 8835 } 8836 8837 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 8838 LPFC_SLI_INTF_IF_TYPE_2) 8839 goto read_cfg_out; 8840 8841 /* get the pf# and vf# for SLI4 if_type 2 port */ 8842 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 8843 sizeof(struct lpfc_sli4_cfg_mhdr)); 8844 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 8845 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 8846 length, LPFC_SLI4_MBX_EMBED); 8847 8848 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8849 shdr = (union lpfc_sli4_cfg_shdr *) 8850 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 8851 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8852 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8853 if (rc2 || shdr_status || shdr_add_status) { 8854 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8855 "3026 Mailbox failed , mbxCmd x%x " 8856 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 8857 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8858 bf_get(lpfc_mqe_status, 
&pmb->u.mqe)); 8859 goto read_cfg_out; 8860 } 8861 8862 /* search for fc_fcoe resrouce descriptor */ 8863 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 8864 8865 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 8866 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 8867 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 8868 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 8869 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 8870 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 8871 goto read_cfg_out; 8872 8873 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 8874 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 8875 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 8876 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 8877 phba->sli4_hba.iov.pf_number = 8878 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 8879 phba->sli4_hba.iov.vf_number = 8880 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 8881 break; 8882 } 8883 } 8884 8885 if (i < LPFC_RSRC_DESC_MAX_NUM) 8886 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8887 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 8888 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 8889 phba->sli4_hba.iov.vf_number); 8890 else 8891 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8892 "3028 GET_FUNCTION_CONFIG: failed to find " 8893 "Resource Descriptor:x%x\n", 8894 LPFC_RSRC_DESC_TYPE_FCFCOE); 8895 8896 read_cfg_out: 8897 mempool_free(pmb, phba->mbox_mem_pool); 8898 return rc; 8899 } 8900 8901 /** 8902 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 8903 * @phba: pointer to lpfc hba data structure. 8904 * 8905 * This routine is invoked to setup the port-side endian order when 8906 * the port if_type is 0. This routine has no function for other 8907 * if_types. 8908 * 8909 * Return codes 8910 * 0 - successful 8911 * -ENOMEM - No available memory 8912 * -EIO - The mailbox failed to complete successfully. 8913 **/ 8914 static int 8915 lpfc_setup_endian_order(struct lpfc_hba *phba) 8916 { 8917 LPFC_MBOXQ_t *mboxq; 8918 uint32_t if_type, rc = 0; 8919 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 8920 HOST_ENDIAN_HIGH_WORD1}; 8921 8922 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8923 switch (if_type) { 8924 case LPFC_SLI_INTF_IF_TYPE_0: 8925 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8926 GFP_KERNEL); 8927 if (!mboxq) { 8928 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8929 "0492 Unable to allocate memory for " 8930 "issuing SLI_CONFIG_SPECIAL mailbox " 8931 "command\n"); 8932 return -ENOMEM; 8933 } 8934 8935 /* 8936 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 8937 * two words to contain special data values and no other data. 8938 */ 8939 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 8940 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8941 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8942 if (rc != MBX_SUCCESS) { 8943 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8944 "0493 SLI_CONFIG_SPECIAL mailbox " 8945 "failed with status x%x\n", 8946 rc); 8947 rc = -EIO; 8948 } 8949 mempool_free(mboxq, phba->mbox_mem_pool); 8950 break; 8951 case LPFC_SLI_INTF_IF_TYPE_6: 8952 case LPFC_SLI_INTF_IF_TYPE_2: 8953 case LPFC_SLI_INTF_IF_TYPE_1: 8954 default: 8955 break; 8956 } 8957 return rc; 8958 } 8959 8960 /** 8961 * lpfc_sli4_queue_verify - Verify and update EQ counts 8962 * @phba: pointer to lpfc hba data structure. 8963 * 8964 * This routine is invoked to check the user settable queue counts for EQs. 
8965 * After this routine is called the counts will be set to valid values that 8966 * adhere to the constraints of the system's interrupt vectors and the port's 8967 * queue resources. 8968 * 8969 * Return codes 8970 * 0 - successful 8971 * -ENOMEM - No available memory 8972 **/ 8973 static int 8974 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 8975 { 8976 /* 8977 * Sanity check for configured queue parameters against the run-time 8978 * device parameters 8979 */ 8980 8981 if (phba->nvmet_support) { 8982 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 8983 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 8984 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 8985 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 8986 } 8987 8988 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8989 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 8990 phba->cfg_hdw_queue, phba->cfg_irq_chann, 8991 phba->cfg_nvmet_mrq); 8992 8993 /* Get EQ depth from module parameter, fake the default for now */ 8994 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8995 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8996 8997 /* Get CQ depth from module parameter, fake the default for now */ 8998 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8999 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 9000 return 0; 9001 } 9002 9003 static int 9004 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 9005 { 9006 struct lpfc_queue *qdesc; 9007 u32 wqesize; 9008 int cpu; 9009 9010 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 9011 /* Create Fast Path IO CQs */ 9012 if (phba->enab_exp_wqcq_pages) 9013 /* Increase the CQ size when WQEs contain an embedded cdb */ 9014 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 9015 phba->sli4_hba.cq_esize, 9016 LPFC_CQE_EXP_COUNT, cpu); 9017 9018 else 9019 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9020 phba->sli4_hba.cq_esize, 9021 phba->sli4_hba.cq_ecount, cpu); 9022 if (!qdesc) { 9023 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9024 "0499 Failed allocate fast-path IO CQ (%d)\n", 9025 idx); 9026 return 1; 9027 } 9028 qdesc->qe_valid = 1; 9029 qdesc->hdwq = idx; 9030 qdesc->chann = cpu; 9031 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 9032 9033 /* Create Fast Path IO WQs */ 9034 if (phba->enab_exp_wqcq_pages) { 9035 /* Increase the WQ size when WQEs contain an embedded cdb */ 9036 wqesize = (phba->fcp_embed_io) ? 9037 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 9038 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 9039 wqesize, 9040 LPFC_WQE_EXP_COUNT, cpu); 9041 } else 9042 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9043 phba->sli4_hba.wq_esize, 9044 phba->sli4_hba.wq_ecount, cpu); 9045 9046 if (!qdesc) { 9047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9048 "0503 Failed allocate fast-path IO WQ (%d)\n", 9049 idx); 9050 return 1; 9051 } 9052 qdesc->hdwq = idx; 9053 qdesc->chann = cpu; 9054 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 9055 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9056 return 0; 9057 } 9058 9059 /** 9060 * lpfc_sli4_queue_create - Create all the SLI4 queues 9061 * @phba: pointer to lpfc hba data structure. 9062 * 9063 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 9064 * operation. For each SLI4 queue type, the parameters such as queue entry 9065 * count (queue depth) shall be taken from the module parameter. For now, 9066 * we just use some constant number as place holder. 
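 * The queues are only allocated and initialized here; they are not created
 * on the port until lpfc_sli4_queue_setup() is called.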
9067 * 9068 * Return codes 9069 * 0 - successful 9070 * -ENOMEM - No availble memory 9071 * -EIO - The mailbox failed to complete successfully. 9072 **/ 9073 int 9074 lpfc_sli4_queue_create(struct lpfc_hba *phba) 9075 { 9076 struct lpfc_queue *qdesc; 9077 int idx, cpu, eqcpu; 9078 struct lpfc_sli4_hdw_queue *qp; 9079 struct lpfc_vector_map_info *cpup; 9080 struct lpfc_vector_map_info *eqcpup; 9081 struct lpfc_eq_intr_info *eqi; 9082 9083 /* 9084 * Create HBA Record arrays. 9085 * Both NVME and FCP will share that same vectors / EQs 9086 */ 9087 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 9088 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 9089 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 9090 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 9091 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 9092 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 9093 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 9094 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 9095 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 9096 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 9097 9098 if (!phba->sli4_hba.hdwq) { 9099 phba->sli4_hba.hdwq = kcalloc( 9100 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 9101 GFP_KERNEL); 9102 if (!phba->sli4_hba.hdwq) { 9103 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9104 "6427 Failed allocate memory for " 9105 "fast-path Hardware Queue array\n"); 9106 goto out_error; 9107 } 9108 /* Prepare hardware queues to take IO buffers */ 9109 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9110 qp = &phba->sli4_hba.hdwq[idx]; 9111 spin_lock_init(&qp->io_buf_list_get_lock); 9112 spin_lock_init(&qp->io_buf_list_put_lock); 9113 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 9114 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 9115 qp->get_io_bufs = 0; 9116 qp->put_io_bufs = 0; 9117 qp->total_io_bufs = 0; 9118 spin_lock_init(&qp->abts_io_buf_list_lock); 9119 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 9120 qp->abts_scsi_io_bufs = 0; 9121 qp->abts_nvme_io_bufs = 0; 9122 INIT_LIST_HEAD(&qp->sgl_list); 9123 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 9124 spin_lock_init(&qp->hdwq_lock); 9125 } 9126 } 9127 9128 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9129 if (phba->nvmet_support) { 9130 phba->sli4_hba.nvmet_cqset = kcalloc( 9131 phba->cfg_nvmet_mrq, 9132 sizeof(struct lpfc_queue *), 9133 GFP_KERNEL); 9134 if (!phba->sli4_hba.nvmet_cqset) { 9135 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9136 "3121 Fail allocate memory for " 9137 "fast-path CQ set array\n"); 9138 goto out_error; 9139 } 9140 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 9141 phba->cfg_nvmet_mrq, 9142 sizeof(struct lpfc_queue *), 9143 GFP_KERNEL); 9144 if (!phba->sli4_hba.nvmet_mrq_hdr) { 9145 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9146 "3122 Fail allocate memory for " 9147 "fast-path RQ set hdr array\n"); 9148 goto out_error; 9149 } 9150 phba->sli4_hba.nvmet_mrq_data = kcalloc( 9151 phba->cfg_nvmet_mrq, 9152 sizeof(struct lpfc_queue *), 9153 GFP_KERNEL); 9154 if (!phba->sli4_hba.nvmet_mrq_data) { 9155 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9156 "3124 Fail allocate memory for " 9157 "fast-path RQ set data array\n"); 9158 goto out_error; 9159 } 9160 } 9161 } 9162 9163 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 9164 9165 /* Create HBA Event Queues (EQs) */ 9166 for_each_present_cpu(cpu) { 9167 /* We only want to create 1 EQ per vector, even though 9168 * multiple CPUs might be using that vector. so only 9169 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 
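 * Hardware queues that share the same vector pick up a pointer to this EQ
 * in the second loop below.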
9170 */ 9171 cpup = &phba->sli4_hba.cpu_map[cpu]; 9172 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 9173 continue; 9174 9175 /* Get a ptr to the Hardware Queue associated with this CPU */ 9176 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 9177 9178 /* Allocate an EQ */ 9179 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9180 phba->sli4_hba.eq_esize, 9181 phba->sli4_hba.eq_ecount, cpu); 9182 if (!qdesc) { 9183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9184 "0497 Failed allocate EQ (%d)\n", 9185 cpup->hdwq); 9186 goto out_error; 9187 } 9188 qdesc->qe_valid = 1; 9189 qdesc->hdwq = cpup->hdwq; 9190 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 9191 qdesc->last_cpu = qdesc->chann; 9192 9193 /* Save the allocated EQ in the Hardware Queue */ 9194 qp->hba_eq = qdesc; 9195 9196 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 9197 list_add(&qdesc->cpu_list, &eqi->list); 9198 } 9199 9200 /* Now we need to populate the other Hardware Queues, that share 9201 * an IRQ vector, with the associated EQ ptr. 9202 */ 9203 for_each_present_cpu(cpu) { 9204 cpup = &phba->sli4_hba.cpu_map[cpu]; 9205 9206 /* Check for EQ already allocated in previous loop */ 9207 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 9208 continue; 9209 9210 /* Check for multiple CPUs per hdwq */ 9211 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 9212 if (qp->hba_eq) 9213 continue; 9214 9215 /* We need to share an EQ for this hdwq */ 9216 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 9217 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 9218 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 9219 } 9220 9221 /* Allocate IO Path SLI4 CQ/WQs */ 9222 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9223 if (lpfc_alloc_io_wq_cq(phba, idx)) 9224 goto out_error; 9225 } 9226 9227 if (phba->nvmet_support) { 9228 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9229 cpu = lpfc_find_cpu_handle(phba, idx, 9230 LPFC_FIND_BY_HDWQ); 9231 qdesc = lpfc_sli4_queue_alloc(phba, 9232 LPFC_DEFAULT_PAGE_SIZE, 9233 phba->sli4_hba.cq_esize, 9234 phba->sli4_hba.cq_ecount, 9235 cpu); 9236 if (!qdesc) { 9237 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9238 "3142 Failed allocate NVME " 9239 "CQ Set (%d)\n", idx); 9240 goto out_error; 9241 } 9242 qdesc->qe_valid = 1; 9243 qdesc->hdwq = idx; 9244 qdesc->chann = cpu; 9245 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 9246 } 9247 } 9248 9249 /* 9250 * Create Slow Path Completion Queues (CQs) 9251 */ 9252 9253 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 9254 /* Create slow-path Mailbox Command Complete Queue */ 9255 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9256 phba->sli4_hba.cq_esize, 9257 phba->sli4_hba.cq_ecount, cpu); 9258 if (!qdesc) { 9259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9260 "0500 Failed allocate slow-path mailbox CQ\n"); 9261 goto out_error; 9262 } 9263 qdesc->qe_valid = 1; 9264 phba->sli4_hba.mbx_cq = qdesc; 9265 9266 /* Create slow-path ELS Complete Queue */ 9267 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9268 phba->sli4_hba.cq_esize, 9269 phba->sli4_hba.cq_ecount, cpu); 9270 if (!qdesc) { 9271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9272 "0501 Failed allocate slow-path ELS CQ\n"); 9273 goto out_error; 9274 } 9275 qdesc->qe_valid = 1; 9276 qdesc->chann = cpu; 9277 phba->sli4_hba.els_cq = qdesc; 9278 9279 9280 /* 9281 * Create Slow Path Work Queues (WQs) 9282 */ 9283 9284 /* Create Mailbox Command Queue */ 9285 9286 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9287 phba->sli4_hba.mq_esize, 
9288 phba->sli4_hba.mq_ecount, cpu); 9289 if (!qdesc) { 9290 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9291 "0505 Failed allocate slow-path MQ\n"); 9292 goto out_error; 9293 } 9294 qdesc->chann = cpu; 9295 phba->sli4_hba.mbx_wq = qdesc; 9296 9297 /* 9298 * Create ELS Work Queues 9299 */ 9300 9301 /* Create slow-path ELS Work Queue */ 9302 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9303 phba->sli4_hba.wq_esize, 9304 phba->sli4_hba.wq_ecount, cpu); 9305 if (!qdesc) { 9306 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9307 "0504 Failed allocate slow-path ELS WQ\n"); 9308 goto out_error; 9309 } 9310 qdesc->chann = cpu; 9311 phba->sli4_hba.els_wq = qdesc; 9312 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9313 9314 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9315 /* Create NVME LS Complete Queue */ 9316 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9317 phba->sli4_hba.cq_esize, 9318 phba->sli4_hba.cq_ecount, cpu); 9319 if (!qdesc) { 9320 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9321 "6079 Failed allocate NVME LS CQ\n"); 9322 goto out_error; 9323 } 9324 qdesc->chann = cpu; 9325 qdesc->qe_valid = 1; 9326 phba->sli4_hba.nvmels_cq = qdesc; 9327 9328 /* Create NVME LS Work Queue */ 9329 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9330 phba->sli4_hba.wq_esize, 9331 phba->sli4_hba.wq_ecount, cpu); 9332 if (!qdesc) { 9333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9334 "6080 Failed allocate NVME LS WQ\n"); 9335 goto out_error; 9336 } 9337 qdesc->chann = cpu; 9338 phba->sli4_hba.nvmels_wq = qdesc; 9339 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9340 } 9341 9342 /* 9343 * Create Receive Queue (RQ) 9344 */ 9345 9346 /* Create Receive Queue for header */ 9347 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9348 phba->sli4_hba.rq_esize, 9349 phba->sli4_hba.rq_ecount, cpu); 9350 if (!qdesc) { 9351 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9352 "0506 Failed allocate receive HRQ\n"); 9353 goto out_error; 9354 } 9355 phba->sli4_hba.hdr_rq = qdesc; 9356 9357 /* Create Receive Queue for data */ 9358 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9359 phba->sli4_hba.rq_esize, 9360 phba->sli4_hba.rq_ecount, cpu); 9361 if (!qdesc) { 9362 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9363 "0507 Failed allocate receive DRQ\n"); 9364 goto out_error; 9365 } 9366 phba->sli4_hba.dat_rq = qdesc; 9367 9368 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 9369 phba->nvmet_support) { 9370 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9371 cpu = lpfc_find_cpu_handle(phba, idx, 9372 LPFC_FIND_BY_HDWQ); 9373 /* Create NVMET Receive Queue for header */ 9374 qdesc = lpfc_sli4_queue_alloc(phba, 9375 LPFC_DEFAULT_PAGE_SIZE, 9376 phba->sli4_hba.rq_esize, 9377 LPFC_NVMET_RQE_DEF_COUNT, 9378 cpu); 9379 if (!qdesc) { 9380 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9381 "3146 Failed allocate " 9382 "receive HRQ\n"); 9383 goto out_error; 9384 } 9385 qdesc->hdwq = idx; 9386 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 9387 9388 /* Only needed for header of RQ pair */ 9389 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 9390 GFP_KERNEL, 9391 cpu_to_node(cpu)); 9392 if (qdesc->rqbp == NULL) { 9393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9394 "6131 Failed allocate " 9395 "Header RQBP\n"); 9396 goto out_error; 9397 } 9398 9399 /* Put list in known state in case driver load fails. 
*/ 9400 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 9401 9402 /* Create NVMET Receive Queue for data */ 9403 qdesc = lpfc_sli4_queue_alloc(phba, 9404 LPFC_DEFAULT_PAGE_SIZE, 9405 phba->sli4_hba.rq_esize, 9406 LPFC_NVMET_RQE_DEF_COUNT, 9407 cpu); 9408 if (!qdesc) { 9409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9410 "3156 Failed allocate " 9411 "receive DRQ\n"); 9412 goto out_error; 9413 } 9414 qdesc->hdwq = idx; 9415 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 9416 } 9417 } 9418 9419 /* Clear NVME stats */ 9420 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9421 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9422 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 9423 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 9424 } 9425 } 9426 9427 /* Clear SCSI stats */ 9428 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 9429 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9430 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 9431 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 9432 } 9433 } 9434 9435 return 0; 9436 9437 out_error: 9438 lpfc_sli4_queue_destroy(phba); 9439 return -ENOMEM; 9440 } 9441 9442 static inline void 9443 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 9444 { 9445 if (*qp != NULL) { 9446 lpfc_sli4_queue_free(*qp); 9447 *qp = NULL; 9448 } 9449 } 9450 9451 static inline void 9452 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 9453 { 9454 int idx; 9455 9456 if (*qs == NULL) 9457 return; 9458 9459 for (idx = 0; idx < max; idx++) 9460 __lpfc_sli4_release_queue(&(*qs)[idx]); 9461 9462 kfree(*qs); 9463 *qs = NULL; 9464 } 9465 9466 static inline void 9467 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 9468 { 9469 struct lpfc_sli4_hdw_queue *hdwq; 9470 struct lpfc_queue *eq; 9471 uint32_t idx; 9472 9473 hdwq = phba->sli4_hba.hdwq; 9474 9475 /* Loop thru all Hardware Queues */ 9476 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9477 /* Free the CQ/WQ corresponding to the Hardware Queue */ 9478 lpfc_sli4_queue_free(hdwq[idx].io_cq); 9479 lpfc_sli4_queue_free(hdwq[idx].io_wq); 9480 hdwq[idx].hba_eq = NULL; 9481 hdwq[idx].io_cq = NULL; 9482 hdwq[idx].io_wq = NULL; 9483 if (phba->cfg_xpsgl && !phba->nvmet_support) 9484 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 9485 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 9486 } 9487 /* Loop thru all IRQ vectors */ 9488 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 9489 /* Free the EQ corresponding to the IRQ vector */ 9490 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 9491 lpfc_sli4_queue_free(eq); 9492 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 9493 } 9494 } 9495 9496 /** 9497 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 9498 * @phba: pointer to lpfc hba data structure. 9499 * 9500 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 9501 * operation. 9502 * 9503 * Return codes 9504 * 0 - successful 9505 * -ENOMEM - No available memory 9506 * -EIO - The mailbox failed to complete successfully. 9507 **/ 9508 void 9509 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 9510 { 9511 /* 9512 * Set FREE_INIT before beginning to free the queues. 9513 * Wait until the users of queues to acknowledge to 9514 * release queues by clearing FREE_WAIT. 
9515 */ 9516 spin_lock_irq(&phba->hbalock); 9517 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 9518 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 9519 spin_unlock_irq(&phba->hbalock); 9520 msleep(20); 9521 spin_lock_irq(&phba->hbalock); 9522 } 9523 spin_unlock_irq(&phba->hbalock); 9524 9525 lpfc_sli4_cleanup_poll_list(phba); 9526 9527 /* Release HBA eqs */ 9528 if (phba->sli4_hba.hdwq) 9529 lpfc_sli4_release_hdwq(phba); 9530 9531 if (phba->nvmet_support) { 9532 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 9533 phba->cfg_nvmet_mrq); 9534 9535 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 9536 phba->cfg_nvmet_mrq); 9537 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 9538 phba->cfg_nvmet_mrq); 9539 } 9540 9541 /* Release mailbox command work queue */ 9542 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 9543 9544 /* Release ELS work queue */ 9545 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 9546 9547 /* Release ELS work queue */ 9548 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 9549 9550 /* Release unsolicited receive queue */ 9551 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 9552 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 9553 9554 /* Release ELS complete queue */ 9555 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 9556 9557 /* Release NVME LS complete queue */ 9558 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 9559 9560 /* Release mailbox command complete queue */ 9561 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 9562 9563 /* Everything on this list has been freed */ 9564 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 9565 9566 /* Done with freeing the queues */ 9567 spin_lock_irq(&phba->hbalock); 9568 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 9569 spin_unlock_irq(&phba->hbalock); 9570 } 9571 9572 int 9573 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 9574 { 9575 struct lpfc_rqb *rqbp; 9576 struct lpfc_dmabuf *h_buf; 9577 struct rqb_dmabuf *rqb_buffer; 9578 9579 rqbp = rq->rqbp; 9580 while (!list_empty(&rqbp->rqb_buffer_list)) { 9581 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 9582 struct lpfc_dmabuf, list); 9583 9584 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 9585 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 9586 rqbp->buffer_count--; 9587 } 9588 return 1; 9589 } 9590 9591 static int 9592 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 9593 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 9594 int qidx, uint32_t qtype) 9595 { 9596 struct lpfc_sli_ring *pring; 9597 int rc; 9598 9599 if (!eq || !cq || !wq) { 9600 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9601 "6085 Fast-path %s (%d) not allocated\n", 9602 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 9603 return -ENOMEM; 9604 } 9605 9606 /* create the Cq first */ 9607 rc = lpfc_cq_create(phba, cq, eq, 9608 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 9609 if (rc) { 9610 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9611 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9612 qidx, (uint32_t)rc); 9613 return rc; 9614 } 9615 9616 if (qtype != LPFC_MBOX) { 9617 /* Setup cq_map for fast lookup */ 9618 if (cq_map) 9619 *cq_map = cq->queue_id; 9620 9621 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9622 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 9623 qidx, cq->queue_id, qidx, eq->queue_id); 9624 9625 /* create the wq */ 9626 rc = lpfc_wq_create(phba, wq, cq, qtype); 9627 if (rc) { 9628 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9629 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 9630 qidx, (uint32_t)rc); 9631 /* no need to tear down cq - caller will do so */ 9632 return rc; 9633 } 9634 9635 /* Bind this CQ/WQ to the NVME ring */ 9636 pring = wq->pring; 9637 pring->sli.sli4.wqp = (void *)wq; 9638 cq->pring = pring; 9639 9640 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9641 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 9642 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 9643 } else { 9644 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 9645 if (rc) { 9646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9647 "0539 Failed setup of slow-path MQ: " 9648 "rc = 0x%x\n", rc); 9649 /* no need to tear down cq - caller will do so */ 9650 return rc; 9651 } 9652 9653 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9654 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 9655 phba->sli4_hba.mbx_wq->queue_id, 9656 phba->sli4_hba.mbx_cq->queue_id); 9657 } 9658 9659 return 0; 9660 } 9661 9662 /** 9663 * lpfc_setup_cq_lookup - Setup the CQ lookup table 9664 * @phba: pointer to lpfc hba data structure. 9665 * 9666 * This routine will populate the cq_lookup table by all 9667 * available CQ queue_id's. 9668 **/ 9669 static void 9670 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 9671 { 9672 struct lpfc_queue *eq, *childq; 9673 int qidx; 9674 9675 memset(phba->sli4_hba.cq_lookup, 0, 9676 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 9677 /* Loop thru all IRQ vectors */ 9678 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9679 /* Get the EQ corresponding to the IRQ vector */ 9680 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9681 if (!eq) 9682 continue; 9683 /* Loop through all CQs associated with that EQ */ 9684 list_for_each_entry(childq, &eq->child_list, list) { 9685 if (childq->queue_id > phba->sli4_hba.cq_max) 9686 continue; 9687 if (childq->subtype == LPFC_IO) 9688 phba->sli4_hba.cq_lookup[childq->queue_id] = 9689 childq; 9690 } 9691 } 9692 } 9693 9694 /** 9695 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 9696 * @phba: pointer to lpfc hba data structure. 9697 * 9698 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 9699 * operation. 9700 * 9701 * Return codes 9702 * 0 - successful 9703 * -ENOMEM - No available memory 9704 * -EIO - The mailbox failed to complete successfully. 
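 *
 * lpfc_sli4_queue_create() must have been called first so that all of the
 * queue structures referenced here have already been allocated.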
9705 **/ 9706 int 9707 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 9708 { 9709 uint32_t shdr_status, shdr_add_status; 9710 union lpfc_sli4_cfg_shdr *shdr; 9711 struct lpfc_vector_map_info *cpup; 9712 struct lpfc_sli4_hdw_queue *qp; 9713 LPFC_MBOXQ_t *mboxq; 9714 int qidx, cpu; 9715 uint32_t length, usdelay; 9716 int rc = -ENOMEM; 9717 9718 /* Check for dual-ULP support */ 9719 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9720 if (!mboxq) { 9721 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9722 "3249 Unable to allocate memory for " 9723 "QUERY_FW_CFG mailbox command\n"); 9724 return -ENOMEM; 9725 } 9726 length = (sizeof(struct lpfc_mbx_query_fw_config) - 9727 sizeof(struct lpfc_sli4_cfg_mhdr)); 9728 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9729 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 9730 length, LPFC_SLI4_MBX_EMBED); 9731 9732 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9733 9734 shdr = (union lpfc_sli4_cfg_shdr *) 9735 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9736 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9737 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9738 if (shdr_status || shdr_add_status || rc) { 9739 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9740 "3250 QUERY_FW_CFG mailbox failed with status " 9741 "x%x add_status x%x, mbx status x%x\n", 9742 shdr_status, shdr_add_status, rc); 9743 mempool_free(mboxq, phba->mbox_mem_pool); 9744 rc = -ENXIO; 9745 goto out_error; 9746 } 9747 9748 phba->sli4_hba.fw_func_mode = 9749 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 9750 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 9751 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 9752 phba->sli4_hba.physical_port = 9753 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 9754 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9755 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 9756 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 9757 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 9758 9759 mempool_free(mboxq, phba->mbox_mem_pool); 9760 9761 /* 9762 * Set up HBA Event Queues (EQs) 9763 */ 9764 qp = phba->sli4_hba.hdwq; 9765 9766 /* Set up HBA event queue */ 9767 if (!qp) { 9768 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9769 "3147 Fast-path EQs not allocated\n"); 9770 rc = -ENOMEM; 9771 goto out_error; 9772 } 9773 9774 /* Loop thru all IRQ vectors */ 9775 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9776 /* Create HBA Event Queues (EQs) in order */ 9777 for_each_present_cpu(cpu) { 9778 cpup = &phba->sli4_hba.cpu_map[cpu]; 9779 9780 /* Look for the CPU thats using that vector with 9781 * LPFC_CPU_FIRST_IRQ set. 
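 * Only one EQ is created per vector, on the first CPU that maps to it.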
9782 */ 9783 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 9784 continue; 9785 if (qidx != cpup->eq) 9786 continue; 9787 9788 /* Create an EQ for that vector */ 9789 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 9790 phba->cfg_fcp_imax); 9791 if (rc) { 9792 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9793 "0523 Failed setup of fast-path" 9794 " EQ (%d), rc = 0x%x\n", 9795 cpup->eq, (uint32_t)rc); 9796 goto out_destroy; 9797 } 9798 9799 /* Save the EQ for that vector in the hba_eq_hdl */ 9800 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 9801 qp[cpup->hdwq].hba_eq; 9802 9803 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9804 "2584 HBA EQ setup: queue[%d]-id=%d\n", 9805 cpup->eq, 9806 qp[cpup->hdwq].hba_eq->queue_id); 9807 } 9808 } 9809 9810 /* Loop thru all Hardware Queues */ 9811 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9812 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 9813 cpup = &phba->sli4_hba.cpu_map[cpu]; 9814 9815 /* Create the CQ/WQ corresponding to the Hardware Queue */ 9816 rc = lpfc_create_wq_cq(phba, 9817 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 9818 qp[qidx].io_cq, 9819 qp[qidx].io_wq, 9820 &phba->sli4_hba.hdwq[qidx].io_cq_map, 9821 qidx, 9822 LPFC_IO); 9823 if (rc) { 9824 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9825 "0535 Failed to setup fastpath " 9826 "IO WQ/CQ (%d), rc = 0x%x\n", 9827 qidx, (uint32_t)rc); 9828 goto out_destroy; 9829 } 9830 } 9831 9832 /* 9833 * Set up Slow Path Complete Queues (CQs) 9834 */ 9835 9836 /* Set up slow-path MBOX CQ/MQ */ 9837 9838 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 9839 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9840 "0528 %s not allocated\n", 9841 phba->sli4_hba.mbx_cq ? 9842 "Mailbox WQ" : "Mailbox CQ"); 9843 rc = -ENOMEM; 9844 goto out_destroy; 9845 } 9846 9847 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9848 phba->sli4_hba.mbx_cq, 9849 phba->sli4_hba.mbx_wq, 9850 NULL, 0, LPFC_MBOX); 9851 if (rc) { 9852 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9853 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 9854 (uint32_t)rc); 9855 goto out_destroy; 9856 } 9857 if (phba->nvmet_support) { 9858 if (!phba->sli4_hba.nvmet_cqset) { 9859 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9860 "3165 Fast-path NVME CQ Set " 9861 "array not allocated\n"); 9862 rc = -ENOMEM; 9863 goto out_destroy; 9864 } 9865 if (phba->cfg_nvmet_mrq > 1) { 9866 rc = lpfc_cq_create_set(phba, 9867 phba->sli4_hba.nvmet_cqset, 9868 qp, 9869 LPFC_WCQ, LPFC_NVMET); 9870 if (rc) { 9871 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9872 "3164 Failed setup of NVME CQ " 9873 "Set, rc = 0x%x\n", 9874 (uint32_t)rc); 9875 goto out_destroy; 9876 } 9877 } else { 9878 /* Set up NVMET Receive Complete Queue */ 9879 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 9880 qp[0].hba_eq, 9881 LPFC_WCQ, LPFC_NVMET); 9882 if (rc) { 9883 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9884 "6089 Failed setup NVMET CQ: " 9885 "rc = 0x%x\n", (uint32_t)rc); 9886 goto out_destroy; 9887 } 9888 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 9889 9890 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9891 "6090 NVMET CQ setup: cq-id=%d, " 9892 "parent eq-id=%d\n", 9893 phba->sli4_hba.nvmet_cqset[0]->queue_id, 9894 qp[0].hba_eq->queue_id); 9895 } 9896 } 9897 9898 /* Set up slow-path ELS WQ/CQ */ 9899 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 9900 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9901 "0530 ELS %s not allocated\n", 9902 phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); 9903 rc = -ENOMEM; 9904 goto out_destroy; 9905 } 9906 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9907 phba->sli4_hba.els_cq, 9908 phba->sli4_hba.els_wq, 9909 NULL, 0, LPFC_ELS); 9910 if (rc) { 9911 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9912 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 9913 (uint32_t)rc); 9914 goto out_destroy; 9915 } 9916 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9917 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 9918 phba->sli4_hba.els_wq->queue_id, 9919 phba->sli4_hba.els_cq->queue_id); 9920 9921 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9922 /* Set up NVME LS Complete Queue */ 9923 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9924 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9925 "6091 LS %s not allocated\n", 9926 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 9927 rc = -ENOMEM; 9928 goto out_destroy; 9929 } 9930 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9931 phba->sli4_hba.nvmels_cq, 9932 phba->sli4_hba.nvmels_wq, 9933 NULL, 0, LPFC_NVME_LS); 9934 if (rc) { 9935 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9936 "0526 Failed setup of NVVME LS WQ/CQ: " 9937 "rc = 0x%x\n", (uint32_t)rc); 9938 goto out_destroy; 9939 } 9940 9941 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9942 "6096 ELS WQ setup: wq-id=%d, " 9943 "parent cq-id=%d\n", 9944 phba->sli4_hba.nvmels_wq->queue_id, 9945 phba->sli4_hba.nvmels_cq->queue_id); 9946 } 9947 9948 /* 9949 * Create NVMET Receive Queue (RQ) 9950 */ 9951 if (phba->nvmet_support) { 9952 if ((!phba->sli4_hba.nvmet_cqset) || 9953 (!phba->sli4_hba.nvmet_mrq_hdr) || 9954 (!phba->sli4_hba.nvmet_mrq_data)) { 9955 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9956 "6130 MRQ CQ Queues not " 9957 "allocated\n"); 9958 rc = -ENOMEM; 9959 goto out_destroy; 9960 } 9961 if (phba->cfg_nvmet_mrq > 1) { 9962 rc = lpfc_mrq_create(phba, 9963 phba->sli4_hba.nvmet_mrq_hdr, 9964 phba->sli4_hba.nvmet_mrq_data, 9965 phba->sli4_hba.nvmet_cqset, 9966 LPFC_NVMET); 9967 if (rc) { 9968 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9969 "6098 Failed setup of NVMET " 9970 "MRQ: rc = 0x%x\n", 9971 (uint32_t)rc); 9972 goto out_destroy; 9973 } 9974 9975 } else { 9976 rc = lpfc_rq_create(phba, 9977 phba->sli4_hba.nvmet_mrq_hdr[0], 9978 phba->sli4_hba.nvmet_mrq_data[0], 9979 phba->sli4_hba.nvmet_cqset[0], 9980 LPFC_NVMET); 9981 if (rc) { 9982 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9983 "6057 Failed setup of NVMET " 9984 "Receive Queue: rc = 0x%x\n", 9985 (uint32_t)rc); 9986 goto out_destroy; 9987 } 9988 9989 lpfc_printf_log( 9990 phba, KERN_INFO, LOG_INIT, 9991 "6099 NVMET RQ setup: hdr-rq-id=%d, " 9992 "dat-rq-id=%d parent cq-id=%d\n", 9993 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 9994 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 9995 phba->sli4_hba.nvmet_cqset[0]->queue_id); 9996 9997 } 9998 } 9999 10000 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 10001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10002 "0540 Receive Queue not allocated\n"); 10003 rc = -ENOMEM; 10004 goto out_destroy; 10005 } 10006 10007 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 10008 phba->sli4_hba.els_cq, LPFC_USOL); 10009 if (rc) { 10010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10011 "0541 Failed setup of Receive Queue: " 10012 "rc = 0x%x\n", (uint32_t)rc); 10013 goto out_destroy; 10014 } 10015 10016 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10017 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 10018 "parent cq-id=%d\n", 10019 phba->sli4_hba.hdr_rq->queue_id, 10020 
phba->sli4_hba.dat_rq->queue_id, 10021 phba->sli4_hba.els_cq->queue_id); 10022 10023 if (phba->cfg_fcp_imax) 10024 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 10025 else 10026 usdelay = 0; 10027 10028 for (qidx = 0; qidx < phba->cfg_irq_chann; 10029 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 10030 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 10031 usdelay); 10032 10033 if (phba->sli4_hba.cq_max) { 10034 kfree(phba->sli4_hba.cq_lookup); 10035 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 10036 sizeof(struct lpfc_queue *), GFP_KERNEL); 10037 if (!phba->sli4_hba.cq_lookup) { 10038 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10039 "0549 Failed setup of CQ Lookup table: " 10040 "size 0x%x\n", phba->sli4_hba.cq_max); 10041 rc = -ENOMEM; 10042 goto out_destroy; 10043 } 10044 lpfc_setup_cq_lookup(phba); 10045 } 10046 return 0; 10047 10048 out_destroy: 10049 lpfc_sli4_queue_unset(phba); 10050 out_error: 10051 return rc; 10052 } 10053 10054 /** 10055 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 10056 * @phba: pointer to lpfc hba data structure. 10057 * 10058 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 10059 * operation. 10060 * 10061 * Return codes 10062 * 0 - successful 10063 * -ENOMEM - No available memory 10064 * -EIO - The mailbox failed to complete successfully. 10065 **/ 10066 void 10067 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 10068 { 10069 struct lpfc_sli4_hdw_queue *qp; 10070 struct lpfc_queue *eq; 10071 int qidx; 10072 10073 /* Unset mailbox command work queue */ 10074 if (phba->sli4_hba.mbx_wq) 10075 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 10076 10077 /* Unset NVME LS work queue */ 10078 if (phba->sli4_hba.nvmels_wq) 10079 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 10080 10081 /* Unset ELS work queue */ 10082 if (phba->sli4_hba.els_wq) 10083 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 10084 10085 /* Unset unsolicited receive queue */ 10086 if (phba->sli4_hba.hdr_rq) 10087 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 10088 phba->sli4_hba.dat_rq); 10089 10090 /* Unset mailbox command complete queue */ 10091 if (phba->sli4_hba.mbx_cq) 10092 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 10093 10094 /* Unset ELS complete queue */ 10095 if (phba->sli4_hba.els_cq) 10096 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 10097 10098 /* Unset NVME LS complete queue */ 10099 if (phba->sli4_hba.nvmels_cq) 10100 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 10101 10102 if (phba->nvmet_support) { 10103 /* Unset NVMET MRQ queue */ 10104 if (phba->sli4_hba.nvmet_mrq_hdr) { 10105 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 10106 lpfc_rq_destroy( 10107 phba, 10108 phba->sli4_hba.nvmet_mrq_hdr[qidx], 10109 phba->sli4_hba.nvmet_mrq_data[qidx]); 10110 } 10111 10112 /* Unset NVMET CQ Set complete queue */ 10113 if (phba->sli4_hba.nvmet_cqset) { 10114 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 10115 lpfc_cq_destroy( 10116 phba, phba->sli4_hba.nvmet_cqset[qidx]); 10117 } 10118 } 10119 10120 /* Unset fast-path SLI4 queues */ 10121 if (phba->sli4_hba.hdwq) { 10122 /* Loop thru all Hardware Queues */ 10123 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 10124 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 10125 qp = &phba->sli4_hba.hdwq[qidx]; 10126 lpfc_wq_destroy(phba, qp->io_wq); 10127 lpfc_cq_destroy(phba, qp->io_cq); 10128 } 10129 /* Loop thru all IRQ vectors */ 10130 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 10131 /* Destroy the EQ corresponding to the IRQ vector */ 10132 eq = 
phba->sli4_hba.hba_eq_hdl[qidx].eq; 10133 lpfc_eq_destroy(phba, eq); 10134 } 10135 } 10136 10137 kfree(phba->sli4_hba.cq_lookup); 10138 phba->sli4_hba.cq_lookup = NULL; 10139 phba->sli4_hba.cq_max = 0; 10140 } 10141 10142 /** 10143 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 10144 * @phba: pointer to lpfc hba data structure. 10145 * 10146 * This routine is invoked to allocate and set up a pool of completion queue 10147 * events. The body of the completion queue event is a completion queue entry 10148 * CQE. For now, this pool is used for the interrupt service routine to queue 10149 * the following HBA completion queue events for the worker thread to process: 10150 * - Mailbox asynchronous events 10151 * - Receive queue completion unsolicited events 10152 * Later, this can be used for all the slow-path events. 10153 * 10154 * Return codes 10155 * 0 - successful 10156 * -ENOMEM - No available memory 10157 **/ 10158 static int 10159 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 10160 { 10161 struct lpfc_cq_event *cq_event; 10162 int i; 10163 10164 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 10165 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 10166 if (!cq_event) 10167 goto out_pool_create_fail; 10168 list_add_tail(&cq_event->list, 10169 &phba->sli4_hba.sp_cqe_event_pool); 10170 } 10171 return 0; 10172 10173 out_pool_create_fail: 10174 lpfc_sli4_cq_event_pool_destroy(phba); 10175 return -ENOMEM; 10176 } 10177 10178 /** 10179 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 10180 * @phba: pointer to lpfc hba data structure. 10181 * 10182 * This routine is invoked to free the pool of completion queue events at 10183 * driver unload time. Note that, it is the responsibility of the driver 10184 * cleanup routine to free all the outstanding completion-queue events 10185 * allocated from this pool back into the pool before invoking this routine 10186 * to destroy the pool. 10187 **/ 10188 static void 10189 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 10190 { 10191 struct lpfc_cq_event *cq_event, *next_cq_event; 10192 10193 list_for_each_entry_safe(cq_event, next_cq_event, 10194 &phba->sli4_hba.sp_cqe_event_pool, list) { 10195 list_del(&cq_event->list); 10196 kfree(cq_event); 10197 } 10198 } 10199 10200 /** 10201 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 10202 * @phba: pointer to lpfc hba data structure. 10203 * 10204 * This routine is the lock free version of the API invoked to allocate a 10205 * completion-queue event from the free pool. 10206 * 10207 * Return: Pointer to the newly allocated completion-queue event if successful 10208 * NULL otherwise. 10209 **/ 10210 struct lpfc_cq_event * 10211 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 10212 { 10213 struct lpfc_cq_event *cq_event = NULL; 10214 10215 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 10216 struct lpfc_cq_event, list); 10217 return cq_event; 10218 } 10219 10220 /** 10221 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 10222 * @phba: pointer to lpfc hba data structure. 10223 * 10224 * This routine is the lock version of the API invoked to allocate a 10225 * completion-queue event from the free pool. 10226 * 10227 * Return: Pointer to the newly allocated completion-queue event if successful 10228 * NULL otherwise. 
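 * The pool is protected by hbalock, which is taken here, so the caller
 * must not already hold that lock.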
10229 **/
10230 struct lpfc_cq_event *
10231 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10232 {
10233 struct lpfc_cq_event *cq_event;
10234 unsigned long iflags;
10235
10236 spin_lock_irqsave(&phba->hbalock, iflags);
10237 cq_event = __lpfc_sli4_cq_event_alloc(phba);
10238 spin_unlock_irqrestore(&phba->hbalock, iflags);
10239 return cq_event;
10240 }
10241
10242 /**
10243 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10244 * @phba: pointer to lpfc hba data structure.
10245 * @cq_event: pointer to the completion queue event to be freed.
10246 *
10247 * This routine is the lock free version of the API invoked to release a
10248 * completion-queue event back into the free pool.
10249 **/
10250 void
10251 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10252 struct lpfc_cq_event *cq_event)
10253 {
10254 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
10255 }
10256
10257 /**
10258 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10259 * @phba: pointer to lpfc hba data structure.
10260 * @cq_event: pointer to the completion queue event to be freed.
10261 *
10262 * This routine is the lock version of the API invoked to release a
10263 * completion-queue event back into the free pool.
10264 **/
10265 void
10266 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10267 struct lpfc_cq_event *cq_event)
10268 {
10269 unsigned long iflags;
10270 spin_lock_irqsave(&phba->hbalock, iflags);
10271 __lpfc_sli4_cq_event_release(phba, cq_event);
10272 spin_unlock_irqrestore(&phba->hbalock, iflags);
10273 }
10274
10275 /**
10276 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
10277 * @phba: pointer to lpfc hba data structure.
10278 *
10279 * This routine frees all the pending completion-queue events back
10280 * into the free pool for device reset.
10281 **/
10282 static void
10283 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10284 {
10285 LIST_HEAD(cq_event_list);
10286 struct lpfc_cq_event *cq_event;
10287 unsigned long iflags;
10288
10289 /* Retrieve all the pending WCQEs from pending WCQE lists */
10290
10291 /* Pending ELS XRI abort events */
10292 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10293 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10294 &cq_event_list);
10295 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10296
10297 /* Pending async events */
10298 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
10299 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10300 &cq_event_list);
10301 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
10302
10303 while (!list_empty(&cq_event_list)) {
10304 list_remove_head(&cq_event_list, cq_event,
10305 struct lpfc_cq_event, list);
10306 lpfc_sli4_cq_event_release(phba, cq_event);
10307 }
10308 }
10309
10310 /**
10311 * lpfc_pci_function_reset - Reset pci function.
10312 * @phba: pointer to lpfc hba data structure.
10313 *
10314 * This routine is invoked to request a PCI function reset. It destroys
10315 * all resources assigned to the PCI function that originates this request.
10316 *
10317 * Return codes
10318 * 0 - successful
10319 * -ENOMEM - No available memory
10320 * -EIO - The mailbox failed to complete successfully.
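 * -ENODEV - The reset failed or the port did not report ready.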
10321 **/ 10322 int 10323 lpfc_pci_function_reset(struct lpfc_hba *phba) 10324 { 10325 LPFC_MBOXQ_t *mboxq; 10326 uint32_t rc = 0, if_type; 10327 uint32_t shdr_status, shdr_add_status; 10328 uint32_t rdy_chk; 10329 uint32_t port_reset = 0; 10330 union lpfc_sli4_cfg_shdr *shdr; 10331 struct lpfc_register reg_data; 10332 uint16_t devid; 10333 10334 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10335 switch (if_type) { 10336 case LPFC_SLI_INTF_IF_TYPE_0: 10337 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10338 GFP_KERNEL); 10339 if (!mboxq) { 10340 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10341 "0494 Unable to allocate memory for " 10342 "issuing SLI_FUNCTION_RESET mailbox " 10343 "command\n"); 10344 return -ENOMEM; 10345 } 10346 10347 /* Setup PCI function reset mailbox-ioctl command */ 10348 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10349 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 10350 LPFC_SLI4_MBX_EMBED); 10351 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10352 shdr = (union lpfc_sli4_cfg_shdr *) 10353 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 10354 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10355 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 10356 &shdr->response); 10357 mempool_free(mboxq, phba->mbox_mem_pool); 10358 if (shdr_status || shdr_add_status || rc) { 10359 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10360 "0495 SLI_FUNCTION_RESET mailbox " 10361 "failed with status x%x add_status x%x," 10362 " mbx status x%x\n", 10363 shdr_status, shdr_add_status, rc); 10364 rc = -ENXIO; 10365 } 10366 break; 10367 case LPFC_SLI_INTF_IF_TYPE_2: 10368 case LPFC_SLI_INTF_IF_TYPE_6: 10369 wait: 10370 /* 10371 * Poll the Port Status Register and wait for RDY for 10372 * up to 30 seconds. If the port doesn't respond, treat 10373 * it as an error. 10374 */ 10375 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 10376 if (lpfc_readl(phba->sli4_hba.u.if_type2. 10377 STATUSregaddr, ®_data.word0)) { 10378 rc = -ENODEV; 10379 goto out; 10380 } 10381 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 10382 break; 10383 msleep(20); 10384 } 10385 10386 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 10387 phba->work_status[0] = readl( 10388 phba->sli4_hba.u.if_type2.ERR1regaddr); 10389 phba->work_status[1] = readl( 10390 phba->sli4_hba.u.if_type2.ERR2regaddr); 10391 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10392 "2890 Port not ready, port status reg " 10393 "0x%x error 1=0x%x, error 2=0x%x\n", 10394 reg_data.word0, 10395 phba->work_status[0], 10396 phba->work_status[1]); 10397 rc = -ENODEV; 10398 goto out; 10399 } 10400 10401 if (!port_reset) { 10402 /* 10403 * Reset the port now 10404 */ 10405 reg_data.word0 = 0; 10406 bf_set(lpfc_sliport_ctrl_end, ®_data, 10407 LPFC_SLIPORT_LITTLE_ENDIAN); 10408 bf_set(lpfc_sliport_ctrl_ip, ®_data, 10409 LPFC_SLIPORT_INIT_PORT); 10410 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 10411 CTRLregaddr); 10412 /* flush */ 10413 pci_read_config_word(phba->pcidev, 10414 PCI_DEVICE_ID, &devid); 10415 10416 port_reset = 1; 10417 msleep(20); 10418 goto wait; 10419 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 10420 rc = -ENODEV; 10421 goto out; 10422 } 10423 break; 10424 10425 case LPFC_SLI_INTF_IF_TYPE_1: 10426 default: 10427 break; 10428 } 10429 10430 out: 10431 /* Catch the not-ready port failure after a port reset. 
*/ 10432 if (rc) { 10433 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10434 "3317 HBA not functional: IP Reset Failed " 10435 "try: echo fw_reset > board_mode\n"); 10436 rc = -ENODEV; 10437 } 10438 10439 return rc; 10440 } 10441 10442 /** 10443 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 10444 * @phba: pointer to lpfc hba data structure. 10445 * 10446 * This routine is invoked to set up the PCI device memory space for device 10447 * with SLI-4 interface spec. 10448 * 10449 * Return codes 10450 * 0 - successful 10451 * other values - error 10452 **/ 10453 static int 10454 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 10455 { 10456 struct pci_dev *pdev = phba->pcidev; 10457 unsigned long bar0map_len, bar1map_len, bar2map_len; 10458 int error; 10459 uint32_t if_type; 10460 10461 if (!pdev) 10462 return -ENODEV; 10463 10464 /* Set the device DMA mask size */ 10465 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10466 if (error) 10467 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10468 if (error) 10469 return error; 10470 10471 /* 10472 * The BARs and register set definitions and offset locations are 10473 * dependent on the if_type. 10474 */ 10475 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 10476 &phba->sli4_hba.sli_intf.word0)) { 10477 return -ENODEV; 10478 } 10479 10480 /* There is no SLI3 failback for SLI4 devices. */ 10481 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 10482 LPFC_SLI_INTF_VALID) { 10483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10484 "2894 SLI_INTF reg contents invalid " 10485 "sli_intf reg 0x%x\n", 10486 phba->sli4_hba.sli_intf.word0); 10487 return -ENODEV; 10488 } 10489 10490 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10491 /* 10492 * Get the bus address of SLI4 device Bar regions and the 10493 * number of bytes required by each mapping. The mapping of the 10494 * particular PCI BARs regions is dependent on the type of 10495 * SLI4 device. 
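 * The config registers come from BAR0 when present; the control, doorbell
 * and DPP register BARs mapped below depend on the if_type.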
10496 */ 10497 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 10498 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 10499 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 10500 10501 /* 10502 * Map SLI4 PCI Config Space Register base to a kernel virtual 10503 * addr 10504 */ 10505 phba->sli4_hba.conf_regs_memmap_p = 10506 ioremap(phba->pci_bar0_map, bar0map_len); 10507 if (!phba->sli4_hba.conf_regs_memmap_p) { 10508 dev_printk(KERN_ERR, &pdev->dev, 10509 "ioremap failed for SLI4 PCI config " 10510 "registers.\n"); 10511 return -ENODEV; 10512 } 10513 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 10514 /* Set up BAR0 PCI config space register memory map */ 10515 lpfc_sli4_bar0_register_memmap(phba, if_type); 10516 } else { 10517 phba->pci_bar0_map = pci_resource_start(pdev, 1); 10518 bar0map_len = pci_resource_len(pdev, 1); 10519 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10520 dev_printk(KERN_ERR, &pdev->dev, 10521 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 10522 return -ENODEV; 10523 } 10524 phba->sli4_hba.conf_regs_memmap_p = 10525 ioremap(phba->pci_bar0_map, bar0map_len); 10526 if (!phba->sli4_hba.conf_regs_memmap_p) { 10527 dev_printk(KERN_ERR, &pdev->dev, 10528 "ioremap failed for SLI4 PCI config " 10529 "registers.\n"); 10530 return -ENODEV; 10531 } 10532 lpfc_sli4_bar0_register_memmap(phba, if_type); 10533 } 10534 10535 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10536 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 10537 /* 10538 * Map SLI4 if type 0 HBA Control Register base to a 10539 * kernel virtual address and setup the registers. 10540 */ 10541 phba->pci_bar1_map = pci_resource_start(pdev, 10542 PCI_64BIT_BAR2); 10543 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10544 phba->sli4_hba.ctrl_regs_memmap_p = 10545 ioremap(phba->pci_bar1_map, 10546 bar1map_len); 10547 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 10548 dev_err(&pdev->dev, 10549 "ioremap failed for SLI4 HBA " 10550 "control registers.\n"); 10551 error = -ENOMEM; 10552 goto out_iounmap_conf; 10553 } 10554 phba->pci_bar2_memmap_p = 10555 phba->sli4_hba.ctrl_regs_memmap_p; 10556 lpfc_sli4_bar1_register_memmap(phba, if_type); 10557 } else { 10558 error = -ENOMEM; 10559 goto out_iounmap_conf; 10560 } 10561 } 10562 10563 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 10564 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 10565 /* 10566 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 10567 * virtual address and setup the registers. 10568 */ 10569 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 10570 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10571 phba->sli4_hba.drbl_regs_memmap_p = 10572 ioremap(phba->pci_bar1_map, bar1map_len); 10573 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10574 dev_err(&pdev->dev, 10575 "ioremap failed for SLI4 HBA doorbell registers.\n"); 10576 error = -ENOMEM; 10577 goto out_iounmap_conf; 10578 } 10579 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 10580 lpfc_sli4_bar1_register_memmap(phba, if_type); 10581 } 10582 10583 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10584 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10585 /* 10586 * Map SLI4 if type 0 HBA Doorbell Register base to 10587 * a kernel virtual address and setup the registers. 
10588 */ 10589 phba->pci_bar2_map = pci_resource_start(pdev, 10590 PCI_64BIT_BAR4); 10591 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10592 phba->sli4_hba.drbl_regs_memmap_p = 10593 ioremap(phba->pci_bar2_map, 10594 bar2map_len); 10595 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10596 dev_err(&pdev->dev, 10597 "ioremap failed for SLI4 HBA" 10598 " doorbell registers.\n"); 10599 error = -ENOMEM; 10600 goto out_iounmap_ctrl; 10601 } 10602 phba->pci_bar4_memmap_p = 10603 phba->sli4_hba.drbl_regs_memmap_p; 10604 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 10605 if (error) 10606 goto out_iounmap_all; 10607 } else { 10608 error = -ENOMEM; 10609 goto out_iounmap_all; 10610 } 10611 } 10612 10613 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 10614 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10615 /* 10616 * Map SLI4 if type 6 HBA DPP Register base to a kernel 10617 * virtual address and setup the registers. 10618 */ 10619 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 10620 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10621 phba->sli4_hba.dpp_regs_memmap_p = 10622 ioremap(phba->pci_bar2_map, bar2map_len); 10623 if (!phba->sli4_hba.dpp_regs_memmap_p) { 10624 dev_err(&pdev->dev, 10625 "ioremap failed for SLI4 HBA dpp registers.\n"); 10626 error = -ENOMEM; 10627 goto out_iounmap_ctrl; 10628 } 10629 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 10630 } 10631 10632 /* Set up the EQ/CQ register handeling functions now */ 10633 switch (if_type) { 10634 case LPFC_SLI_INTF_IF_TYPE_0: 10635 case LPFC_SLI_INTF_IF_TYPE_2: 10636 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 10637 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 10638 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 10639 break; 10640 case LPFC_SLI_INTF_IF_TYPE_6: 10641 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 10642 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 10643 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 10644 break; 10645 default: 10646 break; 10647 } 10648 10649 return 0; 10650 10651 out_iounmap_all: 10652 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10653 out_iounmap_ctrl: 10654 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10655 out_iounmap_conf: 10656 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10657 10658 return error; 10659 } 10660 10661 /** 10662 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 10663 * @phba: pointer to lpfc hba data structure. 10664 * 10665 * This routine is invoked to unset the PCI device memory space for device 10666 * with SLI-4 interface spec. 
10667 **/ 10668 static void 10669 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 10670 { 10671 uint32_t if_type; 10672 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10673 10674 switch (if_type) { 10675 case LPFC_SLI_INTF_IF_TYPE_0: 10676 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10677 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10678 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10679 break; 10680 case LPFC_SLI_INTF_IF_TYPE_2: 10681 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10682 break; 10683 case LPFC_SLI_INTF_IF_TYPE_6: 10684 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10685 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10686 if (phba->sli4_hba.dpp_regs_memmap_p) 10687 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 10688 break; 10689 case LPFC_SLI_INTF_IF_TYPE_1: 10690 default: 10691 dev_printk(KERN_ERR, &phba->pcidev->dev, 10692 "FATAL - unsupported SLI4 interface type - %d\n", 10693 if_type); 10694 break; 10695 } 10696 } 10697 10698 /** 10699 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 10700 * @phba: pointer to lpfc hba data structure. 10701 * 10702 * This routine is invoked to enable the MSI-X interrupt vectors to device 10703 * with SLI-3 interface specs. 10704 * 10705 * Return codes 10706 * 0 - successful 10707 * other values - error 10708 **/ 10709 static int 10710 lpfc_sli_enable_msix(struct lpfc_hba *phba) 10711 { 10712 int rc; 10713 LPFC_MBOXQ_t *pmb; 10714 10715 /* Set up MSI-X multi-message vectors */ 10716 rc = pci_alloc_irq_vectors(phba->pcidev, 10717 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 10718 if (rc < 0) { 10719 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10720 "0420 PCI enable MSI-X failed (%d)\n", rc); 10721 goto vec_fail_out; 10722 } 10723 10724 /* 10725 * Assign MSI-X vectors to interrupt handlers 10726 */ 10727 10728 /* vector-0 is associated to slow-path handler */ 10729 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 10730 &lpfc_sli_sp_intr_handler, 0, 10731 LPFC_SP_DRIVER_HANDLER_NAME, phba); 10732 if (rc) { 10733 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10734 "0421 MSI-X slow-path request_irq failed " 10735 "(%d)\n", rc); 10736 goto msi_fail_out; 10737 } 10738 10739 /* vector-1 is associated to fast-path handler */ 10740 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 10741 &lpfc_sli_fp_intr_handler, 0, 10742 LPFC_FP_DRIVER_HANDLER_NAME, phba); 10743 10744 if (rc) { 10745 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10746 "0429 MSI-X fast-path request_irq failed " 10747 "(%d)\n", rc); 10748 goto irq_fail_out; 10749 } 10750 10751 /* 10752 * Configure HBA MSI-X attention conditions to messages 10753 */ 10754 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10755 10756 if (!pmb) { 10757 rc = -ENOMEM; 10758 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10759 "0474 Unable to allocate memory for issuing " 10760 "MBOX_CONFIG_MSI command\n"); 10761 goto mem_fail_out; 10762 } 10763 rc = lpfc_config_msi(phba, pmb); 10764 if (rc) 10765 goto mbx_fail_out; 10766 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10767 if (rc != MBX_SUCCESS) { 10768 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 10769 "0351 Config MSI mailbox command failed, " 10770 "mbxCmd x%x, mbxStatus x%x\n", 10771 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 10772 goto mbx_fail_out; 10773 } 10774 10775 /* Free memory allocated for mailbox command */ 10776 mempool_free(pmb, phba->mbox_mem_pool); 10777 return rc; 10778 10779 mbx_fail_out: 10780 /* Free memory allocated for mailbox command */ 10781 mempool_free(pmb, 
phba->mbox_mem_pool); 10782 10783 mem_fail_out: 10784 /* free the irq already requested */ 10785 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 10786 10787 irq_fail_out: 10788 /* free the irq already requested */ 10789 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 10790 10791 msi_fail_out: 10792 /* Unconfigure MSI-X capability structure */ 10793 pci_free_irq_vectors(phba->pcidev); 10794 10795 vec_fail_out: 10796 return rc; 10797 } 10798 10799 /** 10800 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 10801 * @phba: pointer to lpfc hba data structure. 10802 * 10803 * This routine is invoked to enable the MSI interrupt mode to device with 10804 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 10805 * enable the MSI vector. The device driver is responsible for calling the 10806 * request_irq() to register the MSI vector with an interrupt handler, which 10807 * is done in this function. 10808 * 10809 * Return codes 10810 * 0 - successful 10811 * other values - error 10812 */ 10813 static int 10814 lpfc_sli_enable_msi(struct lpfc_hba *phba) 10815 { 10816 int rc; 10817 10818 rc = pci_enable_msi(phba->pcidev); 10819 if (!rc) 10820 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10821 "0462 PCI enable MSI mode success.\n"); 10822 else { 10823 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10824 "0471 PCI enable MSI mode failed (%d)\n", rc); 10825 return rc; 10826 } 10827 10828 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10829 0, LPFC_DRIVER_NAME, phba); 10830 if (rc) { 10831 pci_disable_msi(phba->pcidev); 10832 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10833 "0478 MSI request_irq failed (%d)\n", rc); 10834 } 10835 return rc; 10836 } 10837 10838 /** 10839 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 10840 * @phba: pointer to lpfc hba data structure. 10841 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 10842 * 10843 * This routine is invoked to enable device interrupt and associate driver's 10844 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 10845 * spec. Depending on the interrupt mode configured for the driver, the driver 10846 * will try to fall back from the configured interrupt mode to an interrupt 10847 * mode which is supported by the platform, kernel, and device in the order 10848 * of: 10849 * MSI-X -> MSI -> IRQ.
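* Before any interrupt mode is enabled, the port is configured for SLI-3 with lpfc_sli_config_port(); a failure there returns LPFC_INTR_ERROR.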
10850 * 10851 * Return codes 10852 * 0 - successful 10853 * other values - error 10854 **/ 10855 static uint32_t 10856 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10857 { 10858 uint32_t intr_mode = LPFC_INTR_ERROR; 10859 int retval; 10860 10861 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 10862 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 10863 if (retval) 10864 return intr_mode; 10865 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; 10866 10867 if (cfg_mode == 2) { 10868 /* Now, try to enable MSI-X interrupt mode */ 10869 retval = lpfc_sli_enable_msix(phba); 10870 if (!retval) { 10871 /* Indicate initialization to MSI-X mode */ 10872 phba->intr_type = MSIX; 10873 intr_mode = 2; 10874 } 10875 } 10876 10877 /* Fallback to MSI if MSI-X initialization failed */ 10878 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10879 retval = lpfc_sli_enable_msi(phba); 10880 if (!retval) { 10881 /* Indicate initialization to MSI mode */ 10882 phba->intr_type = MSI; 10883 intr_mode = 1; 10884 } 10885 } 10886 10887 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 10888 if (phba->intr_type == NONE) { 10889 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10890 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10891 if (!retval) { 10892 /* Indicate initialization to INTx mode */ 10893 phba->intr_type = INTx; 10894 intr_mode = 0; 10895 } 10896 } 10897 return intr_mode; 10898 } 10899 10900 /** 10901 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 10902 * @phba: pointer to lpfc hba data structure. 10903 * 10904 * This routine is invoked to disable device interrupt and disassociate the 10905 * driver's interrupt handler(s) from interrupt vector(s) to device with 10906 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 10907 * release the interrupt vector(s) for the message signaled interrupt. 10908 **/ 10909 static void 10910 lpfc_sli_disable_intr(struct lpfc_hba *phba) 10911 { 10912 int nr_irqs, i; 10913 10914 if (phba->intr_type == MSIX) 10915 nr_irqs = LPFC_MSIX_VECTORS; 10916 else 10917 nr_irqs = 1; 10918 10919 for (i = 0; i < nr_irqs; i++) 10920 free_irq(pci_irq_vector(phba->pcidev, i), phba); 10921 pci_free_irq_vectors(phba->pcidev); 10922 10923 /* Reset interrupt management states */ 10924 phba->intr_type = NONE; 10925 phba->sli.slistat.sli_intr = 0; 10926 } 10927 10928 /** 10929 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 10930 * @phba: pointer to lpfc hba data structure. 10931 * @id: EQ vector index or Hardware Queue index 10932 * @match: LPFC_FIND_BY_EQ = match by EQ 10933 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 10934 * Return the CPU that matches the selection criteria 10935 */ 10936 static uint16_t 10937 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 10938 { 10939 struct lpfc_vector_map_info *cpup; 10940 int cpu; 10941 10942 /* Loop through all CPUs */ 10943 for_each_present_cpu(cpu) { 10944 cpup = &phba->sli4_hba.cpu_map[cpu]; 10945 10946 /* If we are matching by EQ, there may be multiple CPUs 10947 * using the same vector, so select the one with 10948 * LPFC_CPU_FIRST_IRQ set.
10949 */ 10950 if ((match == LPFC_FIND_BY_EQ) && 10951 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 10952 (cpup->eq == id)) 10953 return cpu; 10954 10955 /* If matching by HDWQ, select the first CPU that matches */ 10956 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 10957 return cpu; 10958 } 10959 return 0; 10960 } 10961 10962 #ifdef CONFIG_X86 10963 /** 10964 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 10965 * @phba: pointer to lpfc hba data structure. 10966 * @cpu: CPU map index 10967 * @phys_id: CPU package physical id 10968 * @core_id: CPU core id 10969 */ 10970 static int 10971 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 10972 uint16_t phys_id, uint16_t core_id) 10973 { 10974 struct lpfc_vector_map_info *cpup; 10975 int idx; 10976 10977 for_each_present_cpu(idx) { 10978 cpup = &phba->sli4_hba.cpu_map[idx]; 10979 /* Does the cpup match the one we are looking for */ 10980 if ((cpup->phys_id == phys_id) && 10981 (cpup->core_id == core_id) && 10982 (cpu != idx)) 10983 return 1; 10984 } 10985 return 0; 10986 } 10987 #endif 10988 10989 /* 10990 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 10991 * @phba: pointer to lpfc hba data structure. 10992 * @eqidx: index for eq and irq vector 10993 * @flag: flags to set for vector_map structure 10994 * @cpu: cpu used to index vector_map structure 10995 * 10996 * The routine assigns eq info into vector_map structure 10997 */ 10998 static inline void 10999 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 11000 unsigned int cpu) 11001 { 11002 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 11003 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 11004 11005 cpup->eq = eqidx; 11006 cpup->flag |= flag; 11007 11008 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11009 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 11010 cpu, eqhdl->irq, cpup->eq, cpup->flag); 11011 } 11012 11013 /** 11014 * lpfc_cpu_map_array_init - Initialize cpu_map structure 11015 * @phba: pointer to lpfc hba data structure. 11016 * 11017 * The routine initializes the cpu_map array structure 11018 */ 11019 static void 11020 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 11021 { 11022 struct lpfc_vector_map_info *cpup; 11023 struct lpfc_eq_intr_info *eqi; 11024 int cpu; 11025 11026 for_each_possible_cpu(cpu) { 11027 cpup = &phba->sli4_hba.cpu_map[cpu]; 11028 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 11029 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 11030 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 11031 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 11032 cpup->flag = 0; 11033 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 11034 INIT_LIST_HEAD(&eqi->list); 11035 eqi->icnt = 0; 11036 } 11037 } 11038 11039 /** 11040 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 11041 * @phba: pointer to lpfc hba data structure. 11042 * 11043 * The routine initializes the hba_eq_hdl array structure 11044 */ 11045 static void 11046 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 11047 { 11048 struct lpfc_hba_eq_hdl *eqhdl; 11049 int i; 11050 11051 for (i = 0; i < phba->cfg_irq_chann; i++) { 11052 eqhdl = lpfc_get_eq_hdl(i); 11053 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; 11054 eqhdl->phba = phba; 11055 } 11056 } 11057 11058 /** 11059 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 11060 * @phba: pointer to lpfc hba data structure. 11061 * @vectors: number of msix vectors allocated. 11062 * 11063 * The routine will figure out the CPU affinity assignment for every 11064 * MSI-X vector allocated for the HBA. 
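* Each present CPU is stamped with its physical socket and core id, and any CPU whose map entry is left without an EQ assignment inherits one from a peer CPU, preferring a peer on the same phys_id.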
11065 * In addition, the CPU to IO channel mapping will be calculated 11066 * and the phba->sli4_hba.cpu_map array will reflect this. 11067 */ 11068 static void 11069 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 11070 { 11071 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; 11072 int max_phys_id, min_phys_id; 11073 int max_core_id, min_core_id; 11074 struct lpfc_vector_map_info *cpup; 11075 struct lpfc_vector_map_info *new_cpup; 11076 #ifdef CONFIG_X86 11077 struct cpuinfo_x86 *cpuinfo; 11078 #endif 11079 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 11080 struct lpfc_hdwq_stat *c_stat; 11081 #endif 11082 11083 max_phys_id = 0; 11084 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 11085 max_core_id = 0; 11086 min_core_id = LPFC_VECTOR_MAP_EMPTY; 11087 11088 /* Update CPU map with physical id and core id of each CPU */ 11089 for_each_present_cpu(cpu) { 11090 cpup = &phba->sli4_hba.cpu_map[cpu]; 11091 #ifdef CONFIG_X86 11092 cpuinfo = &cpu_data(cpu); 11093 cpup->phys_id = cpuinfo->phys_proc_id; 11094 cpup->core_id = cpuinfo->cpu_core_id; 11095 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 11096 cpup->flag |= LPFC_CPU_MAP_HYPER; 11097 #else 11098 /* No distinction between CPUs for other platforms */ 11099 cpup->phys_id = 0; 11100 cpup->core_id = cpu; 11101 #endif 11102 11103 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11104 "3328 CPU %d physid %d coreid %d flag x%x\n", 11105 cpu, cpup->phys_id, cpup->core_id, cpup->flag); 11106 11107 if (cpup->phys_id > max_phys_id) 11108 max_phys_id = cpup->phys_id; 11109 if (cpup->phys_id < min_phys_id) 11110 min_phys_id = cpup->phys_id; 11111 11112 if (cpup->core_id > max_core_id) 11113 max_core_id = cpup->core_id; 11114 if (cpup->core_id < min_core_id) 11115 min_core_id = cpup->core_id; 11116 } 11117 11118 /* After looking at each irq vector assigned to this pcidev, it's 11119 * possible to see that not ALL CPUs have been accounted for. 11120 * Next we will set any unassigned (unaffinitized) cpu map 11121 * entries to an IRQ on the same phys_id. 11122 */ 11123 first_cpu = cpumask_first(cpu_present_mask); 11124 start_cpu = first_cpu; 11125 11126 for_each_present_cpu(cpu) { 11127 cpup = &phba->sli4_hba.cpu_map[cpu]; 11128 11129 /* Is this CPU entry unassigned */ 11130 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 11131 /* Mark CPU as IRQ not assigned by the kernel */ 11132 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 11133 11134 /* If so, find a new_cpup that's on the SAME 11135 * phys_id as cpup. start_cpu will start where we 11136 * left off so all unassigned entries don't get assigned 11137 * the IRQ of the first entry. 11138 */ 11139 new_cpu = start_cpu; 11140 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11141 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11142 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 11143 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && 11144 (new_cpup->phys_id == cpup->phys_id)) 11145 goto found_same; 11146 new_cpu = cpumask_next( 11147 new_cpu, cpu_present_mask); 11148 if (new_cpu == nr_cpumask_bits) 11149 new_cpu = first_cpu; 11150 } 11151 /* At this point, we leave the CPU as unassigned */ 11152 continue; 11153 found_same: 11154 /* We found a matching phys_id, so copy the IRQ info */ 11155 cpup->eq = new_cpup->eq; 11156 11157 /* Bump start_cpu to the next slot to minimize the 11158 * chance of having multiple unassigned CPU entries 11159 * selecting the same IRQ.
11160 */ 11161 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 11162 if (start_cpu == nr_cpumask_bits) 11163 start_cpu = first_cpu; 11164 11165 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11166 "3337 Set Affinity: CPU %d " 11167 "eq %d from peer cpu %d same " 11168 "phys_id (%d)\n", 11169 cpu, cpup->eq, new_cpu, 11170 cpup->phys_id); 11171 } 11172 } 11173 11174 /* Set any unassigned cpu map entries to an IRQ on any phys_id */ 11175 start_cpu = first_cpu; 11176 11177 for_each_present_cpu(cpu) { 11178 cpup = &phba->sli4_hba.cpu_map[cpu]; 11179 11180 /* Is this entry unassigned */ 11181 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 11182 /* Mark it as IRQ not assigned by the kernel */ 11183 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 11184 11185 /* If so, find a new_cpup that's on ANY phys_id 11186 * as the cpup. start_cpu will start where we 11187 * left off so all unassigned entries don't get 11188 * assigned the IRQ of the first entry. 11189 */ 11190 new_cpu = start_cpu; 11191 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11192 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11193 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 11194 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) 11195 goto found_any; 11196 new_cpu = cpumask_next( 11197 new_cpu, cpu_present_mask); 11198 if (new_cpu == nr_cpumask_bits) 11199 new_cpu = first_cpu; 11200 } 11201 /* We should never leave an entry unassigned */ 11202 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11203 "3339 Set Affinity: CPU %d " 11204 "eq %d UNASSIGNED\n", 11205 cpu, cpup->eq); 11206 continue; 11207 found_any: 11208 /* We found an available entry, copy the IRQ info */ 11209 cpup->eq = new_cpup->eq; 11210 11211 /* Bump start_cpu to the next slot to minimize the 11212 * chance of having multiple unassigned CPU entries 11213 * selecting the same IRQ. 11214 */ 11215 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 11216 if (start_cpu == nr_cpumask_bits) 11217 start_cpu = first_cpu; 11218 11219 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11220 "3338 Set Affinity: CPU %d " 11221 "eq %d from peer cpu %d (%d/%d)\n", 11222 cpu, cpup->eq, new_cpu, 11223 new_cpup->phys_id, new_cpup->core_id); 11224 } 11225 } 11226 11227 /* Assign hdwq indices that are unique across all cpus in the map 11228 * that are also FIRST_CPUs. 11229 */ 11230 idx = 0; 11231 for_each_present_cpu(cpu) { 11232 cpup = &phba->sli4_hba.cpu_map[cpu]; 11233 11234 /* Only FIRST IRQs get a hdwq index assignment. */ 11235 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11236 continue; 11237 11238 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ 11239 cpup->hdwq = idx; 11240 idx++; 11241 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11242 "3333 Set Affinity: CPU %d (phys %d core %d): " 11243 "hdwq %d eq %d flg x%x\n", 11244 cpu, cpup->phys_id, cpup->core_id, 11245 cpup->hdwq, cpup->eq, cpup->flag); 11246 } 11247 /* Associate a hdwq with each cpu_map entry 11248 * This will be 1 to 1 - hdwq to cpu, unless there are fewer 11249 * hardware queues than CPUs. For that case we will just round-robin 11250 * the available hardware queues as they get assigned to CPUs. 11251 * The next_idx is the idx from the FIRST_CPU loop above to account 11252 * for irq_chann < hdwq. The idx is used for round-robin assignments 11253 * and needs to start at 0. 11254 */ 11255 next_idx = idx; 11256 start_cpu = 0; 11257 idx = 0; 11258 for_each_present_cpu(cpu) { 11259 cpup = &phba->sli4_hba.cpu_map[cpu]; 11260 11261 /* FIRST cpus are already mapped.
*/ 11262 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 11263 continue; 11264 11265 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 11266 * of the unassigned cpus to the next idx so that all 11267 * hdw queues are fully utilized. 11268 */ 11269 if (next_idx < phba->cfg_hdw_queue) { 11270 cpup->hdwq = next_idx; 11271 next_idx++; 11272 continue; 11273 } 11274 11275 /* Not a First CPU and all hdw_queues are used. Reuse a 11276 * Hardware Queue for another CPU, so be smart about it 11277 * and pick one that has its IRQ/EQ mapped to the same phys_id 11278 * (CPU package) and core_id. 11279 */ 11280 new_cpu = start_cpu; 11281 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11282 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11283 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 11284 new_cpup->phys_id == cpup->phys_id && 11285 new_cpup->core_id == cpup->core_id) { 11286 goto found_hdwq; 11287 } 11288 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 11289 if (new_cpu == nr_cpumask_bits) 11290 new_cpu = first_cpu; 11291 } 11292 11293 /* If we can't match both phys_id and core_id, 11294 * settle for just a phys_id match. 11295 */ 11296 new_cpu = start_cpu; 11297 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11298 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11299 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 11300 new_cpup->phys_id == cpup->phys_id) 11301 goto found_hdwq; 11302 11303 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 11304 if (new_cpu == nr_cpumask_bits) 11305 new_cpu = first_cpu; 11306 } 11307 11308 /* Otherwise just round robin on cfg_hdw_queue */ 11309 cpup->hdwq = idx % phba->cfg_hdw_queue; 11310 idx++; 11311 goto logit; 11312 found_hdwq: 11313 /* We found an available entry, copy the IRQ info */ 11314 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 11315 if (start_cpu == nr_cpumask_bits) 11316 start_cpu = first_cpu; 11317 cpup->hdwq = new_cpup->hdwq; 11318 logit: 11319 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11320 "3335 Set Affinity: CPU %d (phys %d core %d): " 11321 "hdwq %d eq %d flg x%x\n", 11322 cpu, cpup->phys_id, cpup->core_id, 11323 cpup->hdwq, cpup->eq, cpup->flag); 11324 } 11325 11326 /* 11327 * Initialize the cpu_map slots for not-present cpus in case 11328 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 11329 */ 11330 idx = 0; 11331 for_each_possible_cpu(cpu) { 11332 cpup = &phba->sli4_hba.cpu_map[cpu]; 11333 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 11334 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 11335 c_stat->hdwq_no = cpup->hdwq; 11336 #endif 11337 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 11338 continue; 11339 11340 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 11341 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 11342 c_stat->hdwq_no = cpup->hdwq; 11343 #endif 11344 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11345 "3340 Set Affinity: not present " 11346 "CPU %d hdwq %d\n", 11347 cpu, cpup->hdwq); 11348 } 11349 11350 /* The cpu_map array will be used later during initialization 11351 * when EQ / CQ / WQs are allocated and configured. 11352 */ 11353 return; 11354 } 11355 11356 /** 11357 * lpfc_cpuhp_get_eq 11358 * 11359 * @phba: pointer to lpfc hba data structure. 
11360 * @cpu: cpu going offline 11361 * @eqlist: eq list to append to 11362 */ 11363 static int 11364 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, 11365 struct list_head *eqlist) 11366 { 11367 const struct cpumask *maskp; 11368 struct lpfc_queue *eq; 11369 struct cpumask *tmp; 11370 u16 idx; 11371 11372 tmp = kzalloc(cpumask_size(), GFP_KERNEL); 11373 if (!tmp) 11374 return -ENOMEM; 11375 11376 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11377 maskp = pci_irq_get_affinity(phba->pcidev, idx); 11378 if (!maskp) 11379 continue; 11380 /* 11381 * if irq is not affinitized to the cpu going offline 11382 * then we don't need to poll the eq attached 11383 * to it. 11384 */ 11385 if (!cpumask_and(tmp, maskp, cpumask_of(cpu))) 11386 continue; 11387 /* get the cpus that are online and are 11388 * affinitized to this irq vector. If the count is 11389 * more than 1 then cpuhp is not going to shut 11390 * down this vector. Since this cpu has not 11391 * gone offline yet, we need >1. 11392 */ 11393 cpumask_and(tmp, maskp, cpu_online_mask); 11394 if (cpumask_weight(tmp) > 1) 11395 continue; 11396 11397 /* Now that we have an irq to shut down, get the eq 11398 * mapped to this irq. Note: multiple hdwq's in 11399 * the software can share an eq, but eventually 11400 * only one eq will be mapped to this vector 11401 */ 11402 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 11403 list_add(&eq->_poll_list, eqlist); 11404 } 11405 kfree(tmp); 11406 return 0; 11407 } 11408 11409 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) 11410 { 11411 if (phba->sli_rev != LPFC_SLI_REV4) 11412 return; 11413 11414 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, 11415 &phba->cpuhp); 11416 /* 11417 * unregistering the instance doesn't stop the polling 11418 * timer. Wait for the poll timer to retire.
11419 */ 11420 synchronize_rcu(); 11421 del_timer_sync(&phba->cpuhp_poll_timer); 11422 } 11423 11424 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 11425 { 11426 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 11427 return; 11428 11429 __lpfc_cpuhp_remove(phba); 11430 } 11431 11432 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 11433 { 11434 if (phba->sli_rev != LPFC_SLI_REV4) 11435 return; 11436 11437 rcu_read_lock(); 11438 11439 if (!list_empty(&phba->poll_list)) 11440 mod_timer(&phba->cpuhp_poll_timer, 11441 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 11442 11443 rcu_read_unlock(); 11444 11445 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 11446 &phba->cpuhp); 11447 } 11448 11449 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 11450 { 11451 if (phba->pport->load_flag & FC_UNLOADING) { 11452 *retval = -EAGAIN; 11453 return true; 11454 } 11455 11456 if (phba->sli_rev != LPFC_SLI_REV4) { 11457 *retval = 0; 11458 return true; 11459 } 11460 11461 /* proceed with the hotplug */ 11462 return false; 11463 } 11464 11465 /** 11466 * lpfc_irq_set_aff - set IRQ affinity 11467 * @eqhdl: EQ handle 11468 * @cpu: cpu to set affinity 11469 * 11470 **/ 11471 static inline void 11472 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 11473 { 11474 cpumask_clear(&eqhdl->aff_mask); 11475 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 11476 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 11477 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); 11478 } 11479 11480 /** 11481 * lpfc_irq_clear_aff - clear IRQ affinity 11482 * @eqhdl: EQ handle 11483 * 11484 **/ 11485 static inline void 11486 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 11487 { 11488 cpumask_clear(&eqhdl->aff_mask); 11489 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 11490 } 11491 11492 /** 11493 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 11494 * @phba: pointer to HBA context object. 11495 * @cpu: cpu going offline/online 11496 * @offline: true, cpu is going offline. false, cpu is coming online. 11497 * 11498 * If cpu is going offline, we'll try our best effort to find the next 11499 * online cpu on the phba's original_mask and migrate all offlining IRQ 11500 * affinities. 11501 * 11502 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. 11503 * 11504 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on 11505 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
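* The routine returns without action when irq_chann_mode is NORMAL_MODE, when @cpu is not part of the adapter's irq_aff_mask, or when @cpu is not the first CPU mapped to its EQ (LPFC_CPU_FIRST_IRQ not set).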
11506 * 11507 **/ 11508 static void 11509 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 11510 { 11511 struct lpfc_vector_map_info *cpup; 11512 struct cpumask *aff_mask; 11513 unsigned int cpu_select, cpu_next, idx; 11514 const struct cpumask *orig_mask; 11515 11516 if (phba->irq_chann_mode == NORMAL_MODE) 11517 return; 11518 11519 orig_mask = &phba->sli4_hba.irq_aff_mask; 11520 11521 if (!cpumask_test_cpu(cpu, orig_mask)) 11522 return; 11523 11524 cpup = &phba->sli4_hba.cpu_map[cpu]; 11525 11526 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11527 return; 11528 11529 if (offline) { 11530 /* Find next online CPU on original mask */ 11531 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); 11532 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); 11533 11534 /* Found a valid CPU */ 11535 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 11536 /* Go through each eqhdl and ensure offlining 11537 * cpu aff_mask is migrated 11538 */ 11539 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11540 aff_mask = lpfc_get_aff_mask(idx); 11541 11542 /* Migrate affinity */ 11543 if (cpumask_test_cpu(cpu, aff_mask)) 11544 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 11545 cpu_select); 11546 } 11547 } else { 11548 /* Rely on irqbalance if no online CPUs left on NUMA */ 11549 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 11550 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 11551 } 11552 } else { 11553 /* Migrate affinity back to this CPU */ 11554 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 11555 } 11556 } 11557 11558 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 11559 { 11560 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 11561 struct lpfc_queue *eq, *next; 11562 LIST_HEAD(eqlist); 11563 int retval; 11564 11565 if (!phba) { 11566 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 11567 return 0; 11568 } 11569 11570 if (__lpfc_cpuhp_checks(phba, &retval)) 11571 return retval; 11572 11573 lpfc_irq_rebalance(phba, cpu, true); 11574 11575 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 11576 if (retval) 11577 return retval; 11578 11579 /* start polling on these eq's */ 11580 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 11581 list_del_init(&eq->_poll_list); 11582 lpfc_sli4_start_polling(eq); 11583 } 11584 11585 return 0; 11586 } 11587 11588 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 11589 { 11590 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 11591 struct lpfc_queue *eq, *next; 11592 unsigned int n; 11593 int retval; 11594 11595 if (!phba) { 11596 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 11597 return 0; 11598 } 11599 11600 if (__lpfc_cpuhp_checks(phba, &retval)) 11601 return retval; 11602 11603 lpfc_irq_rebalance(phba, cpu, false); 11604 11605 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 11606 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 11607 if (n == cpu) 11608 lpfc_sli4_stop_polling(eq); 11609 } 11610 11611 return 0; 11612 } 11613 11614 /** 11615 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 11616 * @phba: pointer to lpfc hba data structure. 11617 * 11618 * This routine is invoked to enable the MSI-X interrupt vectors to device 11619 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 11620 * to cpus on the system. 
11621 * 11622 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 11623 * the number of cpus on the same numa node as this adapter. The vectors are 11624 * allocated without requesting OS affinity mapping. A vector will be 11625 * allocated and assigned to each online and offline cpu. If the cpu is 11626 * online, then affinity will be set to that cpu. If the cpu is offline, then 11627 * affinity will be set to the nearest peer cpu within the numa node that is 11628 * online. If there are no online cpus within the numa node, affinity is not 11629 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 11630 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 11631 * configured. 11632 * 11633 * If numa mode is not enabled and there is more than 1 vector allocated, then 11634 * the driver relies on the managed irq interface where the OS assigns vector to 11635 * cpu affinity. The driver will then use that affinity mapping to setup its 11636 * cpu mapping table. 11637 * 11638 * Return codes 11639 * 0 - successful 11640 * other values - error 11641 **/ 11642 static int 11643 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 11644 { 11645 int vectors, rc, index; 11646 char *name; 11647 const struct cpumask *aff_mask = NULL; 11648 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 11649 struct lpfc_vector_map_info *cpup; 11650 struct lpfc_hba_eq_hdl *eqhdl; 11651 const struct cpumask *maskp; 11652 unsigned int flags = PCI_IRQ_MSIX; 11653 11654 /* Set up MSI-X multi-message vectors */ 11655 vectors = phba->cfg_irq_chann; 11656 11657 if (phba->irq_chann_mode != NORMAL_MODE) 11658 aff_mask = &phba->sli4_hba.irq_aff_mask; 11659 11660 if (aff_mask) { 11661 cpu_cnt = cpumask_weight(aff_mask); 11662 vectors = min(phba->cfg_irq_chann, cpu_cnt); 11663 11664 /* cpu: iterates over aff_mask including offline or online 11665 * cpu_select: iterates over online aff_mask to set affinity 11666 */ 11667 cpu = cpumask_first(aff_mask); 11668 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 11669 } else { 11670 flags |= PCI_IRQ_AFFINITY; 11671 } 11672 11673 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 11674 if (rc < 0) { 11675 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11676 "0484 PCI enable MSI-X failed (%d)\n", rc); 11677 goto vec_fail_out; 11678 } 11679 vectors = rc; 11680 11681 /* Assign MSI-X vectors to interrupt handlers */ 11682 for (index = 0; index < vectors; index++) { 11683 eqhdl = lpfc_get_eq_hdl(index); 11684 name = eqhdl->handler_name; 11685 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 11686 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 11687 LPFC_DRIVER_HANDLER_NAME"%d", index); 11688 11689 eqhdl->idx = index; 11690 rc = request_irq(pci_irq_vector(phba->pcidev, index), 11691 &lpfc_sli4_hba_intr_handler, 0, 11692 name, eqhdl); 11693 if (rc) { 11694 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11695 "0486 MSI-X fast-path (%d) " 11696 "request_irq failed (%d)\n", index, rc); 11697 goto cfg_fail_out; 11698 } 11699 11700 eqhdl->irq = pci_irq_vector(phba->pcidev, index); 11701 11702 if (aff_mask) { 11703 /* If found a neighboring online cpu, set affinity */ 11704 if (cpu_select < nr_cpu_ids) 11705 lpfc_irq_set_aff(eqhdl, cpu_select); 11706 11707 /* Assign EQ to cpu_map */ 11708 lpfc_assign_eq_map_info(phba, index, 11709 LPFC_CPU_FIRST_IRQ, 11710 cpu); 11711 11712 /* Iterate to next offline or online cpu in aff_mask */ 11713 cpu = cpumask_next(cpu, aff_mask); 11714 11715 /* Find next online cpu in aff_mask to set 
affinity */ 11716 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 11717 } else if (vectors == 1) { 11718 cpu = cpumask_first(cpu_present_mask); 11719 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, 11720 cpu); 11721 } else { 11722 maskp = pci_irq_get_affinity(phba->pcidev, index); 11723 11724 /* Loop through all CPUs associated with vector index */ 11725 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 11726 cpup = &phba->sli4_hba.cpu_map[cpu]; 11727 11728 /* If this is the first CPU that's assigned to 11729 * this vector, set LPFC_CPU_FIRST_IRQ. 11730 * 11731 * With certain platforms it's possible that irq 11732 * vectors are affinitized to all the cpus. 11733 * This can result in each cpu_map.eq being set 11734 * to the last vector, resulting in overwrite 11735 * of all the previous cpu_map.eq. Ensure that 11736 * each vector receives a place in cpu_map. 11737 * A later call to lpfc_cpu_affinity_check will 11738 * ensure we are nicely balanced out. 11739 */ 11740 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) 11741 continue; 11742 lpfc_assign_eq_map_info(phba, index, 11743 LPFC_CPU_FIRST_IRQ, 11744 cpu); 11745 break; 11746 } 11747 } 11748 } 11749 11750 if (vectors != phba->cfg_irq_chann) { 11751 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11752 "3238 Reducing IO channels to match number of " 11753 "MSI-X vectors, requested %d got %d\n", 11754 phba->cfg_irq_chann, vectors); 11755 if (phba->cfg_irq_chann > vectors) 11756 phba->cfg_irq_chann = vectors; 11757 } 11758 11759 return rc; 11760 11761 cfg_fail_out: 11762 /* free the irq already requested */ 11763 for (--index; index >= 0; index--) { 11764 eqhdl = lpfc_get_eq_hdl(index); 11765 lpfc_irq_clear_aff(eqhdl); 11766 irq_set_affinity_hint(eqhdl->irq, NULL); 11767 free_irq(eqhdl->irq, eqhdl); 11768 } 11769 11770 /* Unconfigure MSI-X capability structure */ 11771 pci_free_irq_vectors(phba->pcidev); 11772 11773 vec_fail_out: 11774 return rc; 11775 } 11776 11777 /** 11778 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 11779 * @phba: pointer to lpfc hba data structure. 11780 * 11781 * This routine is invoked to enable the MSI interrupt mode to device with 11782 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 11783 * called to enable the MSI vector. The device driver is responsible for 11784 * calling the request_irq() to register the MSI vector with an interrupt 11785 * handler, which is done in this function. 11786 * 11787 * Return codes 11788 * 0 - successful 11789 * other values - error 11790 **/ 11791 static int 11792 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 11793 { 11794 int rc, index; 11795 unsigned int cpu; 11796 struct lpfc_hba_eq_hdl *eqhdl; 11797 11798 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 11799 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 11800 if (rc > 0) 11801 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11802 "0487 PCI enable MSI mode success.\n"); 11803 else { 11804 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11805 "0488 PCI enable MSI mode failed (%d)\n", rc); 11806 return rc ?
rc : -1; 11807 } 11808 11809 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11810 0, LPFC_DRIVER_NAME, phba); 11811 if (rc) { 11812 pci_free_irq_vectors(phba->pcidev); 11813 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11814 "0490 MSI request_irq failed (%d)\n", rc); 11815 return rc; 11816 } 11817 11818 eqhdl = lpfc_get_eq_hdl(0); 11819 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 11820 11821 cpu = cpumask_first(cpu_present_mask); 11822 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 11823 11824 for (index = 0; index < phba->cfg_irq_chann; index++) { 11825 eqhdl = lpfc_get_eq_hdl(index); 11826 eqhdl->idx = index; 11827 } 11828 11829 return 0; 11830 } 11831 11832 /** 11833 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 11834 * @phba: pointer to lpfc hba data structure. 11835 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 11836 * 11837 * This routine is invoked to enable device interrupt and associate driver's 11838 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 11839 * interface spec. Depends on the interrupt mode configured to the driver, 11840 * the driver will try to fallback from the configured interrupt mode to an 11841 * interrupt mode which is supported by the platform, kernel, and device in 11842 * the order of: 11843 * MSI-X -> MSI -> IRQ. 11844 * 11845 * Return codes 11846 * 0 - successful 11847 * other values - error 11848 **/ 11849 static uint32_t 11850 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 11851 { 11852 uint32_t intr_mode = LPFC_INTR_ERROR; 11853 int retval, idx; 11854 11855 if (cfg_mode == 2) { 11856 /* Preparation before conf_msi mbox cmd */ 11857 retval = 0; 11858 if (!retval) { 11859 /* Now, try to enable MSI-X interrupt mode */ 11860 retval = lpfc_sli4_enable_msix(phba); 11861 if (!retval) { 11862 /* Indicate initialization to MSI-X mode */ 11863 phba->intr_type = MSIX; 11864 intr_mode = 2; 11865 } 11866 } 11867 } 11868 11869 /* Fallback to MSI if MSI-X initialization failed */ 11870 if (cfg_mode >= 1 && phba->intr_type == NONE) { 11871 retval = lpfc_sli4_enable_msi(phba); 11872 if (!retval) { 11873 /* Indicate initialization to MSI mode */ 11874 phba->intr_type = MSI; 11875 intr_mode = 1; 11876 } 11877 } 11878 11879 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 11880 if (phba->intr_type == NONE) { 11881 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11882 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 11883 if (!retval) { 11884 struct lpfc_hba_eq_hdl *eqhdl; 11885 unsigned int cpu; 11886 11887 /* Indicate initialization to INTx mode */ 11888 phba->intr_type = INTx; 11889 intr_mode = 0; 11890 11891 eqhdl = lpfc_get_eq_hdl(0); 11892 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 11893 11894 cpu = cpumask_first(cpu_present_mask); 11895 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 11896 cpu); 11897 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11898 eqhdl = lpfc_get_eq_hdl(idx); 11899 eqhdl->idx = idx; 11900 } 11901 } 11902 } 11903 return intr_mode; 11904 } 11905 11906 /** 11907 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 11908 * @phba: pointer to lpfc hba data structure. 11909 * 11910 * This routine is invoked to disable device interrupt and disassociate 11911 * the driver's interrupt handler(s) from interrupt vector(s) to device 11912 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 11913 * will release the interrupt vector(s) for the message signaled interrupt. 
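* With MSI-X, each vector's affinity hint is cleared and its IRQ freed against the per-EQ handle before the vectors themselves are released; with MSI or INTx a single IRQ is freed against the phba.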
11914 **/ 11915 static void 11916 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 11917 { 11918 /* Disable the currently initialized interrupt mode */ 11919 if (phba->intr_type == MSIX) { 11920 int index; 11921 struct lpfc_hba_eq_hdl *eqhdl; 11922 11923 /* Free up MSI-X multi-message vectors */ 11924 for (index = 0; index < phba->cfg_irq_chann; index++) { 11925 eqhdl = lpfc_get_eq_hdl(index); 11926 lpfc_irq_clear_aff(eqhdl); 11927 irq_set_affinity_hint(eqhdl->irq, NULL); 11928 free_irq(eqhdl->irq, eqhdl); 11929 } 11930 } else { 11931 free_irq(phba->pcidev->irq, phba); 11932 } 11933 11934 pci_free_irq_vectors(phba->pcidev); 11935 11936 /* Reset interrupt management states */ 11937 phba->intr_type = NONE; 11938 phba->sli.slistat.sli_intr = 0; 11939 } 11940 11941 /** 11942 * lpfc_unset_hba - Unset SLI3 hba device initialization 11943 * @phba: pointer to lpfc hba data structure. 11944 * 11945 * This routine is invoked to unset the HBA device initialization steps to 11946 * a device with SLI-3 interface spec. 11947 **/ 11948 static void 11949 lpfc_unset_hba(struct lpfc_hba *phba) 11950 { 11951 struct lpfc_vport *vport = phba->pport; 11952 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11953 11954 spin_lock_irq(shost->host_lock); 11955 vport->load_flag |= FC_UNLOADING; 11956 spin_unlock_irq(shost->host_lock); 11957 11958 kfree(phba->vpi_bmask); 11959 kfree(phba->vpi_ids); 11960 11961 lpfc_stop_hba_timers(phba); 11962 11963 phba->pport->work_port_events = 0; 11964 11965 lpfc_sli_hba_down(phba); 11966 11967 lpfc_sli_brdrestart(phba); 11968 11969 lpfc_sli_disable_intr(phba); 11970 11971 return; 11972 } 11973 11974 /** 11975 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 11976 * @phba: Pointer to HBA context object. 11977 * 11978 * This function is called in the SLI4 code path to wait for completion 11979 * of device's XRIs exchange busy. It will check the XRI exchange busy 11980 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 11981 * that, it will check the XRI exchange busy on outstanding FCP and ELS 11982 * I/Os every 30 seconds, log error message, and wait forever. Only when 11983 * all XRI exchange busy complete, the driver unload shall proceed with 11984 * invoking the function reset ioctl mailbox command to the CNA and the 11985 * the rest of the driver unload resource release. 11986 **/ 11987 static void 11988 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 11989 { 11990 struct lpfc_sli4_hdw_queue *qp; 11991 int idx, ccnt; 11992 int wait_time = 0; 11993 int io_xri_cmpl = 1; 11994 int nvmet_xri_cmpl = 1; 11995 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11996 11997 /* Driver just aborted IOs during the hba_unset process. Pause 11998 * here to give the HBA time to complete the IO and get entries 11999 * into the abts lists. 12000 */ 12001 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 12002 12003 /* Wait for NVME pending IO to flush back to transport. 
*/ 12004 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12005 lpfc_nvme_wait_for_io_drain(phba); 12006 12007 ccnt = 0; 12008 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 12009 qp = &phba->sli4_hba.hdwq[idx]; 12010 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 12011 if (!io_xri_cmpl) /* if list is NOT empty */ 12012 ccnt++; 12013 } 12014 if (ccnt) 12015 io_xri_cmpl = 0; 12016 12017 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12018 nvmet_xri_cmpl = 12019 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 12020 } 12021 12022 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 12023 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 12024 if (!nvmet_xri_cmpl) 12025 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12026 "6424 NVMET XRI exchange busy " 12027 "wait time: %d seconds.\n", 12028 wait_time/1000); 12029 if (!io_xri_cmpl) 12030 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12031 "6100 IO XRI exchange busy " 12032 "wait time: %d seconds.\n", 12033 wait_time/1000); 12034 if (!els_xri_cmpl) 12035 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12036 "2878 ELS XRI exchange busy " 12037 "wait time: %d seconds.\n", 12038 wait_time/1000); 12039 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 12040 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 12041 } else { 12042 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 12043 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 12044 } 12045 12046 ccnt = 0; 12047 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 12048 qp = &phba->sli4_hba.hdwq[idx]; 12049 io_xri_cmpl = list_empty( 12050 &qp->lpfc_abts_io_buf_list); 12051 if (!io_xri_cmpl) /* if list is NOT empty */ 12052 ccnt++; 12053 } 12054 if (ccnt) 12055 io_xri_cmpl = 0; 12056 12057 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12058 nvmet_xri_cmpl = list_empty( 12059 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 12060 } 12061 els_xri_cmpl = 12062 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 12063 12064 } 12065 } 12066 12067 /** 12068 * lpfc_sli4_hba_unset - Unset the fcoe hba 12069 * @phba: Pointer to HBA context object. 12070 * 12071 * This function is called in the SLI4 code path to reset the HBA's FCoE 12072 * function. The caller is not required to hold any lock. This routine 12073 * issues PCI function reset mailbox command to reset the FCoE function. 12074 * At the end of the function, it calls lpfc_hba_down_post function to 12075 * free any pending commands. 12076 **/ 12077 static void 12078 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 12079 { 12080 int wait_cnt = 0; 12081 LPFC_MBOXQ_t *mboxq; 12082 struct pci_dev *pdev = phba->pcidev; 12083 12084 lpfc_stop_hba_timers(phba); 12085 if (phba->pport) 12086 phba->sli4_hba.intr_enable = 0; 12087 12088 /* 12089 * Gracefully wait out the potential current outstanding asynchronous 12090 * mailbox command. 
12091 */ 12092 12093 /* First, block any pending async mailbox command from posted */ 12094 spin_lock_irq(&phba->hbalock); 12095 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 12096 spin_unlock_irq(&phba->hbalock); 12097 /* Now, trying to wait it out if we can */ 12098 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 12099 msleep(10); 12100 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 12101 break; 12102 } 12103 /* Forcefully release the outstanding mailbox command if timed out */ 12104 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 12105 spin_lock_irq(&phba->hbalock); 12106 mboxq = phba->sli.mbox_active; 12107 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 12108 __lpfc_mbox_cmpl_put(phba, mboxq); 12109 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 12110 phba->sli.mbox_active = NULL; 12111 spin_unlock_irq(&phba->hbalock); 12112 } 12113 12114 /* Abort all iocbs associated with the hba */ 12115 lpfc_sli_hba_iocb_abort(phba); 12116 12117 /* Wait for completion of device XRI exchange busy */ 12118 lpfc_sli4_xri_exchange_busy_wait(phba); 12119 12120 /* per-phba callback de-registration for hotplug event */ 12121 if (phba->pport) 12122 lpfc_cpuhp_remove(phba); 12123 12124 /* Disable PCI subsystem interrupt */ 12125 lpfc_sli4_disable_intr(phba); 12126 12127 /* Disable SR-IOV if enabled */ 12128 if (phba->cfg_sriov_nr_virtfn) 12129 pci_disable_sriov(pdev); 12130 12131 /* Stop kthread signal shall trigger work_done one more time */ 12132 kthread_stop(phba->worker_thread); 12133 12134 /* Disable FW logging to host memory */ 12135 lpfc_ras_stop_fwlog(phba); 12136 12137 /* Unset the queues shared with the hardware then release all 12138 * allocated resources. 12139 */ 12140 lpfc_sli4_queue_unset(phba); 12141 lpfc_sli4_queue_destroy(phba); 12142 12143 /* Reset SLI4 HBA FCoE function */ 12144 lpfc_pci_function_reset(phba); 12145 12146 /* Free RAS DMA memory */ 12147 if (phba->ras_fwlog.ras_enabled) 12148 lpfc_sli4_ras_dma_free(phba); 12149 12150 /* Stop the SLI4 device port */ 12151 if (phba->pport) 12152 phba->pport->work_port_events = 0; 12153 } 12154 12155 /** 12156 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 12157 * @phba: Pointer to HBA context object. 12158 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 12159 * 12160 * This function is called in the SLI4 code path to read the port's 12161 * sli4 capabilities. 12162 * 12163 * This function may be be called from any context that can block-wait 12164 * for the completion. The expectation is that this routine is called 12165 * typically from probe_one or from the online routine. 12166 **/ 12167 int 12168 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 12169 { 12170 int rc; 12171 struct lpfc_mqe *mqe = &mboxq->u.mqe; 12172 struct lpfc_pc_sli4_params *sli4_params; 12173 uint32_t mbox_tmo; 12174 int length; 12175 bool exp_wqcq_pages = true; 12176 struct lpfc_sli4_parameters *mbx_sli4_parameters; 12177 12178 /* 12179 * By default, the driver assumes the SLI4 port requires RPI 12180 * header postings. The SLI4_PARAM response will correct this 12181 * assumption. 
12182 */ 12183 phba->sli4_hba.rpi_hdrs_in_use = 1; 12184 12185 /* Read the port's SLI4 Config Parameters */ 12186 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 12187 sizeof(struct lpfc_sli4_cfg_mhdr)); 12188 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 12189 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 12190 length, LPFC_SLI4_MBX_EMBED); 12191 if (!phba->sli4_hba.intr_enable) 12192 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 12193 else { 12194 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 12195 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 12196 } 12197 if (unlikely(rc)) 12198 return rc; 12199 sli4_params = &phba->sli4_hba.pc_sli4_params; 12200 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 12201 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 12202 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 12203 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 12204 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 12205 mbx_sli4_parameters); 12206 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 12207 mbx_sli4_parameters); 12208 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 12209 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 12210 else 12211 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 12212 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 12213 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, 12214 mbx_sli4_parameters); 12215 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 12216 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 12217 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 12218 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 12219 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 12220 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 12221 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 12222 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 12223 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 12224 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 12225 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 12226 mbx_sli4_parameters); 12227 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 12228 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 12229 mbx_sli4_parameters); 12230 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 12231 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 12232 12233 /* Check for Extended Pre-Registered SGL support */ 12234 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 12235 12236 /* Check for firmware nvme support */ 12237 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 12238 bf_get(cfg_xib, mbx_sli4_parameters)); 12239 12240 if (rc) { 12241 /* Save this to indicate the Firmware supports NVME */ 12242 sli4_params->nvme = 1; 12243 12244 /* Firmware NVME support, check driver FC4 NVME support */ 12245 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 12246 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 12247 "6133 Disabling NVME support: " 12248 "FC4 type not supported: x%x\n", 12249 phba->cfg_enable_fc4_type); 12250 goto fcponly; 12251 } 12252 } else { 12253 /* No firmware NVME support, check driver FC4 NVME support */ 12254 sli4_params->nvme = 0; 12255 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12256 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 12257 "6101 Disabling NVME support: Not " 12258 "supported by firmware (%d %d) x%x\n", 12259 
bf_get(cfg_nvme, mbx_sli4_parameters), 12260 bf_get(cfg_xib, mbx_sli4_parameters), 12261 phba->cfg_enable_fc4_type); 12262 fcponly: 12263 phba->nvmet_support = 0; 12264 phba->cfg_nvmet_mrq = 0; 12265 phba->cfg_nvme_seg_cnt = 0; 12266 12267 /* If no FC4 type support, move to just SCSI support */ 12268 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 12269 return -ENODEV; 12270 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 12271 } 12272 } 12273 12274 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 12275 * accommodate 512K and 1M IOs in a single nvme buf. 12276 */ 12277 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12278 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 12279 12280 /* Enable embedded Payload BDE if support is indicated */ 12281 if (bf_get(cfg_pbde, mbx_sli4_parameters)) 12282 phba->cfg_enable_pbde = 1; 12283 else 12284 phba->cfg_enable_pbde = 0; 12285 12286 /* 12287 * To support Suppress Response feature we must satisfy 3 conditions. 12288 * lpfc_suppress_rsp module parameter must be set (default). 12289 * In SLI4-Parameters Descriptor: 12290 * Extended Inline Buffers (XIB) must be supported. 12291 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 12292 * (double negative). 12293 */ 12294 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 12295 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 12296 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 12297 else 12298 phba->cfg_suppress_rsp = 0; 12299 12300 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 12301 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 12302 12303 /* Make sure that sge_supp_len can be handled by the driver */ 12304 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 12305 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 12306 12307 /* 12308 * Check whether the adapter supports an embedded copy of the 12309 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 12310 * to use this option, 128-byte WQEs must be used. 
12311 */ 12312 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 12313 phba->fcp_embed_io = 1; 12314 else 12315 phba->fcp_embed_io = 0; 12316 12317 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 12318 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 12319 bf_get(cfg_xib, mbx_sli4_parameters), 12320 phba->cfg_enable_pbde, 12321 phba->fcp_embed_io, sli4_params->nvme, 12322 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 12323 12324 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 12325 LPFC_SLI_INTF_IF_TYPE_2) && 12326 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 12327 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 12328 exp_wqcq_pages = false; 12329 12330 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 12331 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 12332 exp_wqcq_pages && 12333 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 12334 phba->enab_exp_wqcq_pages = 1; 12335 else 12336 phba->enab_exp_wqcq_pages = 0; 12337 /* 12338 * Check if the SLI port supports MDS Diagnostics 12339 */ 12340 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 12341 phba->mds_diags_support = 1; 12342 else 12343 phba->mds_diags_support = 0; 12344 12345 /* 12346 * Check if the SLI port supports NSLER 12347 */ 12348 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 12349 phba->nsler = 1; 12350 else 12351 phba->nsler = 0; 12352 12353 /* Save PB info for use during HBA setup */ 12354 sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters); 12355 sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters); 12356 sli4_params->mib_size = mbx_sli4_parameters->mib_size; 12357 sli4_params->mi_value = LPFC_DFLT_MIB_VAL; 12358 12359 /* Next we check for Vendor MIB support */ 12360 if (sli4_params->mi_ver && phba->cfg_enable_mi) 12361 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; 12362 12363 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12364 "6461 MIB attr %d enable %d FDMI %d buf %d:%d\n", 12365 sli4_params->mi_ver, phba->cfg_enable_mi, 12366 sli4_params->mi_value, sli4_params->mib_bde_cnt, 12367 sli4_params->mib_size); 12368 return 0; 12369 } 12370 12371 /** 12372 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 12373 * @pdev: pointer to PCI device 12374 * @pid: pointer to PCI device identifier 12375 * 12376 * This routine is to be called to attach a device with SLI-3 interface spec 12377 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 12378 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 12379 * information of the device and driver to see if the driver state that it can 12380 * support this kind of device. If the match is successful, the driver core 12381 * invokes this routine. If this routine determines it can claim the HBA, it 12382 * does all the initialization that it needs to do to handle the HBA properly. 
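* The setup sequence enables the PCI device, maps the SLI-3 register space, allocates driver resources and the iocb list, creates the SCSI host, and then brings the port up, stepping down from MSI-X to MSI to INTx until an interrupt mode passes the active-interrupt test.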
12383 * 12384 * Return code 12385 * 0 - driver can claim the device 12386 * negative value - driver can not claim the device 12387 **/ 12388 static int 12389 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 12390 { 12391 struct lpfc_hba *phba; 12392 struct lpfc_vport *vport = NULL; 12393 struct Scsi_Host *shost = NULL; 12394 int error; 12395 uint32_t cfg_mode, intr_mode; 12396 12397 /* Allocate memory for HBA structure */ 12398 phba = lpfc_hba_alloc(pdev); 12399 if (!phba) 12400 return -ENOMEM; 12401 12402 /* Perform generic PCI device enabling operation */ 12403 error = lpfc_enable_pci_dev(phba); 12404 if (error) 12405 goto out_free_phba; 12406 12407 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 12408 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 12409 if (error) 12410 goto out_disable_pci_dev; 12411 12412 /* Set up SLI-3 specific device PCI memory space */ 12413 error = lpfc_sli_pci_mem_setup(phba); 12414 if (error) { 12415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12416 "1402 Failed to set up pci memory space.\n"); 12417 goto out_disable_pci_dev; 12418 } 12419 12420 /* Set up SLI-3 specific device driver resources */ 12421 error = lpfc_sli_driver_resource_setup(phba); 12422 if (error) { 12423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12424 "1404 Failed to set up driver resource.\n"); 12425 goto out_unset_pci_mem_s3; 12426 } 12427 12428 /* Initialize and populate the iocb list per host */ 12429 12430 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 12431 if (error) { 12432 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12433 "1405 Failed to initialize iocb list.\n"); 12434 goto out_unset_driver_resource_s3; 12435 } 12436 12437 /* Set up common device driver resources */ 12438 error = lpfc_setup_driver_resource_phase2(phba); 12439 if (error) { 12440 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12441 "1406 Failed to set up driver resource.\n"); 12442 goto out_free_iocb_list; 12443 } 12444 12445 /* Get the default values for Model Name and Description */ 12446 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 12447 12448 /* Create SCSI host to the physical port */ 12449 error = lpfc_create_shost(phba); 12450 if (error) { 12451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12452 "1407 Failed to create scsi host.\n"); 12453 goto out_unset_driver_resource; 12454 } 12455 12456 /* Configure sysfs attributes */ 12457 vport = phba->pport; 12458 error = lpfc_alloc_sysfs_attr(vport); 12459 if (error) { 12460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12461 "1476 Failed to allocate sysfs attr\n"); 12462 goto out_destroy_shost; 12463 } 12464 12465 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 12466 /* Now, trying to enable interrupt and bring up the device */ 12467 cfg_mode = phba->cfg_use_msi; 12468 while (true) { 12469 /* Put device to a known state before enabling interrupt */ 12470 lpfc_stop_port(phba); 12471 /* Configure and enable interrupt */ 12472 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 12473 if (intr_mode == LPFC_INTR_ERROR) { 12474 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12475 "0431 Failed to enable interrupt.\n"); 12476 error = -ENODEV; 12477 goto out_free_sysfs_attr; 12478 } 12479 /* SLI-3 HBA setup */ 12480 if (lpfc_sli_hba_setup(phba)) { 12481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12482 "1477 Failed to set up hba\n"); 12483 error = -ENODEV; 12484 goto out_remove_device; 12485 } 12486 12487 /* Wait 50ms for the interrupts of previous mailbox commands */ 12488 
msleep(50);
12489 /* Check active interrupts on message signaled interrupts */
12490 if (intr_mode == 0 ||
12491 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
12492 /* Log the current active interrupt mode */
12493 phba->intr_mode = intr_mode;
12494 lpfc_log_intr_mode(phba, intr_mode);
12495 break;
12496 } else {
12497 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12498 "0447 Configure interrupt mode (%d) "
12499 "failed active interrupt test.\n",
12500 intr_mode);
12501 /* Disable the current interrupt mode */
12502 lpfc_sli_disable_intr(phba);
12503 /* Try next level of interrupt mode */
12504 cfg_mode = --intr_mode;
12505 }
12506 }
12507
12508 /* Perform post initialization setup */
12509 lpfc_post_init_setup(phba);
12510
12511 /* Check if there are static vports to be created. */
12512 lpfc_create_static_vport(phba);
12513
12514 return 0;
12515
12516 out_remove_device:
12517 lpfc_unset_hba(phba);
12518 out_free_sysfs_attr:
12519 lpfc_free_sysfs_attr(vport);
12520 out_destroy_shost:
12521 lpfc_destroy_shost(phba);
12522 out_unset_driver_resource:
12523 lpfc_unset_driver_resource_phase2(phba);
12524 out_free_iocb_list:
12525 lpfc_free_iocb_list(phba);
12526 out_unset_driver_resource_s3:
12527 lpfc_sli_driver_resource_unset(phba);
12528 out_unset_pci_mem_s3:
12529 lpfc_sli_pci_mem_unset(phba);
12530 out_disable_pci_dev:
12531 lpfc_disable_pci_dev(phba);
12532 if (shost)
12533 scsi_host_put(shost);
12534 out_free_phba:
12535 lpfc_hba_free(phba);
12536 return error;
12537 }
12538
12539 /**
12540 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
12541 * @pdev: pointer to PCI device
12542 *
12543 * This routine is to be called to detach a device with SLI-3 interface
12544 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12545 * removed from PCI bus, it performs all the necessary cleanup for the HBA
12546 * device to be removed from the PCI subsystem properly.
12547 **/
12548 static void
12549 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
12550 {
12551 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12552 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12553 struct lpfc_vport **vports;
12554 struct lpfc_hba *phba = vport->phba;
12555 int i;
12556
12557 spin_lock_irq(&phba->hbalock);
12558 vport->load_flag |= FC_UNLOADING;
12559 spin_unlock_irq(&phba->hbalock);
12560
12561 lpfc_free_sysfs_attr(vport);
12562
12563 /* Release all the vports against this physical port */
12564 vports = lpfc_create_vport_work_array(phba);
12565 if (vports != NULL)
12566 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12567 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12568 continue;
12569 fc_vport_terminate(vports[i]->fc_vport);
12570 }
12571 lpfc_destroy_vport_work_array(phba, vports);
12572
12573 /* Remove FC host with the physical port */
12574 fc_remove_host(shost);
12575 scsi_remove_host(shost);
12576
12577 /* Clean up all nodes, mailboxes and IOs. */
12578 lpfc_cleanup(vport);
12579
12580 /*
12581 * Bring down the SLI Layer. This step disables all interrupts,
12582 * clears the rings, discards all mailbox commands, and resets
12583 * the HBA.
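 * The calls that follow implement that order: lpfc_sli_hba_down() leaves the
 * HBA interrupt disabled, kthread_stop() retires the worker thread, and
 * lpfc_sli_brdrestart() performs the final txcmplq cleanup and board reset.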
12584 */ 12585 12586 /* HBA interrupt will be disabled after this call */ 12587 lpfc_sli_hba_down(phba); 12588 /* Stop kthread signal shall trigger work_done one more time */ 12589 kthread_stop(phba->worker_thread); 12590 /* Final cleanup of txcmplq and reset the HBA */ 12591 lpfc_sli_brdrestart(phba); 12592 12593 kfree(phba->vpi_bmask); 12594 kfree(phba->vpi_ids); 12595 12596 lpfc_stop_hba_timers(phba); 12597 spin_lock_irq(&phba->port_list_lock); 12598 list_del_init(&vport->listentry); 12599 spin_unlock_irq(&phba->port_list_lock); 12600 12601 lpfc_debugfs_terminate(vport); 12602 12603 /* Disable SR-IOV if enabled */ 12604 if (phba->cfg_sriov_nr_virtfn) 12605 pci_disable_sriov(pdev); 12606 12607 /* Disable interrupt */ 12608 lpfc_sli_disable_intr(phba); 12609 12610 scsi_host_put(shost); 12611 12612 /* 12613 * Call scsi_free before mem_free since scsi bufs are released to their 12614 * corresponding pools here. 12615 */ 12616 lpfc_scsi_free(phba); 12617 lpfc_free_iocb_list(phba); 12618 12619 lpfc_mem_free_all(phba); 12620 12621 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 12622 phba->hbqslimp.virt, phba->hbqslimp.phys); 12623 12624 /* Free resources associated with SLI2 interface */ 12625 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 12626 phba->slim2p.virt, phba->slim2p.phys); 12627 12628 /* unmap adapter SLIM and Control Registers */ 12629 iounmap(phba->ctrl_regs_memmap_p); 12630 iounmap(phba->slim_memmap_p); 12631 12632 lpfc_hba_free(phba); 12633 12634 pci_release_mem_regions(pdev); 12635 pci_disable_device(pdev); 12636 } 12637 12638 /** 12639 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 12640 * @dev_d: pointer to device 12641 * 12642 * This routine is to be called from the kernel's PCI subsystem to support 12643 * system Power Management (PM) to device with SLI-3 interface spec. When 12644 * PM invokes this method, it quiesces the device by stopping the driver's 12645 * worker thread for the device, turning off device's interrupt and DMA, 12646 * and bring the device offline. Note that as the driver implements the 12647 * minimum PM requirements to a power-aware driver's PM support for the 12648 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 12649 * to the suspend() method call will be treated as SUSPEND and the driver will 12650 * fully reinitialize its device during resume() method call, the driver will 12651 * set device to PCI_D3hot state in PCI config space instead of setting it 12652 * according to the @msg provided by the PM. 12653 * 12654 * Return code 12655 * 0 - driver suspended the device 12656 * Error otherwise 12657 **/ 12658 static int __maybe_unused 12659 lpfc_pci_suspend_one_s3(struct device *dev_d) 12660 { 12661 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 12662 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12663 12664 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12665 "0473 PCI device Power Management suspend.\n"); 12666 12667 /* Bring down the device */ 12668 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12669 lpfc_offline(phba); 12670 kthread_stop(phba->worker_thread); 12671 12672 /* Disable interrupt from device */ 12673 lpfc_sli_disable_intr(phba); 12674 12675 return 0; 12676 } 12677 12678 /** 12679 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 12680 * @dev_d: pointer to device 12681 * 12682 * This routine is to be called from the kernel's PCI subsystem to support 12683 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 12684 * invokes this method, it restores the device's PCI config space state and 12685 * fully reinitializes the device and brings it online. Note that as the 12686 * driver implements the minimum PM requirements to a power-aware driver's 12687 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 12688 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 12689 * driver will fully reinitialize its device during resume() method call, 12690 * the device will be set to PCI_D0 directly in PCI config space before 12691 * restoring the state. 12692 * 12693 * Return code 12694 * 0 - driver suspended the device 12695 * Error otherwise 12696 **/ 12697 static int __maybe_unused 12698 lpfc_pci_resume_one_s3(struct device *dev_d) 12699 { 12700 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 12701 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12702 uint32_t intr_mode; 12703 int error; 12704 12705 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12706 "0452 PCI device Power Management resume.\n"); 12707 12708 /* Startup the kernel thread for this host adapter. */ 12709 phba->worker_thread = kthread_run(lpfc_do_work, phba, 12710 "lpfc_worker_%d", phba->brd_no); 12711 if (IS_ERR(phba->worker_thread)) { 12712 error = PTR_ERR(phba->worker_thread); 12713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12714 "0434 PM resume failed to start worker " 12715 "thread: error=x%x.\n", error); 12716 return error; 12717 } 12718 12719 /* Configure and enable interrupt */ 12720 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12721 if (intr_mode == LPFC_INTR_ERROR) { 12722 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12723 "0430 PM resume Failed to enable interrupt\n"); 12724 return -EIO; 12725 } else 12726 phba->intr_mode = intr_mode; 12727 12728 /* Restart HBA and bring it online */ 12729 lpfc_sli_brdrestart(phba); 12730 lpfc_online(phba); 12731 12732 /* Log the current active interrupt mode */ 12733 lpfc_log_intr_mode(phba, phba->intr_mode); 12734 12735 return 0; 12736 } 12737 12738 /** 12739 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 12740 * @phba: pointer to lpfc hba data structure. 12741 * 12742 * This routine is called to prepare the SLI3 device for PCI slot recover. It 12743 * aborts all the outstanding SCSI I/Os to the pci device. 12744 **/ 12745 static void 12746 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 12747 { 12748 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12749 "2723 PCI channel I/O abort preparing for recovery\n"); 12750 12751 /* 12752 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 12753 * and let the SCSI mid-layer to retry them to recover. 12754 */ 12755 lpfc_sli_abort_fcp_rings(phba); 12756 } 12757 12758 /** 12759 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 12760 * @phba: pointer to lpfc hba data structure. 12761 * 12762 * This routine is called to prepare the SLI3 device for PCI slot reset. It 12763 * disables the device interrupt and pci device, and aborts the internal FCP 12764 * pending I/Os. 
12765 **/ 12766 static void 12767 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 12768 { 12769 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12770 "2710 PCI channel disable preparing for reset\n"); 12771 12772 /* Block any management I/Os to the device */ 12773 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 12774 12775 /* Block all SCSI devices' I/Os on the host */ 12776 lpfc_scsi_dev_block(phba); 12777 12778 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 12779 lpfc_sli_flush_io_rings(phba); 12780 12781 /* stop all timers */ 12782 lpfc_stop_hba_timers(phba); 12783 12784 /* Disable interrupt and pci device */ 12785 lpfc_sli_disable_intr(phba); 12786 pci_disable_device(phba->pcidev); 12787 } 12788 12789 /** 12790 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 12791 * @phba: pointer to lpfc hba data structure. 12792 * 12793 * This routine is called to prepare the SLI3 device for PCI slot permanently 12794 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 12795 * pending I/Os. 12796 **/ 12797 static void 12798 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12799 { 12800 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12801 "2711 PCI channel permanent disable for failure\n"); 12802 /* Block all SCSI devices' I/Os on the host */ 12803 lpfc_scsi_dev_block(phba); 12804 12805 /* stop all timers */ 12806 lpfc_stop_hba_timers(phba); 12807 12808 /* Clean up all driver's outstanding SCSI I/Os */ 12809 lpfc_sli_flush_io_rings(phba); 12810 } 12811 12812 /** 12813 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 12814 * @pdev: pointer to PCI device. 12815 * @state: the current PCI connection state. 12816 * 12817 * This routine is called from the PCI subsystem for I/O error handling to 12818 * device with SLI-3 interface spec. This function is called by the PCI 12819 * subsystem after a PCI bus error affecting this device has been detected. 12820 * When this function is invoked, it will need to stop all the I/Os and 12821 * interrupt(s) to the device. Once that is done, it will return 12822 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 12823 * as desired. 
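 *
 * The state handling below maps:
 *   pci_channel_io_normal       -> prepare for recovery, PCI_ERS_RESULT_CAN_RECOVER
 *   pci_channel_io_frozen       -> prepare for reset, PCI_ERS_RESULT_NEED_RESET
 *   pci_channel_io_perm_failure -> prepare for permanent failure, PCI_ERS_RESULT_DISCONNECT
 *   any other state             -> prepare for reset, PCI_ERS_RESULT_NEED_RESET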
12824 * 12825 * Return codes 12826 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 12827 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12828 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12829 **/ 12830 static pci_ers_result_t 12831 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 12832 { 12833 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12834 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12835 12836 switch (state) { 12837 case pci_channel_io_normal: 12838 /* Non-fatal error, prepare for recovery */ 12839 lpfc_sli_prep_dev_for_recover(phba); 12840 return PCI_ERS_RESULT_CAN_RECOVER; 12841 case pci_channel_io_frozen: 12842 /* Fatal error, prepare for slot reset */ 12843 lpfc_sli_prep_dev_for_reset(phba); 12844 return PCI_ERS_RESULT_NEED_RESET; 12845 case pci_channel_io_perm_failure: 12846 /* Permanent failure, prepare for device down */ 12847 lpfc_sli_prep_dev_for_perm_failure(phba); 12848 return PCI_ERS_RESULT_DISCONNECT; 12849 default: 12850 /* Unknown state, prepare and request slot reset */ 12851 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12852 "0472 Unknown PCI error state: x%x\n", state); 12853 lpfc_sli_prep_dev_for_reset(phba); 12854 return PCI_ERS_RESULT_NEED_RESET; 12855 } 12856 } 12857 12858 /** 12859 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 12860 * @pdev: pointer to PCI device. 12861 * 12862 * This routine is called from the PCI subsystem for error handling to 12863 * device with SLI-3 interface spec. This is called after PCI bus has been 12864 * reset to restart the PCI card from scratch, as if from a cold-boot. 12865 * During the PCI subsystem error recovery, after driver returns 12866 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 12867 * recovery and then call this routine before calling the .resume method 12868 * to recover the device. This function will initialize the HBA device, 12869 * enable the interrupt, but it will just put the HBA to offline state 12870 * without passing any I/O traffic. 12871 * 12872 * Return codes 12873 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12874 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12875 */ 12876 static pci_ers_result_t 12877 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 12878 { 12879 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12880 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12881 struct lpfc_sli *psli = &phba->sli; 12882 uint32_t intr_mode; 12883 12884 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 12885 if (pci_enable_device_mem(pdev)) { 12886 printk(KERN_ERR "lpfc: Cannot re-enable " 12887 "PCI device after reset.\n"); 12888 return PCI_ERS_RESULT_DISCONNECT; 12889 } 12890 12891 pci_restore_state(pdev); 12892 12893 /* 12894 * As the new kernel behavior of pci_restore_state() API call clears 12895 * device saved_state flag, need to save the restored state again. 
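 * Saving the state again immediately after the restore (pci_save_state()
 * below) keeps a valid saved state available should the slot be reset again.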
12896 */ 12897 pci_save_state(pdev); 12898 12899 if (pdev->is_busmaster) 12900 pci_set_master(pdev); 12901 12902 spin_lock_irq(&phba->hbalock); 12903 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 12904 spin_unlock_irq(&phba->hbalock); 12905 12906 /* Configure and enable interrupt */ 12907 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12908 if (intr_mode == LPFC_INTR_ERROR) { 12909 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12910 "0427 Cannot re-enable interrupt after " 12911 "slot reset.\n"); 12912 return PCI_ERS_RESULT_DISCONNECT; 12913 } else 12914 phba->intr_mode = intr_mode; 12915 12916 /* Take device offline, it will perform cleanup */ 12917 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12918 lpfc_offline(phba); 12919 lpfc_sli_brdrestart(phba); 12920 12921 /* Log the current active interrupt mode */ 12922 lpfc_log_intr_mode(phba, phba->intr_mode); 12923 12924 return PCI_ERS_RESULT_RECOVERED; 12925 } 12926 12927 /** 12928 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 12929 * @pdev: pointer to PCI device 12930 * 12931 * This routine is called from the PCI subsystem for error handling to device 12932 * with SLI-3 interface spec. It is called when kernel error recovery tells 12933 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 12934 * error recovery. After this call, traffic can start to flow from this device 12935 * again. 12936 */ 12937 static void 12938 lpfc_io_resume_s3(struct pci_dev *pdev) 12939 { 12940 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12941 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12942 12943 /* Bring device online, it will be no-op for non-fatal error resume */ 12944 lpfc_online(phba); 12945 } 12946 12947 /** 12948 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 12949 * @phba: pointer to lpfc hba data structure. 12950 * 12951 * returns the number of ELS/CT IOCBs to reserve 12952 **/ 12953 int 12954 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 12955 { 12956 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 12957 12958 if (phba->sli_rev == LPFC_SLI_REV4) { 12959 if (max_xri <= 100) 12960 return 10; 12961 else if (max_xri <= 256) 12962 return 25; 12963 else if (max_xri <= 512) 12964 return 50; 12965 else if (max_xri <= 1024) 12966 return 100; 12967 else if (max_xri <= 1536) 12968 return 150; 12969 else if (max_xri <= 2048) 12970 return 200; 12971 else 12972 return 250; 12973 } else 12974 return 0; 12975 } 12976 12977 /** 12978 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 12979 * @phba: pointer to lpfc hba data structure. 12980 * 12981 * returns the number of ELS/CT + NVMET IOCBs to reserve 12982 **/ 12983 int 12984 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 12985 { 12986 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 12987 12988 if (phba->nvmet_support) 12989 max_xri += LPFC_NVMET_BUF_POST; 12990 return max_xri; 12991 } 12992 12993 12994 static int 12995 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 12996 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 12997 const struct firmware *fw) 12998 { 12999 int rc; 13000 u8 sli_family; 13001 13002 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 13003 /* Three cases: (1) FW was not supported on the detected adapter. 13004 * (2) FW update has been locked out administratively. 13005 * (3) Some other error during FW update. 
13006 * In each case, an unmaskable message is written to the console 13007 * for admin diagnosis. 13008 */ 13009 if (offset == ADD_STATUS_FW_NOT_SUPPORTED || 13010 (sli_family == LPFC_SLI_INTF_FAMILY_G6 && 13011 magic_number != MAGIC_NUMBER_G6) || 13012 (sli_family == LPFC_SLI_INTF_FAMILY_G7 && 13013 magic_number != MAGIC_NUMBER_G7) || 13014 (sli_family == LPFC_SLI_INTF_FAMILY_G7P && 13015 magic_number != MAGIC_NUMBER_G7P)) { 13016 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13017 "3030 This firmware version is not supported on" 13018 " this HBA model. Device:%x Magic:%x Type:%x " 13019 "ID:%x Size %d %zd\n", 13020 phba->pcidev->device, magic_number, ftype, fid, 13021 fsize, fw->size); 13022 rc = -EINVAL; 13023 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { 13024 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13025 "3021 Firmware downloads have been prohibited " 13026 "by a system configuration setting on " 13027 "Device:%x Magic:%x Type:%x ID:%x Size %d " 13028 "%zd\n", 13029 phba->pcidev->device, magic_number, ftype, fid, 13030 fsize, fw->size); 13031 rc = -EACCES; 13032 } else { 13033 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13034 "3022 FW Download failed. Add Status x%x " 13035 "Device:%x Magic:%x Type:%x ID:%x Size %d " 13036 "%zd\n", 13037 offset, phba->pcidev->device, magic_number, 13038 ftype, fid, fsize, fw->size); 13039 rc = -EIO; 13040 } 13041 return rc; 13042 } 13043 13044 /** 13045 * lpfc_write_firmware - attempt to write a firmware image to the port 13046 * @fw: pointer to firmware image returned from request_firmware. 13047 * @context: pointer to firmware image returned from request_firmware. 13048 * 13049 **/ 13050 static void 13051 lpfc_write_firmware(const struct firmware *fw, void *context) 13052 { 13053 struct lpfc_hba *phba = (struct lpfc_hba *)context; 13054 char fwrev[FW_REV_STR_SIZE]; 13055 struct lpfc_grp_hdr *image; 13056 struct list_head dma_buffer_list; 13057 int i, rc = 0; 13058 struct lpfc_dmabuf *dmabuf, *next; 13059 uint32_t offset = 0, temp_offset = 0; 13060 uint32_t magic_number, ftype, fid, fsize; 13061 13062 /* It can be null in no-wait mode, sanity check */ 13063 if (!fw) { 13064 rc = -ENXIO; 13065 goto out; 13066 } 13067 image = (struct lpfc_grp_hdr *)fw->data; 13068 13069 magic_number = be32_to_cpu(image->magic_number); 13070 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 13071 fid = bf_get_be32(lpfc_grp_hdr_id, image); 13072 fsize = be32_to_cpu(image->size); 13073 13074 INIT_LIST_HEAD(&dma_buffer_list); 13075 lpfc_decode_firmware_rev(phba, fwrev, 1); 13076 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 13077 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13078 "3023 Updating Firmware, Current Version:%s " 13079 "New Version:%s\n", 13080 fwrev, image->revision); 13081 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 13082 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 13083 GFP_KERNEL); 13084 if (!dmabuf) { 13085 rc = -ENOMEM; 13086 goto release_out; 13087 } 13088 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 13089 SLI4_PAGE_SIZE, 13090 &dmabuf->phys, 13091 GFP_KERNEL); 13092 if (!dmabuf->virt) { 13093 kfree(dmabuf); 13094 rc = -ENOMEM; 13095 goto release_out; 13096 } 13097 list_add_tail(&dmabuf->list, &dma_buffer_list); 13098 } 13099 while (offset < fw->size) { 13100 temp_offset = offset; 13101 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 13102 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 13103 memcpy(dmabuf->virt, 13104 fw->data + temp_offset, 13105 fw->size - temp_offset); 
13106 temp_offset = fw->size; 13107 break; 13108 } 13109 memcpy(dmabuf->virt, fw->data + temp_offset, 13110 SLI4_PAGE_SIZE); 13111 temp_offset += SLI4_PAGE_SIZE; 13112 } 13113 rc = lpfc_wr_object(phba, &dma_buffer_list, 13114 (fw->size - offset), &offset); 13115 if (rc) { 13116 rc = lpfc_log_write_firmware_error(phba, offset, 13117 magic_number, 13118 ftype, 13119 fid, 13120 fsize, 13121 fw); 13122 goto release_out; 13123 } 13124 } 13125 rc = offset; 13126 } else 13127 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13128 "3029 Skipped Firmware update, Current " 13129 "Version:%s New Version:%s\n", 13130 fwrev, image->revision); 13131 13132 release_out: 13133 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 13134 list_del(&dmabuf->list); 13135 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 13136 dmabuf->virt, dmabuf->phys); 13137 kfree(dmabuf); 13138 } 13139 release_firmware(fw); 13140 out: 13141 if (rc < 0) 13142 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13143 "3062 Firmware update error, status %d.\n", rc); 13144 else 13145 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13146 "3024 Firmware update success: size %d.\n", rc); 13147 } 13148 13149 /** 13150 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 13151 * @phba: pointer to lpfc hba data structure. 13152 * @fw_upgrade: which firmware to update. 13153 * 13154 * This routine is called to perform Linux generic firmware upgrade on device 13155 * that supports such feature. 13156 **/ 13157 int 13158 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 13159 { 13160 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 13161 int ret; 13162 const struct firmware *fw; 13163 13164 /* Only supported on SLI4 interface type 2 for now */ 13165 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 13166 LPFC_SLI_INTF_IF_TYPE_2) 13167 return -EPERM; 13168 13169 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 13170 13171 if (fw_upgrade == INT_FW_UPGRADE) { 13172 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, 13173 file_name, &phba->pcidev->dev, 13174 GFP_KERNEL, (void *)phba, 13175 lpfc_write_firmware); 13176 } else if (fw_upgrade == RUN_FW_UPGRADE) { 13177 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 13178 if (!ret) 13179 lpfc_write_firmware(fw, (void *)phba); 13180 } else { 13181 ret = -EINVAL; 13182 } 13183 13184 return ret; 13185 } 13186 13187 /** 13188 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 13189 * @pdev: pointer to PCI device 13190 * @pid: pointer to PCI device identifier 13191 * 13192 * This routine is called from the kernel's PCI subsystem to device with 13193 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 13194 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 13195 * information of the device and driver to see if the driver state that it 13196 * can support this kind of device. If the match is successful, the driver 13197 * core invokes this routine. If this routine determines it can claim the HBA, 13198 * it does all the initialization that it needs to do to handle the HBA 13199 * properly. 
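 *
 * In outline, the body below mirrors the SLI-3 probe but installs the SLI-4
 * (LPFC_PCI_DEV_OC) API jump table and memory map, initializes the cpu_map
 * and hba_eq_hdl arrays, enables interrupts once (dropping to a single EQ
 * when MSI-X is unavailable), creates the SCSI host and sysfs attributes,
 * runs the SLI-4 HBA setup, optionally registers an NVMe localport, checks
 * for a requested firmware upgrade, creates static vports, and sets up RAS
 * firmware logging and the CPU-hotplug poll timer.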
13200 * 13201 * Return code 13202 * 0 - driver can claim the device 13203 * negative value - driver can not claim the device 13204 **/ 13205 static int 13206 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 13207 { 13208 struct lpfc_hba *phba; 13209 struct lpfc_vport *vport = NULL; 13210 struct Scsi_Host *shost = NULL; 13211 int error; 13212 uint32_t cfg_mode, intr_mode; 13213 13214 /* Allocate memory for HBA structure */ 13215 phba = lpfc_hba_alloc(pdev); 13216 if (!phba) 13217 return -ENOMEM; 13218 13219 /* Perform generic PCI device enabling operation */ 13220 error = lpfc_enable_pci_dev(phba); 13221 if (error) 13222 goto out_free_phba; 13223 13224 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 13225 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 13226 if (error) 13227 goto out_disable_pci_dev; 13228 13229 /* Set up SLI-4 specific device PCI memory space */ 13230 error = lpfc_sli4_pci_mem_setup(phba); 13231 if (error) { 13232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13233 "1410 Failed to set up pci memory space.\n"); 13234 goto out_disable_pci_dev; 13235 } 13236 13237 /* Set up SLI-4 Specific device driver resources */ 13238 error = lpfc_sli4_driver_resource_setup(phba); 13239 if (error) { 13240 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13241 "1412 Failed to set up driver resource.\n"); 13242 goto out_unset_pci_mem_s4; 13243 } 13244 13245 INIT_LIST_HEAD(&phba->active_rrq_list); 13246 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 13247 13248 /* Set up common device driver resources */ 13249 error = lpfc_setup_driver_resource_phase2(phba); 13250 if (error) { 13251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13252 "1414 Failed to set up driver resource.\n"); 13253 goto out_unset_driver_resource_s4; 13254 } 13255 13256 /* Get the default values for Model Name and Description */ 13257 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 13258 13259 /* Now, trying to enable interrupt and bring up the device */ 13260 cfg_mode = phba->cfg_use_msi; 13261 13262 /* Put device to a known state before enabling interrupt */ 13263 phba->pport = NULL; 13264 lpfc_stop_port(phba); 13265 13266 /* Init cpu_map array */ 13267 lpfc_cpu_map_array_init(phba); 13268 13269 /* Init hba_eq_hdl array */ 13270 lpfc_hba_eq_hdl_array_init(phba); 13271 13272 /* Configure and enable interrupt */ 13273 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 13274 if (intr_mode == LPFC_INTR_ERROR) { 13275 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13276 "0426 Failed to enable interrupt.\n"); 13277 error = -ENODEV; 13278 goto out_unset_driver_resource; 13279 } 13280 /* Default to single EQ for non-MSI-X */ 13281 if (phba->intr_type != MSIX) { 13282 phba->cfg_irq_chann = 1; 13283 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13284 if (phba->nvmet_support) 13285 phba->cfg_nvmet_mrq = 1; 13286 } 13287 } 13288 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 13289 13290 /* Create SCSI host to the physical port */ 13291 error = lpfc_create_shost(phba); 13292 if (error) { 13293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13294 "1415 Failed to create scsi host.\n"); 13295 goto out_disable_intr; 13296 } 13297 vport = phba->pport; 13298 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 13299 13300 /* Configure sysfs attributes */ 13301 error = lpfc_alloc_sysfs_attr(vport); 13302 if (error) { 13303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13304 "1416 Failed to allocate sysfs attr\n"); 13305 goto out_destroy_shost; 13306 } 13307 13308 /* Set 
up SLI-4 HBA */ 13309 if (lpfc_sli4_hba_setup(phba)) { 13310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13311 "1421 Failed to set up hba\n"); 13312 error = -ENODEV; 13313 goto out_free_sysfs_attr; 13314 } 13315 13316 /* Log the current active interrupt mode */ 13317 phba->intr_mode = intr_mode; 13318 lpfc_log_intr_mode(phba, intr_mode); 13319 13320 /* Perform post initialization setup */ 13321 lpfc_post_init_setup(phba); 13322 13323 /* NVME support in FW earlier in the driver load corrects the 13324 * FC4 type making a check for nvme_support unnecessary. 13325 */ 13326 if (phba->nvmet_support == 0) { 13327 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13328 /* Create NVME binding with nvme_fc_transport. This 13329 * ensures the vport is initialized. If the localport 13330 * create fails, it should not unload the driver to 13331 * support field issues. 13332 */ 13333 error = lpfc_nvme_create_localport(vport); 13334 if (error) { 13335 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13336 "6004 NVME registration " 13337 "failed, error x%x\n", 13338 error); 13339 } 13340 } 13341 } 13342 13343 /* check for firmware upgrade or downgrade */ 13344 if (phba->cfg_request_firmware_upgrade) 13345 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 13346 13347 /* Check if there are static vports to be created. */ 13348 lpfc_create_static_vport(phba); 13349 13350 /* Enable RAS FW log support */ 13351 lpfc_sli4_ras_setup(phba); 13352 13353 INIT_LIST_HEAD(&phba->poll_list); 13354 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 13355 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 13356 13357 return 0; 13358 13359 out_free_sysfs_attr: 13360 lpfc_free_sysfs_attr(vport); 13361 out_destroy_shost: 13362 lpfc_destroy_shost(phba); 13363 out_disable_intr: 13364 lpfc_sli4_disable_intr(phba); 13365 out_unset_driver_resource: 13366 lpfc_unset_driver_resource_phase2(phba); 13367 out_unset_driver_resource_s4: 13368 lpfc_sli4_driver_resource_unset(phba); 13369 out_unset_pci_mem_s4: 13370 lpfc_sli4_pci_mem_unset(phba); 13371 out_disable_pci_dev: 13372 lpfc_disable_pci_dev(phba); 13373 if (shost) 13374 scsi_host_put(shost); 13375 out_free_phba: 13376 lpfc_hba_free(phba); 13377 return error; 13378 } 13379 13380 /** 13381 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 13382 * @pdev: pointer to PCI device 13383 * 13384 * This routine is called from the kernel's PCI subsystem to device with 13385 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 13386 * removed from PCI bus, it performs all the necessary cleanup for the HBA 13387 * device to be removed from the PCI subsystem properly. 
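 *
 * Teardown below proceeds from the top of the stack down: vports are
 * terminated, the FC/SCSI host is removed, node and NVMe localport/targetport
 * state is cleaned up, the SLI layer and queues are brought down, and finally
 * the driver resources, PCI memory mappings and HBA structure are released.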
13388 **/ 13389 static void 13390 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 13391 { 13392 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13393 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 13394 struct lpfc_vport **vports; 13395 struct lpfc_hba *phba = vport->phba; 13396 int i; 13397 13398 /* Mark the device unloading flag */ 13399 spin_lock_irq(&phba->hbalock); 13400 vport->load_flag |= FC_UNLOADING; 13401 spin_unlock_irq(&phba->hbalock); 13402 13403 lpfc_free_sysfs_attr(vport); 13404 13405 /* Release all the vports against this physical port */ 13406 vports = lpfc_create_vport_work_array(phba); 13407 if (vports != NULL) 13408 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 13409 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 13410 continue; 13411 fc_vport_terminate(vports[i]->fc_vport); 13412 } 13413 lpfc_destroy_vport_work_array(phba, vports); 13414 13415 /* Remove FC host with the physical port */ 13416 fc_remove_host(shost); 13417 scsi_remove_host(shost); 13418 13419 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 13420 * localports are destroyed after to cleanup all transport memory. 13421 */ 13422 lpfc_cleanup(vport); 13423 lpfc_nvmet_destroy_targetport(phba); 13424 lpfc_nvme_destroy_localport(vport); 13425 13426 /* De-allocate multi-XRI pools */ 13427 if (phba->cfg_xri_rebalancing) 13428 lpfc_destroy_multixri_pools(phba); 13429 13430 /* 13431 * Bring down the SLI Layer. This step disables all interrupts, 13432 * clears the rings, discards all mailbox commands, and resets 13433 * the HBA FCoE function. 13434 */ 13435 lpfc_debugfs_terminate(vport); 13436 13437 lpfc_stop_hba_timers(phba); 13438 spin_lock_irq(&phba->port_list_lock); 13439 list_del_init(&vport->listentry); 13440 spin_unlock_irq(&phba->port_list_lock); 13441 13442 /* Perform scsi free before driver resource_unset since scsi 13443 * buffers are released to their corresponding pools here. 13444 */ 13445 lpfc_io_free(phba); 13446 lpfc_free_iocb_list(phba); 13447 lpfc_sli4_hba_unset(phba); 13448 13449 lpfc_unset_driver_resource_phase2(phba); 13450 lpfc_sli4_driver_resource_unset(phba); 13451 13452 /* Unmap adapter Control and Doorbell registers */ 13453 lpfc_sli4_pci_mem_unset(phba); 13454 13455 /* Release PCI resources and disable device's PCI function */ 13456 scsi_host_put(shost); 13457 lpfc_disable_pci_dev(phba); 13458 13459 /* Finally, free the driver's device data structure */ 13460 lpfc_hba_free(phba); 13461 13462 return; 13463 } 13464 13465 /** 13466 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 13467 * @dev_d: pointer to device 13468 * 13469 * This routine is called from the kernel's PCI subsystem to support system 13470 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 13471 * this method, it quiesces the device by stopping the driver's worker 13472 * thread for the device, turning off device's interrupt and DMA, and bring 13473 * the device offline. Note that as the driver implements the minimum PM 13474 * requirements to a power-aware driver's PM support for suspend/resume -- all 13475 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 13476 * method call will be treated as SUSPEND and the driver will fully 13477 * reinitialize its device during resume() method call, the driver will set 13478 * device to PCI_D3hot state in PCI config space instead of setting it 13479 * according to the @msg provided by the PM. 
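 *
 * Concretely, the body below takes the port offline, stops the worker
 * thread, disables the SLI-4 interrupts and destroys the SLI-4 queues
 * before returning.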
13480 *
13481 * Return code
13482 * 0 - driver suspended the device
13483 * Error otherwise
13484 **/
13485 static int __maybe_unused
13486 lpfc_pci_suspend_one_s4(struct device *dev_d)
13487 {
13488 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
13489 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13490
13491 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13492 "2843 PCI device Power Management suspend.\n");
13493
13494 /* Bring down the device */
13495 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13496 lpfc_offline(phba);
13497 kthread_stop(phba->worker_thread);
13498
13499 /* Disable interrupt from device */
13500 lpfc_sli4_disable_intr(phba);
13501 lpfc_sli4_queue_destroy(phba);
13502
13503 return 0;
13504 }
13505
13506 /**
13507 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
13508 * @dev_d: pointer to device
13509 *
13510 * This routine is called from the kernel's PCI subsystem to support system
13511 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
13512 * this method, it restores the device's PCI config space state and fully
13513 * reinitializes the device and brings it online. Note that as the driver
13514 * implements the minimum PM requirements to a power-aware driver's PM for
13515 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
13516 * to the suspend() method call will be treated as SUSPEND and the driver
13517 * will fully reinitialize its device during resume() method call, the device
13518 * will be set to PCI_D0 directly in PCI config space before restoring the
13519 * state.
13520 *
13521 * Return code
13522 * 0 - driver resumed the device
13523 * Error otherwise
13524 **/
13525 static int __maybe_unused
13526 lpfc_pci_resume_one_s4(struct device *dev_d)
13527 {
13528 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
13529 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13530 uint32_t intr_mode;
13531 int error;
13532
13533 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13534 "0292 PCI device Power Management resume.\n");
13535
13536 /* Startup the kernel thread for this host adapter. */
13537 phba->worker_thread = kthread_run(lpfc_do_work, phba,
13538 "lpfc_worker_%d", phba->brd_no);
13539 if (IS_ERR(phba->worker_thread)) {
13540 error = PTR_ERR(phba->worker_thread);
13541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13542 "0293 PM resume failed to start worker "
13543 "thread: error=x%x.\n", error);
13544 return error;
13545 }
13546
13547 /* Configure and enable interrupt */
13548 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13549 if (intr_mode == LPFC_INTR_ERROR) {
13550 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13551 "0294 PM resume Failed to enable interrupt\n");
13552 return -EIO;
13553 } else
13554 phba->intr_mode = intr_mode;
13555
13556 /* Restart HBA and bring it online */
13557 lpfc_sli_brdrestart(phba);
13558 lpfc_online(phba);
13559
13560 /* Log the current active interrupt mode */
13561 lpfc_log_intr_mode(phba, phba->intr_mode);
13562
13563 return 0;
13564 }
13565
13566 /**
13567 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
13568 * @phba: pointer to lpfc hba data structure.
13569 *
13570 * This routine is called to prepare the SLI4 device for PCI slot recover. It
13571 * aborts all the outstanding SCSI I/Os to the pci device.
13572 **/ 13573 static void 13574 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 13575 { 13576 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13577 "2828 PCI channel I/O abort preparing for recovery\n"); 13578 /* 13579 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 13580 * and let the SCSI mid-layer to retry them to recover. 13581 */ 13582 lpfc_sli_abort_fcp_rings(phba); 13583 } 13584 13585 /** 13586 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 13587 * @phba: pointer to lpfc hba data structure. 13588 * 13589 * This routine is called to prepare the SLI4 device for PCI slot reset. It 13590 * disables the device interrupt and pci device, and aborts the internal FCP 13591 * pending I/Os. 13592 **/ 13593 static void 13594 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 13595 { 13596 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13597 "2826 PCI channel disable preparing for reset\n"); 13598 13599 /* Block any management I/Os to the device */ 13600 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 13601 13602 /* Block all SCSI devices' I/Os on the host */ 13603 lpfc_scsi_dev_block(phba); 13604 13605 /* Flush all driver's outstanding I/Os as we are to reset */ 13606 lpfc_sli_flush_io_rings(phba); 13607 13608 /* stop all timers */ 13609 lpfc_stop_hba_timers(phba); 13610 13611 /* Disable interrupt and pci device */ 13612 lpfc_sli4_disable_intr(phba); 13613 lpfc_sli4_queue_destroy(phba); 13614 pci_disable_device(phba->pcidev); 13615 } 13616 13617 /** 13618 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 13619 * @phba: pointer to lpfc hba data structure. 13620 * 13621 * This routine is called to prepare the SLI4 device for PCI slot permanently 13622 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 13623 * pending I/Os. 13624 **/ 13625 static void 13626 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 13627 { 13628 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13629 "2827 PCI channel permanent disable for failure\n"); 13630 13631 /* Block all SCSI devices' I/Os on the host */ 13632 lpfc_scsi_dev_block(phba); 13633 13634 /* stop all timers */ 13635 lpfc_stop_hba_timers(phba); 13636 13637 /* Clean up all driver's outstanding I/Os */ 13638 lpfc_sli_flush_io_rings(phba); 13639 } 13640 13641 /** 13642 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 13643 * @pdev: pointer to PCI device. 13644 * @state: the current PCI connection state. 13645 * 13646 * This routine is called from the PCI subsystem for error handling to device 13647 * with SLI-4 interface spec. This function is called by the PCI subsystem 13648 * after a PCI bus error affecting this device has been detected. When this 13649 * function is invoked, it will need to stop all the I/Os and interrupt(s) 13650 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 13651 * for the PCI subsystem to perform proper recovery as desired. 
13652 * 13653 * Return codes 13654 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 13655 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13656 **/ 13657 static pci_ers_result_t 13658 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 13659 { 13660 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13661 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13662 13663 switch (state) { 13664 case pci_channel_io_normal: 13665 /* Non-fatal error, prepare for recovery */ 13666 lpfc_sli4_prep_dev_for_recover(phba); 13667 return PCI_ERS_RESULT_CAN_RECOVER; 13668 case pci_channel_io_frozen: 13669 /* Fatal error, prepare for slot reset */ 13670 lpfc_sli4_prep_dev_for_reset(phba); 13671 return PCI_ERS_RESULT_NEED_RESET; 13672 case pci_channel_io_perm_failure: 13673 /* Permanent failure, prepare for device down */ 13674 lpfc_sli4_prep_dev_for_perm_failure(phba); 13675 return PCI_ERS_RESULT_DISCONNECT; 13676 default: 13677 /* Unknown state, prepare and request slot reset */ 13678 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13679 "2825 Unknown PCI error state: x%x\n", state); 13680 lpfc_sli4_prep_dev_for_reset(phba); 13681 return PCI_ERS_RESULT_NEED_RESET; 13682 } 13683 } 13684 13685 /** 13686 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 13687 * @pdev: pointer to PCI device. 13688 * 13689 * This routine is called from the PCI subsystem for error handling to device 13690 * with SLI-4 interface spec. It is called after PCI bus has been reset to 13691 * restart the PCI card from scratch, as if from a cold-boot. During the 13692 * PCI subsystem error recovery, after the driver returns 13693 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 13694 * recovery and then call this routine before calling the .resume method to 13695 * recover the device. This function will initialize the HBA device, enable 13696 * the interrupt, but it will just put the HBA to offline state without 13697 * passing any I/O traffic. 13698 * 13699 * Return codes 13700 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13701 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13702 */ 13703 static pci_ers_result_t 13704 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 13705 { 13706 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13707 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13708 struct lpfc_sli *psli = &phba->sli; 13709 uint32_t intr_mode; 13710 13711 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 13712 if (pci_enable_device_mem(pdev)) { 13713 printk(KERN_ERR "lpfc: Cannot re-enable " 13714 "PCI device after reset.\n"); 13715 return PCI_ERS_RESULT_DISCONNECT; 13716 } 13717 13718 pci_restore_state(pdev); 13719 13720 /* 13721 * As the new kernel behavior of pci_restore_state() API call clears 13722 * device saved_state flag, need to save the restored state again. 
13723 */ 13724 pci_save_state(pdev); 13725 13726 if (pdev->is_busmaster) 13727 pci_set_master(pdev); 13728 13729 spin_lock_irq(&phba->hbalock); 13730 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 13731 spin_unlock_irq(&phba->hbalock); 13732 13733 /* Configure and enable interrupt */ 13734 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 13735 if (intr_mode == LPFC_INTR_ERROR) { 13736 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13737 "2824 Cannot re-enable interrupt after " 13738 "slot reset.\n"); 13739 return PCI_ERS_RESULT_DISCONNECT; 13740 } else 13741 phba->intr_mode = intr_mode; 13742 13743 /* Log the current active interrupt mode */ 13744 lpfc_log_intr_mode(phba, phba->intr_mode); 13745 13746 return PCI_ERS_RESULT_RECOVERED; 13747 } 13748 13749 /** 13750 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 13751 * @pdev: pointer to PCI device 13752 * 13753 * This routine is called from the PCI subsystem for error handling to device 13754 * with SLI-4 interface spec. It is called when kernel error recovery tells 13755 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 13756 * error recovery. After this call, traffic can start to flow from this device 13757 * again. 13758 **/ 13759 static void 13760 lpfc_io_resume_s4(struct pci_dev *pdev) 13761 { 13762 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13763 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13764 13765 /* 13766 * In case of slot reset, as function reset is performed through 13767 * mailbox command which needs DMA to be enabled, this operation 13768 * has to be moved to the io resume phase. Taking device offline 13769 * will perform the necessary cleanup. 13770 */ 13771 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 13772 /* Perform device reset */ 13773 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 13774 lpfc_offline(phba); 13775 lpfc_sli_brdrestart(phba); 13776 /* Bring the device back online */ 13777 lpfc_online(phba); 13778 } 13779 } 13780 13781 /** 13782 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 13783 * @pdev: pointer to PCI device 13784 * @pid: pointer to PCI device identifier 13785 * 13786 * This routine is to be registered to the kernel's PCI subsystem. When an 13787 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 13788 * at PCI device-specific information of the device and driver to see if the 13789 * driver state that it can support this kind of device. If the match is 13790 * successful, the driver core invokes this routine. This routine dispatches 13791 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 13792 * do all the initialization that it needs to do to handle the HBA device 13793 * properly. 
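 *
 * The dispatch decision is made by reading the LPFC_SLI_INTF word from PCI
 * config space: a valid interface word reporting LPFC_SLI_INTF_REV_SLI4
 * selects the SLI-4 probe path; anything else falls back to the SLI-3 path.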
13794 * 13795 * Return code 13796 * 0 - driver can claim the device 13797 * negative value - driver can not claim the device 13798 **/ 13799 static int 13800 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 13801 { 13802 int rc; 13803 struct lpfc_sli_intf intf; 13804 13805 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 13806 return -ENODEV; 13807 13808 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 13809 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 13810 rc = lpfc_pci_probe_one_s4(pdev, pid); 13811 else 13812 rc = lpfc_pci_probe_one_s3(pdev, pid); 13813 13814 return rc; 13815 } 13816 13817 /** 13818 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 13819 * @pdev: pointer to PCI device 13820 * 13821 * This routine is to be registered to the kernel's PCI subsystem. When an 13822 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 13823 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 13824 * remove routine, which will perform all the necessary cleanup for the 13825 * device to be removed from the PCI subsystem properly. 13826 **/ 13827 static void 13828 lpfc_pci_remove_one(struct pci_dev *pdev) 13829 { 13830 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13831 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13832 13833 switch (phba->pci_dev_grp) { 13834 case LPFC_PCI_DEV_LP: 13835 lpfc_pci_remove_one_s3(pdev); 13836 break; 13837 case LPFC_PCI_DEV_OC: 13838 lpfc_pci_remove_one_s4(pdev); 13839 break; 13840 default: 13841 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13842 "1424 Invalid PCI device group: 0x%x\n", 13843 phba->pci_dev_grp); 13844 break; 13845 } 13846 return; 13847 } 13848 13849 /** 13850 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 13851 * @dev: pointer to device 13852 * 13853 * This routine is to be registered to the kernel's PCI subsystem to support 13854 * system Power Management (PM). When PM invokes this method, it dispatches 13855 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 13856 * suspend the device. 13857 * 13858 * Return code 13859 * 0 - driver suspended the device 13860 * Error otherwise 13861 **/ 13862 static int __maybe_unused 13863 lpfc_pci_suspend_one(struct device *dev) 13864 { 13865 struct Scsi_Host *shost = dev_get_drvdata(dev); 13866 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13867 int rc = -ENODEV; 13868 13869 switch (phba->pci_dev_grp) { 13870 case LPFC_PCI_DEV_LP: 13871 rc = lpfc_pci_suspend_one_s3(dev); 13872 break; 13873 case LPFC_PCI_DEV_OC: 13874 rc = lpfc_pci_suspend_one_s4(dev); 13875 break; 13876 default: 13877 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13878 "1425 Invalid PCI device group: 0x%x\n", 13879 phba->pci_dev_grp); 13880 break; 13881 } 13882 return rc; 13883 } 13884 13885 /** 13886 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 13887 * @dev: pointer to device 13888 * 13889 * This routine is to be registered to the kernel's PCI subsystem to support 13890 * system Power Management (PM). When PM invokes this method, it dispatches 13891 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 13892 * resume the device. 
13893 *
13894 * Return code
13895 * 0 - driver resumed the device
13896 * Error otherwise
13897 **/
13898 static int __maybe_unused
13899 lpfc_pci_resume_one(struct device *dev)
13900 {
13901 struct Scsi_Host *shost = dev_get_drvdata(dev);
13902 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13903 int rc = -ENODEV;
13904
13905 switch (phba->pci_dev_grp) {
13906 case LPFC_PCI_DEV_LP:
13907 rc = lpfc_pci_resume_one_s3(dev);
13908 break;
13909 case LPFC_PCI_DEV_OC:
13910 rc = lpfc_pci_resume_one_s4(dev);
13911 break;
13912 default:
13913 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13914 "1426 Invalid PCI device group: 0x%x\n",
13915 phba->pci_dev_grp);
13916 break;
13917 }
13918 return rc;
13919 }
13920
13921 /**
13922 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
13923 * @pdev: pointer to PCI device.
13924 * @state: the current PCI connection state.
13925 *
13926 * This routine is registered to the PCI subsystem for error handling. This
13927 * function is called by the PCI subsystem after a PCI bus error affecting
13928 * this device has been detected. When this routine is invoked, it dispatches
13929 * the action to the proper SLI-3 or SLI-4 device error detected handling
13930 * routine, which will perform the proper error detected operation.
13931 *
13932 * Return codes
13933 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13934 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13935 **/
13936 static pci_ers_result_t
13937 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13938 {
13939 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13940 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13941 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13942
13943 switch (phba->pci_dev_grp) {
13944 case LPFC_PCI_DEV_LP:
13945 rc = lpfc_io_error_detected_s3(pdev, state);
13946 break;
13947 case LPFC_PCI_DEV_OC:
13948 rc = lpfc_io_error_detected_s4(pdev, state);
13949 break;
13950 default:
13951 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13952 "1427 Invalid PCI device group: 0x%x\n",
13953 phba->pci_dev_grp);
13954 break;
13955 }
13956 return rc;
13957 }
13958
13959 /**
13960 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
13961 * @pdev: pointer to PCI device.
13962 *
13963 * This routine is registered to the PCI subsystem for error handling. This
13964 * function is called after PCI bus has been reset to restart the PCI card
13965 * from scratch, as if from a cold-boot. When this routine is invoked, it
13966 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
13967 * routine, which will perform the proper device reset.
13968 * 13969 * Return codes 13970 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13971 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13972 **/ 13973 static pci_ers_result_t 13974 lpfc_io_slot_reset(struct pci_dev *pdev) 13975 { 13976 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13977 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13978 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 13979 13980 switch (phba->pci_dev_grp) { 13981 case LPFC_PCI_DEV_LP: 13982 rc = lpfc_io_slot_reset_s3(pdev); 13983 break; 13984 case LPFC_PCI_DEV_OC: 13985 rc = lpfc_io_slot_reset_s4(pdev); 13986 break; 13987 default: 13988 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13989 "1428 Invalid PCI device group: 0x%x\n", 13990 phba->pci_dev_grp); 13991 break; 13992 } 13993 return rc; 13994 } 13995 13996 /** 13997 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 13998 * @pdev: pointer to PCI device 13999 * 14000 * This routine is registered to the PCI subsystem for error handling. It 14001 * is called when kernel error recovery tells the lpfc driver that it is 14002 * OK to resume normal PCI operation after PCI bus error recovery. When 14003 * this routine is invoked, it dispatches the action to the proper SLI-3 14004 * or SLI-4 device io_resume routine, which will resume the device operation. 14005 **/ 14006 static void 14007 lpfc_io_resume(struct pci_dev *pdev) 14008 { 14009 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14010 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14011 14012 switch (phba->pci_dev_grp) { 14013 case LPFC_PCI_DEV_LP: 14014 lpfc_io_resume_s3(pdev); 14015 break; 14016 case LPFC_PCI_DEV_OC: 14017 lpfc_io_resume_s4(pdev); 14018 break; 14019 default: 14020 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14021 "1429 Invalid PCI device group: 0x%x\n", 14022 phba->pci_dev_grp); 14023 break; 14024 } 14025 return; 14026 } 14027 14028 /** 14029 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 14030 * @phba: pointer to lpfc hba data structure. 14031 * 14032 * This routine checks to see if OAS is supported for this adapter. If 14033 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 14034 * the enable oas flag is cleared and the pool created for OAS device data 14035 * is destroyed. 14036 * 14037 **/ 14038 static void 14039 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 14040 { 14041 14042 if (!phba->cfg_EnableXLane) 14043 return; 14044 14045 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 14046 phba->cfg_fof = 1; 14047 } else { 14048 phba->cfg_fof = 0; 14049 mempool_destroy(phba->device_data_mem_pool); 14050 phba->device_data_mem_pool = NULL; 14051 } 14052 14053 return; 14054 } 14055 14056 /** 14057 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 14058 * @phba: pointer to lpfc hba data structure. 14059 * 14060 * This routine checks to see if RAS is supported by the adapter. Check the 14061 * function through which RAS support enablement is to be done. 
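 *
 * RAS is treated as supported in hardware for SLI interface type 6 or the
 * G6 SLI family (ASIC generation 0xC and later); firmware logging is then
 * enabled only when the configured RAS logging function matches this PCI
 * function and a firmware log buffer size has been configured.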
14062 **/ 14063 void 14064 lpfc_sli4_ras_init(struct lpfc_hba *phba) 14065 { 14066 /* if ASIC_GEN_NUM >= 0xC) */ 14067 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 14068 LPFC_SLI_INTF_IF_TYPE_6) || 14069 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 14070 LPFC_SLI_INTF_FAMILY_G6)) { 14071 phba->ras_fwlog.ras_hwsupport = true; 14072 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 14073 phba->cfg_ras_fwlog_buffsize) 14074 phba->ras_fwlog.ras_enabled = true; 14075 else 14076 phba->ras_fwlog.ras_enabled = false; 14077 } else { 14078 phba->ras_fwlog.ras_hwsupport = false; 14079 } 14080 } 14081 14082 14083 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 14084 14085 static const struct pci_error_handlers lpfc_err_handler = { 14086 .error_detected = lpfc_io_error_detected, 14087 .slot_reset = lpfc_io_slot_reset, 14088 .resume = lpfc_io_resume, 14089 }; 14090 14091 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one, 14092 lpfc_pci_suspend_one, 14093 lpfc_pci_resume_one); 14094 14095 static struct pci_driver lpfc_driver = { 14096 .name = LPFC_DRIVER_NAME, 14097 .id_table = lpfc_id_table, 14098 .probe = lpfc_pci_probe_one, 14099 .remove = lpfc_pci_remove_one, 14100 .shutdown = lpfc_pci_remove_one, 14101 .driver.pm = &lpfc_pci_pm_ops_one, 14102 .err_handler = &lpfc_err_handler, 14103 }; 14104 14105 static const struct file_operations lpfc_mgmt_fop = { 14106 .owner = THIS_MODULE, 14107 }; 14108 14109 static struct miscdevice lpfc_mgmt_dev = { 14110 .minor = MISC_DYNAMIC_MINOR, 14111 .name = "lpfcmgmt", 14112 .fops = &lpfc_mgmt_fop, 14113 }; 14114 14115 /** 14116 * lpfc_init - lpfc module initialization routine 14117 * 14118 * This routine is to be invoked when the lpfc module is loaded into the 14119 * kernel. The special kernel macro module_init() is used to indicate the 14120 * role of this routine to the kernel as lpfc module entry point. 
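 *
 * The body below registers the lpfcmgmt misc device, attaches the FC
 * transport templates for the physical port and vports, initializes the
 * WQE and NVMET command templates, registers the CPU hotplug callbacks,
 * and finally registers the PCI driver, unwinding each step on failure.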
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	pr_info(LPFC_MODULE_DESC "\n");
	pr_info(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	error = -ENOMEM;
	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		goto unregister;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		goto unregister;
	}
	lpfc_wqe_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();

	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"lpfc/sli4:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
	lpfc_cpuhp_state = error;

	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;

	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
unregister:
	misc_deregister(&lpfc_mgmt_dev);

	return error;
}

void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
	unsigned int start_idx;
	unsigned int dbg_cnt;
	unsigned int temp_idx;
	int i;
	int j = 0;
	unsigned long rem_nsec, iflags;
	bool log_verbose = false;
	struct lpfc_vport *port_iterator;

	/* Don't dump messages if we explicitly set log_verbose for the
	 * physical port or any vport.
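	 *
	 * Summary of the replay logic below: dbg_log[] is used as a circular
	 * buffer of DBG_LOG_SZ entries, dbg_log_idx is the next slot to be
	 * written and dbg_log_cnt counts messages logged since the last dump,
	 * so in the simplest (non-wrapping) case the oldest entry sits at
	 * index dbg_log_idx - dbg_log_cnt and the loop replays dbg_log_cnt
	 * entries (capped at DBG_LOG_SZ), oldest first.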
	 */
	if (phba->cfg_log_verbose)
		return;

	spin_lock_irqsave(&phba->port_list_lock, iflags);
	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
		if (port_iterator->load_flag & FC_UNLOADING)
			continue;
		if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
			if (port_iterator->cfg_log_verbose)
				log_verbose = true;

			scsi_host_put(lpfc_shost_from_vport(port_iterator));

			if (log_verbose) {
				spin_unlock_irqrestore(&phba->port_list_lock,
						       iflags);
				return;
			}
		}
	}
	spin_unlock_irqrestore(&phba->port_list_lock, iflags);

	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
		return;

	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
	if (!dbg_cnt)
		goto out;
	temp_idx = start_idx;
	if (dbg_cnt >= DBG_LOG_SZ) {
		dbg_cnt = DBG_LOG_SZ;
		temp_idx -= 1;
	} else {
		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
		} else {
			if (start_idx < dbg_cnt)
				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
			else
				start_idx -= dbg_cnt;
		}
	}
	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
		 start_idx, temp_idx, dbg_cnt);

	for (i = 0; i < dbg_cnt; i++) {
		if ((start_idx + i) < DBG_LOG_SZ)
			temp_idx = (start_idx + i) % DBG_LOG_SZ;
		else
			temp_idx = j++;
		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
			 temp_idx,
			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
			 rem_nsec / 1000,
			 phba->dbg_log[temp_idx].log);
	}
out:
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
}

__printf(2, 3)
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
	unsigned int idx;
	va_list args;
	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
	struct va_format vaf;

	va_start(args, fmt);
	if (unlikely(dbg_dmping)) {
		vaf.fmt = fmt;
		vaf.va = &args;
		dev_info(&phba->pcidev->dev, "%pV", &vaf);
		va_end(args);
		return;
	}
	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
		DBG_LOG_SZ;

	atomic_inc(&phba->dbg_log_cnt);

	vscnprintf(phba->dbg_log[idx].log,
		   sizeof(phba->dbg_log[idx].log), fmt, args);
	va_end(args);

	phba->dbg_log[idx].t_ns = local_clock();
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
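 *
 * Sketch of the teardown order performed below (largely the reverse of
 * lpfc_init(), except that the management device is deregistered first):
 *
 *	misc_deregister()  ->  pci_unregister_driver()  ->
 *	cpuhp_remove_multi_state()  ->  fc_release_transport() x2  ->
 *	idr_destroy()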
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);