/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
    lpfc_vpd_t *vp = &phba->vpd;
    int i = 0, rc;
    LPFC_MBOXQ_t *pmb;
    MAILBOX_t *mb;
    char *lpfc_vpd_data = NULL;
    uint16_t offset = 0;
    static char licensed[56] =
            "key unlock for use with gnu public licensed code only\0";
    static int init_key = 1;

    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }

    mb = &pmb->u.mb;
    phba->link_state = LPFC_INIT_MBX_CMDS;

    if (lpfc_is_LC_HBA(phba->pcidev->device)) {
        if (init_key) {
            uint32_t *ptext = (uint32_t *) licensed;

            for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
                *ptext = cpu_to_be32(*ptext);
            init_key = 0;
        }

        lpfc_read_nv(phba, pmb);
        memset((char *)mb->un.varRDnvp.rsvd3, 0,
               sizeof(mb->un.varRDnvp.rsvd3));
        memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
               sizeof(licensed));

        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

        if (rc != MBX_SUCCESS) {
            lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                    "0324 Config Port initialization "
                    "error, mbxCmd x%x READ_NVPARM, "
                    "mbxStatus x%x\n",
                    mb->mbxCommand, mb->mbxStatus);
            mempool_free(pmb, phba->mbox_mem_pool);
            return -ERESTART;
        }
        memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
               sizeof(phba->wwnn));
        memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
               sizeof(phba->wwpn));
    }

    /*
     * Clear all option bits except LPFC_SLI3_BG_ENABLED,
     * which was already set in lpfc_get_cfgparam()
     */
    phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

    /* Setup and issue mailbox READ REV command */
    lpfc_read_rev(phba, pmb);
    rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
    if (rc != MBX_SUCCESS) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0439 Adapter failed to init, mbxCmd x%x "
                "READ_REV, mbxStatus x%x\n",
                mb->mbxCommand, mb->mbxStatus);
        mempool_free(pmb, phba->mbox_mem_pool);
        return -ERESTART;
    }

    /*
     * The value of rr must be 1 since the driver set the cv field to 1.
     * This setting requires the FW to set all revision fields.
     */
    if (mb->un.varRdRev.rr == 0) {
        vp->rev.rBit = 0;
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0440 Adapter failed to init, READ_REV has "
                "missing revision information.\n");
        mempool_free(pmb, phba->mbox_mem_pool);
        return -ERESTART;
    }

    if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
        mempool_free(pmb, phba->mbox_mem_pool);
        return -EINVAL;
    }

    /* Save information as VPD data */
    vp->rev.rBit = 1;
    memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
    vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
    memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
    vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
    memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
    vp->rev.biuRev = mb->un.varRdRev.biuRev;
    vp->rev.smRev = mb->un.varRdRev.smRev;
    vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
    vp->rev.endecRev = mb->un.varRdRev.endecRev;
    vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
    vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
    vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
    vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
    vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
    vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

    /* If the sli feature level is less than 9, we must
     * tear down all RPIs and VPIs on link down if NPIV
     * is enabled.
     */
    if (vp->rev.feaLevelHigh < 9)
        phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

    if (lpfc_is_LC_HBA(phba->pcidev->device))
        memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
               sizeof(phba->RandomData));

    /* Get adapter VPD information */
    lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
    if (!lpfc_vpd_data)
        goto out_free_mbox;
    do {
        lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

        if (rc != MBX_SUCCESS) {
            lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                    "0441 VPD not present on adapter, "
                    "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
                    mb->mbxCommand, mb->mbxStatus);
            mb->un.varDmp.word_cnt = 0;
        }
        /* dump mem may return a zero when finished or we got a
         * mailbox error, either way we are done.
         */
        if (mb->un.varDmp.word_cnt == 0)
            break;

        i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
        if (offset + i > DMP_VPD_SIZE)
            i = DMP_VPD_SIZE - offset;
        lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                              lpfc_vpd_data + offset, i);
        offset += i;
    } while (offset < DMP_VPD_SIZE);

    lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

    kfree(lpfc_vpd_data);
out_free_mbox:
    mempool_free(pmb, phba->mbox_mem_pool);
    return 0;
}

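/*
 * Illustrative walk-through of the VPD dump loop above (comment only):
 * each DUMP_MEMORY pass returns word_cnt 32-bit words, so offset
 * advances by word_cnt * 4 bytes per iteration, with the copy length
 * clamped so offset never exceeds DMP_VPD_SIZE. A region reported in
 * 64-word chunks therefore arrives 256 bytes at a time until the
 * firmware returns word_cnt == 0 or the buffer is full.
 */
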
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, it sets the internal async event support
 * flag to 1; otherwise, it sets the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
    if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
        phba->temp_sensor_support = 1;
    else
        phba->temp_sensor_support = 0;
    mempool_free(pmboxq, phba->mbox_mem_pool);
    return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human-readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
    struct prog_id *prg;
    uint32_t prog_id_word;
    char dist = ' ';
    /* character array used for decoding dist type. */
    char dist_char[] = "nabx";

    if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
    }

    prg = (struct prog_id *) &prog_id_word;

    /* word 7 contains the Option ROM version */
    prog_id_word = pmboxq->u.mb.un.varWords[7];

    /* Decode the Option ROM version word to a readable string */
    if (prg->dist < 4)
        dist = dist_char[prg->dist];

    if ((prg->dist == 3) && (prg->num == 0))
        snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
             prg->ver, prg->rev, prg->lev);
    else
        snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
             prg->ver, prg->rev, prg->lev,
             dist, prg->num);
    mempool_free(pmboxq, phba->mbox_mem_pool);
    return;
}

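/*
 * Illustrative decode (comment only): with prg->ver = 5, prg->rev = 0,
 * prg->lev = 2, prg->dist = 3 and prg->num = 7, the "%d.%d%d%c%d" format
 * above yields "5.02x7" (dist 3 maps to 'x' via dist_char[] = "nabx").
 * With dist == 3 and num == 0, the shorter "%d.%d%d" form gives "5.02".
 */
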
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *                         cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
    uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
    u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

    /* If the soft name exists then update it using the service params */
    if (vport->phba->cfg_soft_wwnn)
        u64_to_wwn(vport->phba->cfg_soft_wwnn,
                   vport->fc_sparam.nodeName.u.wwn);
    if (vport->phba->cfg_soft_wwpn)
        u64_to_wwn(vport->phba->cfg_soft_wwpn,
                   vport->fc_sparam.portName.u.wwn);

    /*
     * If the name is empty or there exists a soft name
     * then copy the service params name, otherwise use the fc name
     */
    if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
        memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
               sizeof(struct lpfc_name));
    else
        memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
               sizeof(struct lpfc_name));

    /*
     * If the port name has changed, then set the Param changes flag
     * to unreg the login
     */
    if (vport->fc_portname.u.wwn[0] != 0 &&
        memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(struct lpfc_name)))
        vport->vport_flag |= FAWWPN_PARAM_CHG;

    if (vport->fc_portname.u.wwn[0] == 0 ||
        vport->phba->cfg_soft_wwpn ||
        (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
        vport->vport_flag & FAWWPN_SET) {
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(struct lpfc_name));
        vport->vport_flag &= ~FAWWPN_SET;
        if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
            vport->vport_flag |= FAWWPN_SET;
    } else
        memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
               sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
    struct lpfc_vport *vport = phba->pport;
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    LPFC_MBOXQ_t *pmb;
    MAILBOX_t *mb;
    struct lpfc_dmabuf *mp;
    struct lpfc_sli *psli = &phba->sli;
    uint32_t status, timeout;
    int i, j;
    int rc;

    spin_lock_irq(&phba->hbalock);
    /*
     * If the Config port completed correctly the HBA is no longer
     * overheated.
     */
    if (phba->over_temp_state == HBA_OVER_TEMP)
        phba->over_temp_state = HBA_NORMAL_TEMP;
    spin_unlock_irq(&phba->hbalock);

    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }
    mb = &pmb->u.mb;

    /* Get login parameters for NID. */
    rc = lpfc_read_sparam(phba, pmb, 0);
    if (rc) {
        mempool_free(pmb, phba->mbox_mem_pool);
        return -ENOMEM;
    }

    pmb->vport = vport;
    if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0448 Adapter failed init, mbxCmd x%x "
                "READ_SPARM mbxStatus x%x\n",
                mb->mbxCommand, mb->mbxStatus);
        phba->link_state = LPFC_HBA_ERROR;
        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
        mempool_free(pmb, phba->mbox_mem_pool);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        return -EIO;
    }

    mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

    memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);
    pmb->ctx_buf = NULL;
    lpfc_update_vport_wwn(vport);

    /* Update the fc_host data structures with new wwn. */
    fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
    fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
    fc_host_max_npiv_vports(shost) = phba->max_vpi;

    /* If no serial number in VPD data, use low 6 bytes of WWNN */
    /* This should be consolidated into parse_vpd ? - mr */
    if (phba->SerialNumber[0] == 0) {
        uint8_t *outptr;

        outptr = &vport->fc_nodename.u.s.IEEE[0];
        for (i = 0; i < 12; i++) {
            status = *outptr++;
            j = ((status & 0xf0) >> 4);
            if (j <= 9)
                phba->SerialNumber[i] =
                    (char)((uint8_t) 0x30 + (uint8_t) j);
            else
                phba->SerialNumber[i] =
                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
            i++;
            j = (status & 0xf);
            if (j <= 9)
                phba->SerialNumber[i] =
                    (char)((uint8_t) 0x30 + (uint8_t) j);
            else
                phba->SerialNumber[i] =
                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
        }
    }

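    /*
     * Illustrative mapping (comment only): the loop above emits two hex
     * characters per WWNN byte, so the low-order 6 IEEE bytes become a
     * 12-character serial number. Nibbles 0-9 map to '0'-'9' (0x30 + j)
     * and 10-15 map to 'a'-'f' (0x61 + j - 10); IEEE bytes
     * 00 90 fa 12 34 56 would yield "0090fa123456".
     */
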
    lpfc_read_config(phba, pmb);
    pmb->vport = vport;
    if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0453 Adapter failed to init, mbxCmd x%x "
                "READ_CONFIG, mbxStatus x%x\n",
                mb->mbxCommand, mb->mbxStatus);
        phba->link_state = LPFC_HBA_ERROR;
        mempool_free(pmb, phba->mbox_mem_pool);
        return -EIO;
    }

    /* Check if the port is disabled */
    lpfc_sli_read_link_ste(phba);

    /* Reset the DFT_HBA_Q_DEPTH to the max xri */
    if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                "3359 HBA queue depth changed from %d to %d\n",
                phba->cfg_hba_queue_depth,
                mb->un.varRdConfig.max_xri);
        phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
    }

    phba->lmt = mb->un.varRdConfig.lmt;

    /* Get the default values for Model Name and Description */
    lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

    phba->link_state = LPFC_LINK_DOWN;

    /* Only process IOCBs on ELS ring till hba_state is READY */
    if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
        psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
    if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
        psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

    /* Post receive buffers for desired rings */
    if (phba->sli_rev != 3)
        lpfc_post_rcv_buf(phba);

    /*
     * Configure HBA MSI-X attention conditions to messages if MSI-X mode
     */
    if (phba->intr_type == MSIX) {
        rc = lpfc_config_msi(phba, pmb);
        if (rc) {
            mempool_free(pmb, phba->mbox_mem_pool);
            return -EIO;
        }
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        if (rc != MBX_SUCCESS) {
            lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                    "0352 Config MSI mailbox command "
                    "failed, mbxCmd x%x, mbxStatus x%x\n",
                    pmb->u.mb.mbxCommand,
                    pmb->u.mb.mbxStatus);
            mempool_free(pmb, phba->mbox_mem_pool);
            return -EIO;
        }
    }

    spin_lock_irq(&phba->hbalock);
    /* Initialize ERATT handling flag */
    phba->hba_flag &= ~HBA_ERATT_HANDLED;

    /* Enable appropriate host interrupts */
    if (lpfc_readl(phba->HCregaddr, &status)) {
        spin_unlock_irq(&phba->hbalock);
        return -EIO;
    }
    status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
    if (psli->num_rings > 0)
        status |= HC_R0INT_ENA;
    if (psli->num_rings > 1)
        status |= HC_R1INT_ENA;
    if (psli->num_rings > 2)
        status |= HC_R2INT_ENA;
    if (psli->num_rings > 3)
        status |= HC_R3INT_ENA;

    if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
        (phba->cfg_poll & DISABLE_FCP_RING_INT))
        status &= ~(HC_R0INT_ENA);

    writel(status, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
    spin_unlock_irq(&phba->hbalock);

    /* Set up ring-0 (ELS) timer */
    timeout = phba->fc_ratov * 2;
    mod_timer(&vport->els_tmofunc,
              jiffies + msecs_to_jiffies(1000 * timeout));
    /* Set up heart beat (HB) timer */
    mod_timer(&phba->hb_tmofunc,
              jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
    phba->hb_outstanding = 0;
    phba->last_completion_time = jiffies;
    /* Set up error attention (ERATT) polling timer */
    mod_timer(&phba->eratt_poll,
              jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

    if (phba->hba_flag & LINK_DISABLED) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "2598 Adapter Link is disabled.\n");
        lpfc_down_link(phba, pmb);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
        if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
            lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                    "2599 Adapter failed to issue DOWN_LINK"
                    " mbox command rc 0x%x\n", rc);

            mempool_free(pmb, phba->mbox_mem_pool);
            return -EIO;
        }
    } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
        mempool_free(pmb, phba->mbox_mem_pool);
        rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
        if (rc)
            return rc;
    }
    /* MBOX buffer will be freed in mbox compl */
    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }

    lpfc_config_async(phba, pmb, LPFC_ELS_RING);
    pmb->mbox_cmpl = lpfc_config_async_cmpl;
    pmb->vport = phba->pport;
    rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

    if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0456 Adapter failed to issue "
                "ASYNCEVT_ENABLE mbox status x%x\n",
                rc);
        mempool_free(pmb, phba->mbox_mem_pool);
    }

    /* Get Option ROM version */
    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }

    lpfc_dump_wakeup_param(phba, pmb);
    pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
    pmb->vport = phba->pport;
    rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

    if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0435 Adapter failed "
                "to get Option ROM version status x%x\n", rc);
        mempool_free(pmb, phba->mbox_mem_pool);
    }

    return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
    return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
                               uint32_t flag)
{
    struct lpfc_vport *vport = phba->pport;
    LPFC_MBOXQ_t *pmb;
    MAILBOX_t *mb;
    int rc;

    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }
    mb = &pmb->u.mb;
    pmb->vport = vport;

    if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
         !(phba->lmt & LMT_1Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
         !(phba->lmt & LMT_2Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
         !(phba->lmt & LMT_4Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
         !(phba->lmt & LMT_8Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
         !(phba->lmt & LMT_10Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
         !(phba->lmt & LMT_16Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
         !(phba->lmt & LMT_32Gb)) ||
        ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
         !(phba->lmt & LMT_64Gb))) {
        /* Reset link speed to auto */
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "1302 Invalid speed for this board:%d "
                "Reset link speed to auto.\n",
                phba->cfg_link_speed);
        phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
    }
    lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
    pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
    if (phba->sli_rev < LPFC_SLI_REV4)
        lpfc_set_loopback_flag(phba);
    rc = lpfc_sli_issue_mbox(phba, pmb, flag);
    if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0498 Adapter failed to init, mbxCmd x%x "
                "INIT_LINK, mbxStatus x%x\n",
                mb->mbxCommand, mb->mbxStatus);
        if (phba->sli_rev <= LPFC_SLI_REV3) {
            /* Clear all interrupt enable conditions */
            writel(0, phba->HCregaddr);
            readl(phba->HCregaddr); /* flush */
            /* Clear all pending interrupts */
            writel(0xffffffff, phba->HAregaddr);
            readl(phba->HAregaddr); /* flush */
        }
        phba->link_state = LPFC_HBA_ERROR;
        if (rc != MBX_BUSY || flag == MBX_POLL)
            mempool_free(pmb, phba->mbox_mem_pool);
        return -EIO;
    }
    phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
    if (flag == MBX_POLL)
        mempool_free(pmb, phba->mbox_mem_pool);

    return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use in stopping the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
    LPFC_MBOXQ_t *pmb;
    int rc;

    pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        phba->link_state = LPFC_HBA_ERROR;
        return -ENOMEM;
    }

    lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
            "0491 Adapter Link is disabled.\n");
    lpfc_down_link(phba, pmb);
    pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
    rc = lpfc_sli_issue_mbox(phba, pmb, flag);
    if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "2522 Adapter failed to issue DOWN_LINK"
                " mbox command rc 0x%x\n", rc);

        mempool_free(pmb, phba->mbox_mem_pool);
        return -EIO;
    }
    if (flag == MBX_POLL)
        mempool_free(pmb, phba->mbox_mem_pool);

    return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
    struct lpfc_vport **vports;
    int i;

    if (phba->sli_rev <= LPFC_SLI_REV3) {
        /* Disable interrupts */
        writel(0, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
    }

    if (phba->pport->load_flag & FC_UNLOADING)
        lpfc_cleanup_discovery_resources(phba->pport);
    else {
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
            for (i = 0; i <= phba->max_vports &&
                 vports[i] != NULL; i++)
                lpfc_cleanup_discovery_resources(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
    }
    return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *                            rspiocb which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up completed slow path events after the HBA is
 * reset when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
    struct lpfc_iocbq *rspiocbq;
    struct hbq_dmabuf *dmabuf;
    struct lpfc_cq_event *cq_event;

    spin_lock_irq(&phba->hbalock);
    phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
    spin_unlock_irq(&phba->hbalock);

    while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
        /* Get the response iocb from the head of work queue */
        spin_lock_irq(&phba->hbalock);
        list_remove_head(&phba->sli4_hba.sp_queue_event,
                         cq_event, struct lpfc_cq_event, list);
        spin_unlock_irq(&phba->hbalock);

        switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
        case CQE_CODE_COMPL_WQE:
            rspiocbq = container_of(cq_event, struct lpfc_iocbq,
                                    cq_event);
            lpfc_sli_release_iocbq(phba, rspiocbq);
            break;
        case CQE_CODE_RECEIVE:
        case CQE_CODE_RECEIVE_V1:
            dmabuf = container_of(cq_event, struct hbq_dmabuf,
                                  cq_event);
            lpfc_in_buf_free(phba, &dmabuf->dbuf);
        }
    }
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
    struct lpfc_sli *psli = &phba->sli;
    struct lpfc_sli_ring *pring;
    struct lpfc_dmabuf *mp, *next_mp;
    LIST_HEAD(buflist);
    int count;

    if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
        lpfc_sli_hbqbuf_free_all(phba);
    else {
        /* Cleanup preposted buffers on the ELS ring */
        pring = &psli->sli3_ring[LPFC_ELS_RING];
        spin_lock_irq(&phba->hbalock);
        list_splice_init(&pring->postbufq, &buflist);
        spin_unlock_irq(&phba->hbalock);

        count = 0;
        list_for_each_entry_safe(mp, next_mp, &buflist, list) {
            list_del(&mp->list);
            count++;
            lpfc_mbuf_free(phba, mp->virt, mp->phys);
            kfree(mp);
        }

        spin_lock_irq(&phba->hbalock);
        pring->postbufq_cnt -= count;
        spin_unlock_irq(&phba->hbalock);
    }
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
    struct lpfc_sli *psli = &phba->sli;
    struct lpfc_queue *qp = NULL;
    struct lpfc_sli_ring *pring;
    LIST_HEAD(completions);
    int i;
    struct lpfc_iocbq *piocb, *next_iocb;

    if (phba->sli_rev != LPFC_SLI_REV4) {
        for (i = 0; i < psli->num_rings; i++) {
            pring = &psli->sli3_ring[i];
            spin_lock_irq(&phba->hbalock);
            /* At this point in time the HBA is either reset or DOA.
             * Nothing should be on txcmplq as it will
             * NEVER complete.
             */
            list_splice_init(&pring->txcmplq, &completions);
            pring->txcmplq_cnt = 0;
            spin_unlock_irq(&phba->hbalock);

            lpfc_sli_abort_iocb_ring(phba, pring);
        }
        /* Cancel all the IOCBs from the completions list */
        lpfc_sli_cancel_iocbs(phba, &completions,
                              IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
        return;
    }
    list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
        pring = qp->pring;
        if (!pring)
            continue;
        spin_lock_irq(&pring->ring_lock);
        list_for_each_entry_safe(piocb, next_iocb,
                                 &pring->txcmplq, list)
            piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
        list_splice_init(&pring->txcmplq, &completions);
        pring->txcmplq_cnt = 0;
        spin_unlock_irq(&pring->ring_lock);
        lpfc_sli_abort_iocb_ring(phba, pring);
    }
    /* Cancel all the IOCBs from the completions list */
    lpfc_sli_cancel_iocbs(phba, &completions,
                          IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
    lpfc_hba_free_post_buf(phba);
    lpfc_hba_clean_txcmplq(phba);
    return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
    struct lpfc_io_buf *psb, *psb_next;
    struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
    struct lpfc_sli4_hdw_queue *qp;
    LIST_HEAD(aborts);
    LIST_HEAD(nvme_aborts);
    LIST_HEAD(nvmet_aborts);
    struct lpfc_sglq *sglq_entry = NULL;
    int cnt, idx;

    lpfc_sli_hbqbuf_free_all(phba);
    lpfc_hba_clean_txcmplq(phba);

    /* At this point in time the HBA is either reset or DOA. Either
     * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
     * on the lpfc_els_sgl_list so that it can either be freed if the
     * driver is unloading or reposted if the driver is restarting
     * the port.
     */
    spin_lock_irq(&phba->hbalock);  /* required for lpfc_els_sgl_list and */
                                    /* scsi_buf_list */
    /* sgl_list_lock required because worker thread uses this
     * list.
     */
    spin_lock(&phba->sli4_hba.sgl_list_lock);
    list_for_each_entry(sglq_entry,
                        &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
        sglq_entry->state = SGL_FREED;

    list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                     &phba->sli4_hba.lpfc_els_sgl_list);

    spin_unlock(&phba->sli4_hba.sgl_list_lock);

    /* abts_xxxx_buf_list_lock required because worker thread uses this
     * list.
     */
    cnt = 0;
    for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
        qp = &phba->sli4_hba.hdwq[idx];

        spin_lock(&qp->abts_io_buf_list_lock);
        list_splice_init(&qp->lpfc_abts_io_buf_list,
                         &aborts);

        list_for_each_entry_safe(psb, psb_next, &aborts, list) {
            psb->pCmd = NULL;
            psb->status = IOSTAT_SUCCESS;
            cnt++;
        }
        spin_lock(&qp->io_buf_list_put_lock);
        list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
        qp->put_io_bufs += qp->abts_scsi_io_bufs;
        qp->put_io_bufs += qp->abts_nvme_io_bufs;
        qp->abts_scsi_io_bufs = 0;
        qp->abts_nvme_io_bufs = 0;
        spin_unlock(&qp->io_buf_list_put_lock);
        spin_unlock(&qp->abts_io_buf_list_lock);
    }
    spin_unlock_irq(&phba->hbalock);

    if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
        spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                         &nvmet_aborts);
        spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
            ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
            lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
        }
    }

    lpfc_sli4_free_sp_events(phba);
    return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
    return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
    struct lpfc_hba *phba;
    uint32_t tmo_posted;
    unsigned long iflag;

    phba = from_timer(phba, t, hb_tmofunc);

    /* Check for heart beat timeout conditions */
    spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
    tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
    if (!tmo_posted)
        phba->pport->work_port_events |= WORKER_HB_TMO;
    spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

    /* Tell the worker thread there is work to do */
    if (!tmo_posted)
        lpfc_worker_wake_up(phba);
    return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
    struct lpfc_hba *phba;
    unsigned long iflag;

    phba = from_timer(phba, t, rrq_tmr);
    spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
    if (!(phba->pport->load_flag & FC_UNLOADING))
        phba->hba_flag |= HBA_RRQ_ACTIVE;
    else
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
    spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

    if (!(phba->pport->load_flag & FC_UNLOADING))
        lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
    unsigned long drvr_flag;

    spin_lock_irqsave(&phba->hbalock, drvr_flag);
    phba->hb_outstanding = 0;
    spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

    /* Check and reset heart-beat timer if necessary */
    mempool_free(pmboxq, phba->mbox_mem_pool);
    if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
        !(phba->link_state == LPFC_HBA_ERROR) &&
        !(phba->pport->load_flag & FC_UNLOADING))
        mod_timer(&phba->hb_tmofunc,
                  jiffies +
                  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
    return;
}

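/*
 * Illustrative heart-beat timeline (comment only), using the "current 5"
 * second LPFC_HB_MBOX_INTERVAL and "current 30" second LPFC_HB_MBOX_TIMEOUT
 * cited above:
 *
 *   t=0s    timer fires, no heart-beat outstanding -> mailbox issued,
 *           hb_outstanding = 1, timer re-armed for the 30s timeout
 *   t<30s   completion arrives -> lpfc_hb_mbox_cmpl() clears
 *           hb_outstanding and re-arms the timer for the 5s interval
 *   t=30s   timer fires with hb_outstanding still set -> the timeout
 *           handler logs message 0459 and, as described above, the
 *           driver will eventually put the HBA offline
 */
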
/*
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 */
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
    struct lpfc_hba *phba = container_of(to_delayed_work(work),
                                         struct lpfc_hba,
                                         idle_stat_delay_work);
    struct lpfc_queue *cq;
    struct lpfc_sli4_hdw_queue *hdwq;
    struct lpfc_idle_stat *idle_stat;
    u32 i, idle_percent;
    u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

    if (phba->pport->load_flag & FC_UNLOADING)
        return;

    if (phba->link_state == LPFC_HBA_ERROR ||
        phba->pport->fc_flag & FC_OFFLINE_MODE)
        goto requeue;

    for_each_present_cpu(i) {
        hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
        cq = hdwq->io_cq;

        /* Skip if we've already handled this cq's primary CPU */
        if (cq->chann != i)
            continue;

        idle_stat = &phba->sli4_hba.idle_stat[i];

        /* get_cpu_idle_time returns values as running counters. Thus,
         * to know the amount for this period, the prior counter values
         * need to be subtracted from the current counter values.
         * From there, the idle time stat can be calculated as a
         * percentage of 100 - the sum of the other consumption times.
         */
        wall_idle = get_cpu_idle_time(i, &wall, 1);
        diff_idle = wall_idle - idle_stat->prev_idle;
        diff_wall = wall - idle_stat->prev_wall;

        if (diff_wall <= diff_idle)
            busy_time = 0;
        else
            busy_time = diff_wall - diff_idle;

        idle_percent = div64_u64(100 * busy_time, diff_wall);
        idle_percent = 100 - idle_percent;

        if (idle_percent < 15)
            cq->poll_mode = LPFC_QUEUE_WORK;
        else
            cq->poll_mode = LPFC_IRQ_POLL;

        idle_stat->prev_idle = wall_idle;
        idle_stat->prev_wall = wall;
    }

requeue:
    schedule_delayed_work(&phba->idle_stat_delay_work,
                          msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

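/*
 * Illustrative numbers for the calculation above (comment only): if
 * diff_wall = 1000 and diff_idle = 900 over the sampling window, then
 * busy_time = 100 and idle_percent = 100 - (100 * 100 / 1000) = 90.
 * Since 90 >= 15, the CQ stays in LPFC_IRQ_POLL mode; only a
 * mostly-busy CPU (idle_percent < 15) falls back to LPFC_QUEUE_WORK.
 */
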
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
    struct lpfc_hba *phba = container_of(to_delayed_work(work),
                                         struct lpfc_hba, eq_delay_work);
    struct lpfc_eq_intr_info *eqi, *eqi_new;
    struct lpfc_queue *eq, *eq_next;
    unsigned char *ena_delay = NULL;
    uint32_t usdelay;
    int i;

    if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
        return;

    if (phba->link_state == LPFC_HBA_ERROR ||
        phba->pport->fc_flag & FC_OFFLINE_MODE)
        goto requeue;

    ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
                        GFP_KERNEL);
    if (!ena_delay)
        goto requeue;

    for (i = 0; i < phba->cfg_irq_chann; i++) {
        /* Get the EQ corresponding to the IRQ vector */
        eq = phba->sli4_hba.hba_eq_hdl[i].eq;
        if (!eq)
            continue;
        if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
            eq->q_flag &= ~HBA_EQ_DELAY_CHK;
            ena_delay[eq->last_cpu] = 1;
        }
    }

    for_each_present_cpu(i) {
        eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
        if (ena_delay[i]) {
            usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
            if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
                usdelay = LPFC_MAX_AUTO_EQ_DELAY;
        } else {
            usdelay = 0;
        }

        eqi->icnt = 0;

        list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
            if (unlikely(eq->last_cpu != i)) {
                eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
                                      eq->last_cpu);
                list_move_tail(&eq->cpu_list, &eqi_new->list);
                continue;
            }
            if (usdelay != eq->q_mode)
                lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
                                         usdelay);
        }
    }

    kfree(ena_delay);

requeue:
    queue_delayed_work(phba->wq, &phba->eq_delay_work,
                       msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

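/*
 * Illustrative EQ-delay math for lpfc_hb_eq_delay_work() above (comment
 * only): usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP, so roughly
 * every 1024 interrupts counted on a CPU in the last window adds one
 * LPFC_EQ_DELAY_STEP of coalescing delay; e.g. icnt = 4096 requests
 * 4 steps, and the result is capped at LPFC_MAX_AUTO_EQ_DELAY.
 */
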
/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
    u32 i;
    u32 hwq_count;

    hwq_count = phba->cfg_hdw_queue;
    for (i = 0; i < hwq_count; i++) {
        /* Adjust XRIs in private pool */
        lpfc_adjust_pvt_pool_count(phba, i);

        /* Adjust high watermark */
        lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
        /* Snapshot pbl, pvt and busy count */
        lpfc_snapshot_mxp(phba, i);
#endif
    }
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
 * timer for the next timeout period. If the lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer set properly. Otherwise, if
 * there has been a heart-beat mailbox command outstanding, the HBA shall
 * be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
    struct lpfc_vport **vports;
    LPFC_MBOXQ_t *pmboxq;
    struct lpfc_dmabuf *buf_ptr;
    int retval, i;
    struct lpfc_sli *psli = &phba->sli;
    LIST_HEAD(completions);

    if (phba->cfg_xri_rebalancing) {
        /* Multi-XRI pools handler */
        lpfc_hb_mxp_handler(phba);
    }

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
            lpfc_rcv_seq_check_edtov(vports[i]);
            lpfc_fdmi_change_check(vports[i]);
        }
    lpfc_destroy_vport_work_array(phba, vports);

    if ((phba->link_state == LPFC_HBA_ERROR) ||
        (phba->pport->load_flag & FC_UNLOADING) ||
        (phba->pport->fc_flag & FC_OFFLINE_MODE))
        return;

    spin_lock_irq(&phba->pport->work_port_lock);

    if (time_after(phba->last_completion_time +
                   msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
                   jiffies)) {
        spin_unlock_irq(&phba->pport->work_port_lock);
        if (!phba->hb_outstanding)
            mod_timer(&phba->hb_tmofunc,
                      jiffies +
                      msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        else
            mod_timer(&phba->hb_tmofunc,
                      jiffies +
                      msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
        return;
    }
    spin_unlock_irq(&phba->pport->work_port_lock);

    if (phba->elsbuf_cnt &&
        (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
        spin_lock_irq(&phba->hbalock);
        list_splice_init(&phba->elsbuf, &completions);
        phba->elsbuf_cnt = 0;
        phba->elsbuf_prev_cnt = 0;
        spin_unlock_irq(&phba->hbalock);

        while (!list_empty(&completions)) {
            list_remove_head(&completions, buf_ptr,
                             struct lpfc_dmabuf, list);
            lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
            kfree(buf_ptr);
        }
    }
    phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

    /* If there is no heart beat outstanding, issue a heartbeat command */
    if (phba->cfg_enable_hba_heartbeat) {
        if (!phba->hb_outstanding) {
            if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
                (list_empty(&psli->mboxq))) {
                pmboxq = mempool_alloc(phba->mbox_mem_pool,
                                       GFP_KERNEL);
                if (!pmboxq) {
                    mod_timer(&phba->hb_tmofunc,
                              jiffies +
                              msecs_to_jiffies(1000 *
                              LPFC_HB_MBOX_INTERVAL));
                    return;
                }

                lpfc_heart_beat(phba, pmboxq);
                pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
                pmboxq->vport = phba->pport;
                retval = lpfc_sli_issue_mbox(phba, pmboxq,
                                             MBX_NOWAIT);

                if (retval != MBX_BUSY &&
                    retval != MBX_SUCCESS) {
                    mempool_free(pmboxq,
                                 phba->mbox_mem_pool);
                    mod_timer(&phba->hb_tmofunc,
                              jiffies +
                              msecs_to_jiffies(1000 *
                              LPFC_HB_MBOX_INTERVAL));
                    return;
                }
                phba->skipped_hb = 0;
                phba->hb_outstanding = 1;
            } else if (time_before_eq(phba->last_completion_time,
                                      phba->skipped_hb)) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "2857 Last completion time not "
                        " updated in %d ms\n",
                        jiffies_to_msecs(jiffies
                        - phba->last_completion_time));
            } else
                phba->skipped_hb = jiffies;

            mod_timer(&phba->hb_tmofunc,
                      jiffies +
                      msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
            return;
        } else {
            /*
             * If heart beat timeout called with hb_outstanding set
             * we need to give the hb mailbox cmd a chance to
             * complete or TMO.
             */
            lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                    "0459 Adapter heartbeat still out"
                    "standing:last compl time was %d ms.\n",
                    jiffies_to_msecs(jiffies
                    - phba->last_completion_time));
            mod_timer(&phba->hb_tmofunc,
                      jiffies +
                      msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
        }
    } else {
        mod_timer(&phba->hb_tmofunc,
                  jiffies +
                  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
    }
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
    struct lpfc_sli *psli = &phba->sli;

    spin_lock_irq(&phba->hbalock);
    psli->sli_flag &= ~LPFC_SLI_ACTIVE;
    spin_unlock_irq(&phba->hbalock);
    lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

    lpfc_offline(phba);
    lpfc_reset_barrier(phba);
    spin_lock_irq(&phba->hbalock);
    lpfc_sli_brdreset(phba);
    spin_unlock_irq(&phba->hbalock);
    lpfc_hba_down_post(phba);
    lpfc_sli_brdready(phba, HS_MBRDY);
    lpfc_unblock_mgmt_io(phba);
    phba->link_state = LPFC_HBA_ERROR;
    return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
    spin_lock_irq(&phba->hbalock);
    phba->link_state = LPFC_HBA_ERROR;
    spin_unlock_irq(&phba->hbalock);

    lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
    lpfc_sli_flush_io_rings(phba);
    lpfc_offline(phba);
    lpfc_hba_down_post(phba);
    lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
    uint32_t old_host_status = phba->work_hs;
    struct lpfc_sli *psli = &phba->sli;

    /* If the pci channel is offline, ignore possible errors,
     * since we cannot communicate with the pci card anyway.
     */
    if (pci_channel_offline(phba->pcidev)) {
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~DEFER_ERATT;
        spin_unlock_irq(&phba->hbalock);
        return;
    }

    lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
            "0479 Deferred Adapter Hardware Error "
            "Data: x%x x%x x%x\n",
            phba->work_hs, phba->work_status[0],
            phba->work_status[1]);

    spin_lock_irq(&phba->hbalock);
    psli->sli_flag &= ~LPFC_SLI_ACTIVE;
    spin_unlock_irq(&phba->hbalock);

    /*
     * Firmware stops when it triggers erratt. That could cause I/Os to
     * be dropped by the firmware. Error out the iocbs (I/Os) on the
     * txcmplq and let the SCSI layer retry them after re-establishing
     * link.
     */
    lpfc_sli_abort_fcp_rings(phba);

    /*
     * There was a firmware error. Take the hba offline and then
     * attempt to restart it.
     */
    lpfc_offline_prep(phba, LPFC_MBX_WAIT);
    lpfc_offline(phba);

    /* Wait for the ER1 bit to clear.*/
    while (phba->work_hs & HS_FFER1) {
        msleep(100);
        if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
            phba->work_hs = UNPLUG_ERR;
            break;
        }
        /* If driver is unloading let the worker thread continue */
        if (phba->pport->load_flag & FC_UNLOADING) {
            phba->work_hs = 0;
            break;
        }
    }

    /*
     * This is to protect against a race condition in which the first
     * write to the host attention register clears the host status
     * register.
     */
    if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
        phba->work_hs = old_host_status & ~HS_FFER1;

    spin_lock_irq(&phba->hbalock);
    phba->hba_flag &= ~DEFER_ERATT;
    spin_unlock_irq(&phba->hbalock);
    phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
    phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to management apps
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts an FC_REG_BOARD_EVENT vendor event (subcategory
 * LPFC_EVENT_PORTINTERR) to the fc_host so that management applications
 * are notified of the board error.
 **/
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
    struct lpfc_board_event_header board_event;
    struct Scsi_Host *shost;

    board_event.event_type = FC_REG_BOARD_EVENT;
    board_event.subcategory = LPFC_EVENT_PORTINTERR;
    shost = lpfc_shost_from_vport(phba->pport);
    fc_host_post_vendor_event(shost, fc_get_event_number(),
                              sizeof(board_event),
                              (char *) &board_event,
                              LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
    struct lpfc_vport *vport = phba->pport;
    struct lpfc_sli *psli = &phba->sli;
    uint32_t event_data;
    unsigned long temperature;
    struct temp_event temp_event_data;
    struct Scsi_Host *shost;

    /* If the pci channel is offline, ignore possible errors,
     * since we cannot communicate with the pci card anyway.
     */
    if (pci_channel_offline(phba->pcidev)) {
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~DEFER_ERATT;
        spin_unlock_irq(&phba->hbalock);
        return;
    }

    /* If resets are disabled then leave the HBA alone and return */
    if (!phba->cfg_enable_hba_reset)
        return;

    /* Send an internal error event to mgmt application */
    lpfc_board_errevt_to_mgmt(phba);

    if (phba->hba_flag & DEFER_ERATT)
        lpfc_handle_deferred_eratt(phba);

    if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
        if (phba->work_hs & HS_FFER6)
            /* Re-establishing Link */
            lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                    "1301 Re-establishing Link "
                    "Data: x%x x%x x%x\n",
                    phba->work_hs, phba->work_status[0],
                    phba->work_status[1]);
        if (phba->work_hs & HS_FFER8)
            /* Device Zeroization */
            lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                    "2861 Host Authentication device "
                    "zeroization Data:x%x x%x x%x\n",
                    phba->work_hs, phba->work_status[0],
                    phba->work_status[1]);

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);

        /*
         * Firmware stops when it triggers erratt with HS_FFER6.
         * That could cause I/Os to be dropped by the firmware.
         * Error out the iocbs (I/Os) on the txcmplq and let the
         * SCSI layer retry them after re-establishing link.
         */
        lpfc_sli_abort_fcp_rings(phba);

        /*
         * There was a firmware error. Take the hba offline and then
         * attempt to restart it.
         */
        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
        lpfc_offline(phba);
        lpfc_sli_brdrestart(phba);
        if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
            lpfc_unblock_mgmt_io(phba);
            return;
        }
        lpfc_unblock_mgmt_io(phba);
    } else if (phba->work_hs & HS_CRIT_TEMP) {
        temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
        temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
        temp_event_data.event_code = LPFC_CRIT_TEMP;
        temp_event_data.data = (uint32_t)temperature;

        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0406 Adapter maximum temperature exceeded "
                "(%ld), taking this port offline "
                "Data: x%x x%x x%x\n",
                temperature, phba->work_hs,
                phba->work_status[0], phba->work_status[1]);

        shost = lpfc_shost_from_vport(phba->pport);
        fc_host_post_vendor_event(shost, fc_get_event_number(),
                                  sizeof(temp_event_data),
                                  (char *) &temp_event_data,
                                  SCSI_NL_VID_TYPE_PCI
                                  | PCI_VENDOR_ID_EMULEX);

        spin_lock_irq(&phba->hbalock);
        phba->over_temp_state = HBA_OVER_TEMP;
        spin_unlock_irq(&phba->hbalock);
        lpfc_offline_eratt(phba);

    } else {
        /* The if clause above forces this code path when the status
         * failure is a value other than FFER6. Do not call the offline
         * twice. This is the adapter hardware error path.
         */
                 */
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0457 Adapter Hardware Error "
                                "Data: x%x x%x x%x\n",
                                phba->work_hs,
                                phba->work_status[0], phba->work_status[1]);

                event_data = FC_REG_DUMP_EVENT;
                shost = lpfc_shost_from_vport(vport);
                fc_host_post_vendor_event(shost, fc_get_event_number(),
                                          sizeof(event_data),
                                          (char *) &event_data,
                                          SCSI_NL_VID_TYPE_PCI |
                                          PCI_VENDOR_ID_EMULEX);

                lpfc_offline_eratt(phba);
        }
        return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for the port
 * status register (ERR, RDY, RN) bits before proceeding with the function
 * reset. During this process, interrupt vectors are freed and later requested
 * again to handle a possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
                            bool en_rn_msg)
{
        int rc;
        uint32_t intr_mode;

        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
            LPFC_SLI_INTF_IF_TYPE_2) {
                /*
                 * On an error status condition, the driver needs to wait
                 * for port ready before performing reset.
                 */
                rc = lpfc_sli4_pdev_status_reg_wait(phba);
                if (rc)
                        return rc;
        }

        /* need reset: attempt for port recovery */
        if (en_rn_msg)
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "2887 Reset Needed: Attempting Port "
                                "Recovery...\n");
        lpfc_offline_prep(phba, mbx_action);
        lpfc_sli_flush_io_rings(phba);
        lpfc_offline(phba);
        /* release interrupt for possible resource change */
        lpfc_sli4_disable_intr(phba);
        rc = lpfc_sli_brdrestart(phba);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "6309 Failed to restart board\n");
                return rc;
        }
        /* request and enable interrupt */
        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3175 Failed to enable interrupt\n");
                return -EIO;
        }
        phba->intr_mode = intr_mode;
        rc = lpfc_online(phba);
        if (rc == 0)
                lpfc_unblock_mgmt_io(phba);

        return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        uint32_t event_data;
        struct Scsi_Host *shost;
        uint32_t if_type;
        struct lpfc_register portstat_reg = {0};
        uint32_t reg_err1, reg_err2;
        uint32_t uerrlo_reg, uemasklo_reg;
        uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
        bool en_rn_msg = true;
        struct temp_event temp_event_data;
        struct lpfc_register portsmphr_reg;
        int rc, i;

        /* If the pci channel is offline, ignore possible errors, since
         * we cannot communicate with the pci card anyway.
         */
        if (pci_channel_offline(phba->pcidev)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3166 pci channel is offline\n");
                lpfc_sli4_offline_eratt(phba);
                return;
        }

        memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
        switch (if_type) {
        case LPFC_SLI_INTF_IF_TYPE_0:
                pci_rd_rc1 = lpfc_readl(
                                phba->sli4_hba.u.if_type0.UERRLOregaddr,
                                &uerrlo_reg);
                pci_rd_rc2 = lpfc_readl(
                                phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
                                &uemasklo_reg);
                /* consider PCI bus read error as pci_channel_offline */
                if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
                        return;
                if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
                        lpfc_sli4_offline_eratt(phba);
                        return;
                }
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "7623 Checking UE recoverable");

                for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
                        if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
                                       &portsmphr_reg.word0))
                                continue;

                        smphr_port_status = bf_get(lpfc_port_smphr_port_status,
                                                   &portsmphr_reg);
                        if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
                            LPFC_PORT_SEM_UE_RECOVERABLE)
                                break;
                        /* Sleep for 1Sec, before checking SEMAPHORE */
                        msleep(1000);
                }

                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "4827 smphr_port_status x%x : Waited %dSec",
                                smphr_port_status, i);

                /* Recoverable UE, reset the HBA device */
                if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
                    LPFC_PORT_SEM_UE_RECOVERABLE) {
                        for (i = 0; i < 20; i++) {
                                msleep(1000);
                                if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
                                                &portsmphr_reg.word0) &&
                                    (LPFC_POST_STAGE_PORT_READY ==
                                     bf_get(lpfc_port_smphr_port_status,
                                            &portsmphr_reg))) {
                                        rc = lpfc_sli4_port_sta_fn_reset(phba,
                                                LPFC_MBX_NO_WAIT, en_rn_msg);
                                        if (rc == 0)
                                                return;
                                        lpfc_printf_log(phba, KERN_ERR,
                                                LOG_TRACE_EVENT,
                                                "4215 Failed to recover UE");
                                        break;
                                }
                        }
                }
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "7624 Firmware not ready: Failing UE recovery,"
                                " waited %dSec", i);
                phba->link_state = LPFC_HBA_ERROR;
                break;

        case LPFC_SLI_INTF_IF_TYPE_2:
        case LPFC_SLI_INTF_IF_TYPE_6:
                pci_rd_rc1 = lpfc_readl(
                                phba->sli4_hba.u.if_type2.STATUSregaddr,
                                &portstat_reg.word0);
                /* consider PCI bus read error as pci_channel_offline */
                if (pci_rd_rc1 == -EIO) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3151 PCI bus read access failure: x%x\n",
                                readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
                        lpfc_sli4_offline_eratt(phba);
                        return;
                }
                reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
                reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
                if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "2889 Port Overtemperature event, "
                                        "taking port offline Data: x%x x%x\n",
                                        reg_err1, reg_err2);

                        phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
                        temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
                        temp_event_data.event_code = LPFC_CRIT_TEMP;
                        temp_event_data.data = 0xFFFFFFFF;

                        shost = lpfc_shost_from_vport(phba->pport);
                        fc_host_post_vendor_event(shost, fc_get_event_number(),
                                                  sizeof(temp_event_data),
                                                  (char *)&temp_event_data,
                                                  SCSI_NL_VID_TYPE_PCI
                                                  | PCI_VENDOR_ID_EMULEX);

                        spin_lock_irq(&phba->hbalock);
                        phba->over_temp_state = HBA_OVER_TEMP;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_sli4_offline_eratt(phba);
                        return;
                }
                if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
                    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "3143 Port Down: Firmware Update "
                                        "Detected\n");
                        en_rn_msg = false;
                } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
                           reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "3144 Port Down: Debug Dump\n");
                else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
                         reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "3145 Port Down: Provisioning\n");

                /* If resets are disabled then leave the HBA alone and return */
                if (!phba->cfg_enable_hba_reset)
                        return;

                /* Check port status register for function reset */
                rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
                                                 en_rn_msg);
                if (rc == 0) {
                        /* don't report event on forced debug dump */
                        if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
                            reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
                                return;
                        else
                                break;
                }
                /* fall through for not able to recover */
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3152 Unrecoverable error\n");
                phba->link_state = LPFC_HBA_ERROR;
                break;
        case LPFC_SLI_INTF_IF_TYPE_1:
        default:
                break;
        }
        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                        "3123 Report dump event to upper layer\n");
        /* Send an internal error event to mgmt application */
        lpfc_board_errevt_to_mgmt(phba);

        event_data = FC_REG_DUMP_EVENT;
        shost = lpfc_shost_from_vport(vport);
        fc_host_post_vendor_event(shost, fc_get_event_number(),
                                  sizeof(event_data), (char *) &event_data,
                                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine, invoking it through the API jump table function pointer in the
 * lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
        (*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. SLI3 only.
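 *
 * In outline (a simplified sketch of the body below; error unwinding and
 * locking are omitted, see the code for the real flow):
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
 *	lpfc_read_topology(phba, pmb, mp);        build READ_TOPOLOGY mailbox
 *	lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	writel(HA_LATT, phba->HAregaddr);         ack the link attention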
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        volatile uint32_t control;
        struct lpfc_dmabuf *mp;
        int rc = 0;

        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                rc = 1;
                goto lpfc_handle_latt_err_exit;
        }

        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (!mp) {
                rc = 2;
                goto lpfc_handle_latt_free_pmb;
        }

        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
        if (!mp->virt) {
                rc = 3;
                goto lpfc_handle_latt_free_mp;
        }

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);

        psli->slistat.link_event++;
        lpfc_read_topology(phba, pmb, mp);
        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
        pmb->vport = vport;
        /* Block ELS IOCBs until we have processed this mbox command */
        phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                rc = 4;
                goto lpfc_handle_latt_free_mbuf;
        }

        /* Clear Link Attention in HA REG */
        spin_lock_irq(&phba->hbalock);
        writel(HA_LATT, phba->HAregaddr);
        readl(phba->HAregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        return;

lpfc_handle_latt_free_mbuf:
        phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
        kfree(mp);
lpfc_handle_latt_free_pmb:
        mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
        /* Enable Link attention interrupts */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */

        /* Clear Link Attention in HA REG */
        writel(HA_LATT, phba->HAregaddr);
        readl(phba->HAregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
        lpfc_linkdown(phba);
        phba->link_state = LPFC_HBA_ERROR;

        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

        return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
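 *
 * The parser walks standard VPD resource tags: 0x82 (identifier string) and
 * 0x91 are skipped, 0x90 (read-only data) is scanned for two-character
 * keywords, and 0x78 (end tag) stops the walk. Within a 0x90 record, each
 * keyword is followed by a one-byte length and its value, for example
 * (illustrative bytes only):
 *
 *	0x90 len_lo len_hi 'S' 'N' 0x03 'A' 'B' '1' ... 0x78
 *
 * Recognized keywords: SN (SerialNumber), V1 (ModelDesc), V2 (ModelName),
 * V3 (ProgramType), and V4 (Port).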
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
        uint8_t lenlo, lenhi;
        int Length;
        int i, j;
        int finished = 0;
        int index = 0;

        if (!vpd)
                return 0;

        /* Vital Product */
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "0455 Vital Product Data: x%x x%x x%x x%x\n",
                        (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
                        (uint32_t) vpd[3]);
        while (!finished && (index < (len - 4))) {
                switch (vpd[index]) {
                case 0x82:
                case 0x91:
                        index += 1;
                        lenlo = vpd[index];
                        index += 1;
                        lenhi = vpd[index];
                        index += 1;
                        i = ((((unsigned short)lenhi) << 8) + lenlo);
                        index += i;
                        break;
                case 0x90:
                        index += 1;
                        lenlo = vpd[index];
                        index += 1;
                        lenhi = vpd[index];
                        index += 1;
                        Length = ((((unsigned short)lenhi) << 8) + lenlo);
                        if (Length > len - index)
                                Length = len - index;
                        while (Length > 0) {
                                /* Look for Serial Number */
                                if ((vpd[index] == 'S') &&
                                    (vpd[index+1] == 'N')) {
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        j = 0;
                                        Length -= (3+i);
                                        while (i--) {
                                                phba->SerialNumber[j++] =
                                                        vpd[index++];
                                                if (j == 31)
                                                        break;
                                        }
                                        phba->SerialNumber[j] = 0;
                                        continue;
                                } else if ((vpd[index] == 'V') &&
                                           (vpd[index+1] == '1')) {
                                        phba->vpd_flag |= VPD_MODEL_DESC;
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        j = 0;
                                        Length -= (3+i);
                                        while (i--) {
                                                phba->ModelDesc[j++] =
                                                        vpd[index++];
                                                if (j == 255)
                                                        break;
                                        }
                                        phba->ModelDesc[j] = 0;
                                        continue;
                                } else if ((vpd[index] == 'V') &&
                                           (vpd[index+1] == '2')) {
                                        phba->vpd_flag |= VPD_MODEL_NAME;
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        j = 0;
                                        Length -= (3+i);
                                        while (i--) {
                                                phba->ModelName[j++] =
                                                        vpd[index++];
                                                if (j == 79)
                                                        break;
                                        }
                                        phba->ModelName[j] = 0;
                                        continue;
                                } else if ((vpd[index] == 'V') &&
                                           (vpd[index+1] == '3')) {
                                        phba->vpd_flag |= VPD_PROGRAM_TYPE;
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        j = 0;
                                        Length -= (3+i);
                                        while (i--) {
                                                phba->ProgramType[j++] =
                                                        vpd[index++];
                                                if (j == 255)
                                                        break;
                                        }
                                        phba->ProgramType[j] = 0;
                                        continue;
                                } else if ((vpd[index] == 'V') &&
                                           (vpd[index+1] == '4')) {
                                        phba->vpd_flag |= VPD_PORT;
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        j = 0;
                                        Length -= (3+i);
                                        while (i--) {
                                                if ((phba->sli_rev == LPFC_SLI_REV4) &&
                                                    (phba->sli4_hba.pport_name_sta ==
                                                     LPFC_SLI4_PPNAME_GET)) {
                                                        j++;
                                                        index++;
                                                } else
                                                        phba->Port[j++] =
                                                                vpd[index++];
                                                if (j == 19)
                                                        break;
                                        }
                                        if ((phba->sli_rev != LPFC_SLI_REV4) ||
                                            (phba->sli4_hba.pport_name_sta ==
                                             LPFC_SLI4_PPNAME_NON))
                                                phba->Port[j] = 0;
                                        continue;
                                } else {
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        index += i;
                                        Length -= (3 + i);
                                }
                        }
                        finished = 0;
                        break;
                case 0x78:
                        finished = 1;
                        break;
                default:
                        index++;
                        break;
                }
        }

        return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
        lpfc_vpd_t *vp;
        uint16_t dev_id = phba->pcidev->device;
        int max_speed;
        int GE = 0;
        int oneConnect = 0; /* default is not a oneConnect */
        struct {
                char *name;
                char *bus;
                char *function;
        } m = {"<Unknown>", "", ""};

        if (mdp && mdp[0] != '\0'
                && descp && descp[0] != '\0')
                return;

        if (phba->lmt & LMT_64Gb)
                max_speed = 64;
        else if (phba->lmt & LMT_32Gb)
                max_speed = 32;
        else if (phba->lmt & LMT_16Gb)
                max_speed = 16;
        else if (phba->lmt & LMT_10Gb)
                max_speed = 10;
        else if (phba->lmt & LMT_8Gb)
                max_speed = 8;
        else if (phba->lmt & LMT_4Gb)
                max_speed = 4;
        else if (phba->lmt & LMT_2Gb)
                max_speed = 2;
        else if (phba->lmt & LMT_1Gb)
                max_speed = 1;
        else
                max_speed = 0;

        vp = &phba->vpd;

        switch (dev_id) {
        case PCI_DEVICE_ID_FIREFLY:
                m = (typeof(m)){"LP6000", "PCI",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SUPERFLY:
                if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
                        m = (typeof(m)){"LP7000", "PCI", ""};
                else
                        m = (typeof(m)){"LP7000E", "PCI", ""};
                m.function = "Obsolete, Unsupported Fibre Channel Adapter";
                break;
        case PCI_DEVICE_ID_DRAGONFLY:
                m = (typeof(m)){"LP8000", "PCI",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_CENTAUR:
                if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
                        m = (typeof(m)){"LP9002", "PCI", ""};
                else
                        m = (typeof(m)){"LP9000", "PCI", ""};
                m.function = "Obsolete, Unsupported Fibre Channel Adapter";
                break;
        case PCI_DEVICE_ID_RFLY:
                m = (typeof(m)){"LP952", "PCI",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_PEGASUS:
                m = (typeof(m)){"LP9802", "PCI-X",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_THOR:
                m = (typeof(m)){"LP10000", "PCI-X",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_VIPER:
                m = (typeof(m)){"LPX1000", "PCI-X",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_PFLY:
                m = (typeof(m)){"LP982", "PCI-X",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_TFLY:
                m = (typeof(m)){"LP1050", "PCI-X",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_HELIOS:
                m = (typeof(m)){"LP11000", "PCI-X2",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_HELIOS_SCSP:
                m = (typeof(m)){"LP11000-SP", "PCI-X2",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_HELIOS_DCSP:
                m = (typeof(m)){"LP11002-SP", "PCI-X2",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_NEPTUNE:
                m = (typeof(m)){"LPe1000", "PCIe",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_NEPTUNE_SCSP:
                m = (typeof(m)){"LPe1000-SP", "PCIe",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_NEPTUNE_DCSP:
                m = (typeof(m)){"LPe1002-SP", "PCIe",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_BMID:
                m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_BSMB:
                m = (typeof(m)){"LP111", "PCI-X2",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_ZEPHYR:
                m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_ZEPHYR_SCSP:
                m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_ZEPHYR_DCSP:
                m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
                GE = 1;
                break;
        case PCI_DEVICE_ID_ZMID:
                m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_ZSMB:
                m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LP101:
                m = (typeof(m)){"LP101", "PCI-X",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LP10000S:
                m = (typeof(m)){"LP10000-S", "PCI",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LP11000S:
                m = (typeof(m)){"LP11000-S", "PCI-X2",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LPE11000S:
                m = (typeof(m)){"LPe11000-S", "PCIe",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT:
                m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT_MID:
                m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT_SMB:
                m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT_DCSP:
                m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT_SCSP:
                m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SAT_S:
                m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_HORNET:
                m = (typeof(m)){"LP21000", "PCIe",
                                "Obsolete, Unsupported FCoE Adapter"};
                GE = 1;
                break;
        case PCI_DEVICE_ID_PROTEUS_VF:
                m = (typeof(m)){"LPev12000", "PCIe IOV",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_PROTEUS_PF:
                m = (typeof(m)){"LPev12000", "PCIe IOV",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_PROTEUS_S:
                m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_TIGERSHARK:
                oneConnect = 1;
                m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
                break;
        case PCI_DEVICE_ID_TOMCAT:
                oneConnect = 1;
                m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
                break;
        case PCI_DEVICE_ID_FALCON:
                m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
                                "EmulexSecure Fibre"};
                break;
        case PCI_DEVICE_ID_BALIUS:
                m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LANCER_FC:
                m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LANCER_FC_VF:
                m = (typeof(m)){"LPe16000", "PCIe",
                                "Obsolete, Unsupported Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LANCER_FCOE:
                oneConnect = 1;
                m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
                break;
        case PCI_DEVICE_ID_LANCER_FCOE_VF:
                oneConnect = 1;
                m = (typeof(m)){"OCe15100", "PCIe",
                                "Obsolete, Unsupported FCoE"};
                break;
        case PCI_DEVICE_ID_LANCER_G6_FC:
                m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LANCER_G7_FC:
                m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_SKYHAWK:
        case PCI_DEVICE_ID_SKYHAWK_VF:
                oneConnect = 1;
                m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
                break;
        default:
                m = (typeof(m)){"Unknown", "", ""};
                break;
        }

        if (mdp && mdp[0] == '\0')
                snprintf(mdp, 79, "%s", m.name);
        /*
         * oneConnect HBAs require special processing; they are all
         * initiators and we put the port number on the end.
         */
        if (descp && descp[0] == '\0') {
                if (oneConnect)
                        snprintf(descp, 255,
                                 "Emulex OneConnect %s, %s Initiator %s",
                                 m.name, m.function,
                                 phba->Port);
                else if (max_speed == 0)
                        snprintf(descp, 255,
                                 "Emulex %s %s %s",
                                 m.name, m.bus, m.function);
                else
                        snprintf(descp, 255,
                                 "Emulex %s %d%s %s %s",
                                 m.name, max_speed, (GE) ? "GE" : "Gb",
                                 m.bus, m.function);
        }
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
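 *
 * Each IOCB carries up to two 64-bit buffer descriptors, so a request to
 * post 64 buffers, for example, is satisfied by 32 CMD_QUE_RING_BUF64_CN
 * IOCBs; any shortfall is remembered in pring->missbufcnt and retried on
 * the next call.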
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
        IOCB_t *icmd;
        struct lpfc_iocbq *iocb;
        struct lpfc_dmabuf *mp1, *mp2;

        cnt += pring->missbufcnt;

        /* While there are buffers to post */
        while (cnt > 0) {
                /* Allocate buffer for command iocb */
                iocb = lpfc_sli_get_iocbq(phba);
                if (iocb == NULL) {
                        pring->missbufcnt = cnt;
                        return cnt;
                }
                icmd = &iocb->iocb;

                /* 2 buffers can be posted per command */
                /* Allocate buffer to post */
                mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
                if (mp1)
                        mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
                if (!mp1 || !mp1->virt) {
                        kfree(mp1);
                        lpfc_sli_release_iocbq(phba, iocb);
                        pring->missbufcnt = cnt;
                        return cnt;
                }

                INIT_LIST_HEAD(&mp1->list);
                /* Allocate buffer to post */
                if (cnt > 1) {
                        mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
                        if (mp2)
                                mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
                                                            &mp2->phys);
                        if (!mp2 || !mp2->virt) {
                                kfree(mp2);
                                lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
                                kfree(mp1);
                                lpfc_sli_release_iocbq(phba, iocb);
                                pring->missbufcnt = cnt;
                                return cnt;
                        }

                        INIT_LIST_HEAD(&mp2->list);
                } else {
                        mp2 = NULL;
                }

                icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
                icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
                icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
                icmd->ulpBdeCount = 1;
                cnt--;
                if (mp2) {
                        icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
                        icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
                        icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
                        cnt--;
                        icmd->ulpBdeCount = 2;
                }

                icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
                icmd->ulpLe = 1;

                if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
                    IOCB_ERROR) {
                        lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
                        kfree(mp1);
                        cnt++;
                        if (mp2) {
                                lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
                                kfree(mp2);
                                cnt++;
                        }
                        lpfc_sli_release_iocbq(phba, iocb);
                        pring->missbufcnt = cnt;
                        return cnt;
                }
                lpfc_sli_ringpostbuf_put(phba, pring, mp1);
                if (mp2)
                        lpfc_sli_ringpostbuf_put(phba, pring, mp2);
        }
        pring->missbufcnt = 0;
        return 0;
}

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs. SLI3 only.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;

        /* Ring 0, ELS / CT buffers */
        lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
        /* Ring 2 - FCP no buffers needed */

        return 0;
}

/* 32-bit circular left rotate of V by N bits */
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
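 *
 * The five constants written here are the standard SHA-1 initial state
 * (H0..H4 from FIPS 180-1); together with lpfc_sha_iterate(), which applies
 * the 80-round SHA-1 compression function, this amounts to one SHA-1 block
 * transform over the working array built by lpfc_hba_init().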
 **/
static void
lpfc_sha_init(uint32_t *HashResultPointer)
{
        HashResultPointer[0] = 0x67452301;
        HashResultPointer[1] = 0xEFCDAB89;
        HashResultPointer[2] = 0x98BADCFE;
        HashResultPointer[3] = 0x10325476;
        HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table, returned through @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
{
        int t;
        uint32_t TEMP;
        uint32_t A, B, C, D, E;
        t = 16;
        do {
                HashWorkingPointer[t] =
                    S(1,
                      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
                      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
        } while (++t <= 79);
        t = 0;
        A = HashResultPointer[0];
        B = HashResultPointer[1];
        C = HashResultPointer[2];
        D = HashResultPointer[3];
        E = HashResultPointer[4];

        do {
                if (t < 20) {
                        TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
                } else if (t < 40) {
                        TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
                } else if (t < 60) {
                        TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
                } else {
                        TEMP = (B ^ C ^ D) + 0xCA62C1D6;
                }
                TEMP += S(5, A) + E + HashWorkingPointer[t];
                E = D;
                D = C;
                C = S(30, B);
                B = A;
                A = TEMP;
        } while (++t <= 79);

        HashResultPointer[0] += A;
        HashResultPointer[1] += B;
        HashResultPointer[2] += C;
        HashResultPointer[3] += D;
        HashResultPointer[4] += E;
}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred to by @HashWorking
 * from the challenge random numbers associated with the host, referred to by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
{
        *HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
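 *
 * HashWorking[0]/[78] and [1]/[79] are seeded from the WWNN, entries 0-6
 * are then XORed with the adapter's RandomData challenge words, and the
 * SHA-1 pass above folds the whole 80-word array into @hbainit; the digest
 * therefore depends on both the WWNN and the challenge.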
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
        int t;
        uint32_t *HashWorking;
        uint32_t *pwwnn = (uint32_t *) phba->wwnn;

        HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
        if (!HashWorking)
                return;

        HashWorking[0] = HashWorking[78] = *pwwnn++;
        HashWorking[1] = HashWorking[79] = *pwwnn;

        for (t = 0; t < 7; t++)
                lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

        lpfc_sha_init(hbainit);
        lpfc_sha_iterate(hbainit, HashWorking);
        kfree(HashWorking);
}

/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp, *next_ndlp;
        int i = 0;

        if (phba->link_state > LPFC_LINK_DOWN)
                lpfc_port_link_failure(vport);

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (vport->port_type != LPFC_PHYSICAL_PORT &&
                    ndlp->nlp_DID == Fabric_DID) {
                        /* Just free up ndlp with Fabric_DID for vports */
                        lpfc_nlp_put(ndlp);
                        continue;
                }

                if (ndlp->nlp_DID == Fabric_Cntl_DID &&
                    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
                        lpfc_nlp_put(ndlp);
                        continue;
                }

                /* Fabric Ports not in UNMAPPED state are cleaned up in the
                 * DEVICE_RM event.
                 */
                if (ndlp->nlp_type & NLP_FABRIC &&
                    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
                        lpfc_disc_state_machine(vport, ndlp, NULL,
                                                NLP_EVT_DEVICE_RECOVERY);

                if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
                        lpfc_disc_state_machine(vport, ndlp, NULL,
                                                NLP_EVT_DEVICE_RM);
        }

        /* At this point, ALL ndlp's should be gone
         * because of the previous NLP_EVT_DEVICE_RM.
         * Let's wait for this to happen, if needed.
         */
        while (!list_empty(&vport->fc_nodes)) {
                if (i++ > 3000) {
                        lpfc_printf_vlog(vport, KERN_ERR,
                                         LOG_TRACE_EVENT,
                                         "0233 Nodelist not empty\n");
                        list_for_each_entry_safe(ndlp, next_ndlp,
                                                 &vport->fc_nodes, nlp_listp) {
                                lpfc_printf_vlog(ndlp->vport, KERN_ERR,
                                                 LOG_TRACE_EVENT,
                                                 "0282 did:x%x ndlp:x%px "
                                                 "refcnt:%d xflags x%x nflag x%x\n",
                                                 ndlp->nlp_DID, (void *)ndlp,
                                                 kref_read(&ndlp->kref),
                                                 ndlp->fc4_xpt_flags,
                                                 ndlp->nlp_flag);
                        }
                        break;
                }

                /* Wait for any activity on ndlps to settle */
                msleep(10);
        }
        lpfc_cleanup_vports_rrqs(vport, NULL);
}

/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
        del_timer_sync(&vport->els_tmofunc);
        del_timer_sync(&vport->delayed_disc_tmo);
        lpfc_can_disctmo(vport);
        return;
}

/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
        /* Clear pending FCF rediscovery wait flag */
        phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

        /* Now, try to stop the timer */
        del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
        spin_lock_irq(&phba->hbalock);
        if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
                /* FCF rediscovery timer already fired or stopped */
                spin_unlock_irq(&phba->hbalock);
                return;
        }
        __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
        /* Clear failover in progress flags */
        phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
        spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with an HBA. This function is
 * invoked before either putting an HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
        if (phba->pport)
                lpfc_stop_vport_timers(phba->pport);
        cancel_delayed_work_sync(&phba->eq_delay_work);
        cancel_delayed_work_sync(&phba->idle_stat_delay_work);
        del_timer_sync(&phba->sli.mbox_tmo);
        del_timer_sync(&phba->fabric_block_timer);
        del_timer_sync(&phba->eratt_poll);
        del_timer_sync(&phba->hb_tmofunc);
        if (phba->sli_rev == LPFC_SLI_REV4) {
                del_timer_sync(&phba->rrq_tmr);
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        }
        phba->hb_outstanding = 0;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                /* Stop any LightPulse device specific driver timers */
                del_timer_sync(&phba->fcp_poll_timer);
                break;
        case LPFC_PCI_DEV_OC:
                /* Stop any OneConnect device specific driver timers */
                lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0297 Invalid device group (x%x)\n",
                                phba->pci_dev_grp);
                break;
        }
        return;
}

/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox no wait action.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to the
 * HBA, whether from the sysfs or the libdfc interface, will be blocked. The
 * HBA is set to block the management interface when the driver prepares the
 * HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
        unsigned long iflag;
        uint8_t actcmd = MBX_HEARTBEAT;
        unsigned long timeout;

        spin_lock_irqsave(&phba->hbalock, iflag);
        phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
        spin_unlock_irqrestore(&phba->hbalock, iflag);
        if (mbx_action == LPFC_MBX_NO_WAIT)
                return;
        timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
        spin_lock_irqsave(&phba->hbalock, iflag);
        if (phba->sli.mbox_active) {
                actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
                /* Determine how long we might wait for the active mailbox
                 * command to be gracefully completed by firmware.
                 */
                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
                                phba->sli.mbox_active) * 1000) + jiffies;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflag);

        /* Wait for the outstanding mailbox command to complete */
        while (phba->sli.mbox_active) {
                /* Check active mailbox complete status every 2ms */
                msleep(2);
                if (time_after(jiffies, timeout)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "2813 Mgmt IO is Blocked %x "
                                        "- mbox cmd %x still active\n",
                                        phba->sli.sli_flag, actcmd);
                        break;
                }
        }
}

/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fixup the temporary rpi assignments.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
        struct lpfc_nodelist *ndlp, *next_ndlp;
        struct lpfc_vport **vports;
        int i, rpi;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;

        vports = lpfc_create_vport_work_array(phba);
        if (vports == NULL)
                return;

        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                if (vports[i]->load_flag & FC_UNLOADING)
                        continue;

                list_for_each_entry_safe(ndlp, next_ndlp,
                                         &vports[i]->fc_nodes,
                                         nlp_listp) {
                        rpi = lpfc_sli4_alloc_rpi(phba);
                        if (rpi == LPFC_RPI_ALLOC_ERROR) {
                                /* TODO print log? */
                                continue;
                        }
                        ndlp->nlp_rpi = rpi;
                        lpfc_printf_vlog(ndlp->vport, KERN_INFO,
                                         LOG_NODE | LOG_DISCOVERY,
                                         "0009 Assign RPI x%x to ndlp x%px "
                                         "DID:x%06x flg:x%x\n",
                                         ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
                                         ndlp->nlp_flag);
                }
        }
        lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
 * to the expedite pool and marks them as expedite.
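 *
 * It is called from lpfc_create_multixri_pools() when NVME support is
 * enabled; the reserved XRIs are then available for expedited
 * (high-priority) I/O even when the regular pools run low.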
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_io_buf *lpfc_ncmd;
        struct lpfc_io_buf *lpfc_ncmd_next;
        struct lpfc_epd_pool *epd_pool;
        unsigned long iflag;

        epd_pool = &phba->epd_pool;
        qp = &phba->sli4_hba.hdwq[0];

        spin_lock_init(&epd_pool->lock);
        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
        spin_lock(&epd_pool->lock);
        INIT_LIST_HEAD(&epd_pool->list);
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &qp->lpfc_io_buf_list_put, list) {
                list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
                lpfc_ncmd->expedite = true;
                qp->put_io_bufs--;
                epd_pool->count++;
                if (epd_pool->count >= XRI_BATCH)
                        break;
        }
        spin_unlock(&epd_pool->lock);
        spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
 * of HWQ 0 and clears the expedite mark.
 **/
static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
{
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_io_buf *lpfc_ncmd;
        struct lpfc_io_buf *lpfc_ncmd_next;
        struct lpfc_epd_pool *epd_pool;
        unsigned long iflag;

        epd_pool = &phba->epd_pool;
        qp = &phba->sli4_hba.hdwq[0];

        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
        spin_lock(&epd_pool->lock);
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &epd_pool->list, list) {
                list_move_tail(&lpfc_ncmd->list,
                               &qp->lpfc_io_buf_list_put);
                /* Clear the expedite marking set when the pool was created */
                lpfc_ncmd->expedite = false;
                qp->put_io_bufs++;
                epd_pool->count--;
        }
        spin_unlock(&epd_pool->lock);
        spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public and private XRI pools per HWQ, then
 * moves XRIs from lpfc_io_buf_list_put to the public pool. The high and
 * low watermarks are also initialized.
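 *
 * As a worked example (illustrative numbers only): with 6 hardware queues
 * and 3072 usable XRIs, each HWQ gets xri_limit = 3072 / 6 = 512, a private
 * pool high watermark of 512 / 2 = 256, and a low watermark of XRI_BATCH.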
 **/
void lpfc_create_multixri_pools(struct lpfc_hba *phba)
{
        u32 i, j;
        u32 hwq_count;
        u32 count_per_hwq;
        struct lpfc_io_buf *lpfc_ncmd;
        struct lpfc_io_buf *lpfc_ncmd_next;
        unsigned long iflag;
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_multixri_pool *multixri_pool;
        struct lpfc_pbl_pool *pbl_pool;
        struct lpfc_pvt_pool *pvt_pool;

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
                        phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
                        phba->sli4_hba.io_xri_cnt);

        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                lpfc_create_expedite_pool(phba);

        hwq_count = phba->cfg_hdw_queue;
        count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;

        for (i = 0; i < hwq_count; i++) {
                multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);

                if (!multixri_pool) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "1238 Failed to allocate memory for "
                                        "multixri_pool\n");

                        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                                lpfc_destroy_expedite_pool(phba);

                        j = 0;
                        while (j < i) {
                                qp = &phba->sli4_hba.hdwq[j];
                                kfree(qp->p_multixri_pool);
                                j++;
                        }
                        phba->cfg_xri_rebalancing = 0;
                        return;
                }

                qp = &phba->sli4_hba.hdwq[i];
                qp->p_multixri_pool = multixri_pool;

                multixri_pool->xri_limit = count_per_hwq;
                multixri_pool->rrb_next_hwqid = i;

                /* Deal with public free xri pool */
                pbl_pool = &multixri_pool->pbl_pool;
                spin_lock_init(&pbl_pool->lock);
                spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
                spin_lock(&pbl_pool->lock);
                INIT_LIST_HEAD(&pbl_pool->list);
                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                         &qp->lpfc_io_buf_list_put, list) {
                        list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
                        qp->put_io_bufs--;
                        pbl_pool->count++;
                }
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
                                pbl_pool->count, i);
                spin_unlock(&pbl_pool->lock);
                spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

                /* Deal with private free xri pool */
                pvt_pool = &multixri_pool->pvt_pool;
                pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
                pvt_pool->low_watermark = XRI_BATCH;
                spin_lock_init(&pvt_pool->lock);
                spin_lock_irqsave(&pvt_pool->lock, iflag);
                INIT_LIST_HEAD(&pvt_pool->list);
                pvt_pool->count = 0;
                spin_unlock_irqrestore(&pvt_pool->lock, iflag);
        }
}

/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the public and private pools to
 * lpfc_io_buf_list_put.
 **/
static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
        u32 i;
        u32 hwq_count;
        struct lpfc_io_buf *lpfc_ncmd;
        struct lpfc_io_buf *lpfc_ncmd_next;
        unsigned long iflag;
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_multixri_pool *multixri_pool;
        struct lpfc_pbl_pool *pbl_pool;
        struct lpfc_pvt_pool *pvt_pool;

        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                lpfc_destroy_expedite_pool(phba);

        if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_sli_flush_io_rings(phba);

        hwq_count = phba->cfg_hdw_queue;

        for (i = 0; i < hwq_count; i++) {
                qp = &phba->sli4_hba.hdwq[i];
                multixri_pool = qp->p_multixri_pool;
                if (!multixri_pool)
                        continue;

                qp->p_multixri_pool = NULL;

                spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

                /* Deal with public free xri pool */
                pbl_pool = &multixri_pool->pbl_pool;
                spin_lock(&pbl_pool->lock);

                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
                                pbl_pool->count, i);

                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                         &pbl_pool->list, list) {
                        list_move_tail(&lpfc_ncmd->list,
                                       &qp->lpfc_io_buf_list_put);
                        qp->put_io_bufs++;
                        pbl_pool->count--;
                }

                INIT_LIST_HEAD(&pbl_pool->list);
                pbl_pool->count = 0;

                spin_unlock(&pbl_pool->lock);

                /* Deal with private free xri pool */
                pvt_pool = &multixri_pool->pvt_pool;
                spin_lock(&pvt_pool->lock);

                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
                                pvt_pool->count, i);

                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                         &pvt_pool->list, list) {
                        list_move_tail(&lpfc_ncmd->list,
                                       &qp->lpfc_io_buf_list_put);
                        qp->put_io_bufs++;
                        pvt_pool->count--;
                }

                INIT_LIST_HEAD(&pvt_pool->list);
                pvt_pool->count = 0;

                spin_unlock(&pvt_pool->lock);
                spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

                kfree(multixri_pool);
        }
}

/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport;
        struct lpfc_vport **vports;
        int i, error = 0;
        bool vpis_cleared = false;

        if (!phba)
                return 0;
        vport = phba->pport;

        if (!(vport->fc_flag & FC_OFFLINE_MODE))
                return 0;

        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                        "0458 Bring Adapter online\n");

        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

        if (phba->sli_rev == LPFC_SLI_REV4) {
                if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
                        lpfc_unblock_mgmt_io(phba);
                        return 1;
                }
                spin_lock_irq(&phba->hbalock);
                if (!phba->sli4_hba.max_cfg_param.vpi_used)
                        vpis_cleared = true;
                spin_unlock_irq(&phba->hbalock);

                /* Reestablish the local initiator port.
                 * The offline process destroyed the previous lport.
                 */
                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
                    !phba->nvmet_support) {
                        error = lpfc_nvme_create_localport(phba->pport);
                        if (error)
                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                                "6132 NVME restore reg failed "
                                                "on nvmei error x%x\n", error);
                }
        } else {
                lpfc_sli_queue_init(phba);
                if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
                        lpfc_unblock_mgmt_io(phba);
                        return 1;
                }
        }

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL) {
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        struct Scsi_Host *shost;
                        shost = lpfc_shost_from_vport(vports[i]);
                        spin_lock_irq(shost->host_lock);
                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                        if (phba->sli_rev == LPFC_SLI_REV4) {
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                                if ((vpis_cleared) &&
                                    (vports[i]->port_type !=
                                        LPFC_PHYSICAL_PORT))
                                        vports[i]->vpi = 0;
                        }
                        spin_unlock_irq(shost->host_lock);
                }
        }
        lpfc_destroy_vport_work_array(phba, vports);

        if (phba->cfg_xri_rebalancing)
                lpfc_create_multixri_pools(phba);

        lpfc_cpuhp_add(phba);

        lpfc_unblock_mgmt_io(phba);
        return 0;
}

/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all user space access
 * to the HBA, whether from the sysfs or the libdfc interface, will be
 * allowed. The HBA is set to block the management interface when the driver
 * prepares the HBA interface for online or offline and then set to unblock
 * the management interface afterwards.
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
{
        unsigned long iflag;

        spin_lock_irqsave(&phba->hbalock, iflag);
        phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
        spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It performs
 * unregistration login to all the nodes on all vports and flushes the mailbox
 * queue to make it ready to be brought offline.
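 *
 * For each active node this amounts to, in order: clear NLP_NPR_ADISC,
 * free the RPI on SLI4 ports (lpfc_sli4_node_prep() reallocates one when
 * the port comes back online), lpfc_unreg_rpi(), and, for fabric nodes not
 * registered with a transport, DEVICE_RECOVERY/DEVICE_RM state machine
 * events.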
 **/
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_nodelist *ndlp, *next_ndlp;
        struct lpfc_vport **vports;
        struct Scsi_Host *shost;
        int i;

        if (vport->fc_flag & FC_OFFLINE_MODE)
                return;

        lpfc_block_mgmt_io(phba, mbx_action);

        lpfc_linkdown(phba);

        /* Issue an unreg_login to all nodes on all vports */
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL) {
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        if (vports[i]->load_flag & FC_UNLOADING)
                                continue;
                        shost = lpfc_shost_from_vport(vports[i]);
                        spin_lock_irq(shost->host_lock);
                        vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
                        vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                        vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
                        spin_unlock_irq(shost->host_lock);

                        shost = lpfc_shost_from_vport(vports[i]);
                        list_for_each_entry_safe(ndlp, next_ndlp,
                                                 &vports[i]->fc_nodes,
                                                 nlp_listp) {
                                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
                                        /* Driver must assume RPI is invalid for
                                         * any unused or inactive node.
                                         */
                                        ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
                                        continue;
                                }

                                spin_lock_irq(&ndlp->lock);
                                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                                spin_unlock_irq(&ndlp->lock);
                                /*
                                 * Whenever an SLI4 port goes offline, free the
                                 * RPI. Get a new RPI when the adapter port
                                 * comes back online.
                                 */
                                if (phba->sli_rev == LPFC_SLI_REV4) {
                                        lpfc_printf_vlog(vports[i], KERN_INFO,
                                                 LOG_NODE | LOG_DISCOVERY,
                                                 "0011 Free RPI x%x on "
                                                 "ndlp: %p did x%x\n",
                                                 ndlp->nlp_rpi, ndlp,
                                                 ndlp->nlp_DID);
                                        lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
                                        ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
                                }
                                lpfc_unreg_rpi(vports[i], ndlp);

                                if (ndlp->nlp_type & NLP_FABRIC) {
                                        lpfc_disc_state_machine(vports[i], ndlp,
                                                NULL, NLP_EVT_DEVICE_RECOVERY);

                                        /* Don't remove the node unless the node
                                         * has been unregistered with the
                                         * transport. If so, let dev_loss
                                         * take care of the node.
                                         */
                                        if (!(ndlp->fc4_xpt_flags &
                                              (NVME_XPT_REGD | SCSI_XPT_REGD)))
                                                lpfc_disc_state_machine
                                                        (vports[i], ndlp,
                                                         NULL,
                                                         NLP_EVT_DEVICE_RM);
                                }
                        }
                }
        }
        lpfc_destroy_vport_work_array(phba, vports);

        lpfc_sli_mbox_sys_shutdown(phba, mbx_action);

        if (phba->wq)
                flush_workqueue(phba->wq);
}

/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
        struct Scsi_Host *shost;
        struct lpfc_vport **vports;
        int i;

        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
                return;

        /* stop port and all timers associated with this hba */
        lpfc_stop_port(phba);

        /* Tear down the local and target port registrations. The
         * nvme transports need to clean up.
         */
        lpfc_nvmet_destroy_targetport(phba);
        lpfc_nvme_destroy_localport(phba->pport);

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_stop_vport_timers(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                        "0460 Bring Adapter offline\n");
        /* Bring down the SLI Layer and cleanup. The HBA is offline now. */
        lpfc_sli_hba_down(phba);
        spin_lock_irq(&phba->hbalock);
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        spin_lock_irq(shost->host_lock);
                        vports[i]->work_port_events = 0;
                        vports[i]->fc_flag |= FC_OFFLINE_MODE;
                        spin_unlock_irq(shost->host_lock);
                }
        lpfc_destroy_vport_work_array(phba, vports);
        __lpfc_cpuhp_remove(phba);

        if (phba->cfg_xri_rebalancing)
                lpfc_destroy_multixri_pools(phba);
}

/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
        struct lpfc_io_buf *sb, *sb_next;

        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
                return;

        spin_lock_irq(&phba->hbalock);

        /* Release all the lpfc_scsi_bufs maintained by this host. */

        spin_lock(&phba->scsi_buf_list_put_lock);
        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
                                 list) {
                list_del(&sb->list);
                dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
                              sb->dma_handle);
                kfree(sb);
                phba->total_scsi_bufs--;
        }
        spin_unlock(&phba->scsi_buf_list_put_lock);

        spin_lock(&phba->scsi_buf_list_get_lock);
        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
                                 list) {
                list_del(&sb->list);
                dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
                              sb->dma_handle);
                kfree(sb);
                phba->total_scsi_bufs--;
        }
        spin_unlock(&phba->scsi_buf_list_get_lock);
        spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the IO buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
void
lpfc_io_free(struct lpfc_hba *phba)
{
        struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
        struct lpfc_sli4_hdw_queue *qp;
        int idx;

        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                qp = &phba->sli4_hba.hdwq[idx];
                /* Release all the lpfc_nvme_bufs maintained by this host.
		 */
		spin_lock(&qp->io_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->put_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_put_lock);

		spin_lock(&qp->io_buf_list_get_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_get,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->get_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_get_lock);
	}
}

/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all the sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate memory
 **/
int
lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	LIST_HEAD(els_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
				 &els_sgl_list);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				__lpfc_mbuf_free(phba, sglq_entry->virt,
						 sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	return rc;
}

/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the size of the current nvmet xri-sgl
 * list, and then goes through all the sgls to update the physical XRIs
 * assigned due to port function reset. During port initialization, the
 * current nvmet sgl list is 0.
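 *
 * For example (illustrative numbers only): with max_xri = 2048 and an
 * ELS reservation of 256 XRIs, all remaining 1792 XRIs are dedicated
 * to NVMET I/O.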
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate memory
 **/
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	LIST_HEAD(nvmet_sgl_list);
	int rc;

	/*
	 * update on pci function's nvmet xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl expanded */
		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
		/* allocate the additional nvmet sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6303 Failure to allocate an "
						"NVMET sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = NVMET_BUFF_TYPE;
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
							&sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6304 Failure to allocate an "
						"NVMET buf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0,
			       phba->cfg_sg_dma_buf_size);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6305 NVMET xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
				nvmet_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
				 &nvmet_sgl_list);
		/* release extra nvmet sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&nvmet_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
						    sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6306 NVMET xri-sgl count unchanged: %d\n",
				nvmet_xri_cnt);
	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;

	/* update xris to nvmet sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
3928 LOG_TRACE_EVENT, 3929 "6307 Failed to allocate xri for " 3930 "NVMET sgl\n"); 3931 rc = -ENOMEM; 3932 goto out_free_mem; 3933 } 3934 sglq_entry->sli4_lxritag = lxri; 3935 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3936 } 3937 return 0; 3938 3939 out_free_mem: 3940 lpfc_free_nvmet_sgl_list(phba); 3941 return rc; 3942 } 3943 3944 int 3945 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 3946 { 3947 LIST_HEAD(blist); 3948 struct lpfc_sli4_hdw_queue *qp; 3949 struct lpfc_io_buf *lpfc_cmd; 3950 struct lpfc_io_buf *iobufp, *prev_iobufp; 3951 int idx, cnt, xri, inserted; 3952 3953 cnt = 0; 3954 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3955 qp = &phba->sli4_hba.hdwq[idx]; 3956 spin_lock_irq(&qp->io_buf_list_get_lock); 3957 spin_lock(&qp->io_buf_list_put_lock); 3958 3959 /* Take everything off the get and put lists */ 3960 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 3961 list_splice(&qp->lpfc_io_buf_list_put, &blist); 3962 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 3963 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 3964 cnt += qp->get_io_bufs + qp->put_io_bufs; 3965 qp->get_io_bufs = 0; 3966 qp->put_io_bufs = 0; 3967 qp->total_io_bufs = 0; 3968 spin_unlock(&qp->io_buf_list_put_lock); 3969 spin_unlock_irq(&qp->io_buf_list_get_lock); 3970 } 3971 3972 /* 3973 * Take IO buffers off blist and put on cbuf sorted by XRI. 3974 * This is because POST_SGL takes a sequential range of XRIs 3975 * to post to the firmware. 3976 */ 3977 for (idx = 0; idx < cnt; idx++) { 3978 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 3979 if (!lpfc_cmd) 3980 return cnt; 3981 if (idx == 0) { 3982 list_add_tail(&lpfc_cmd->list, cbuf); 3983 continue; 3984 } 3985 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 3986 inserted = 0; 3987 prev_iobufp = NULL; 3988 list_for_each_entry(iobufp, cbuf, list) { 3989 if (xri < iobufp->cur_iocbq.sli4_xritag) { 3990 if (prev_iobufp) 3991 list_add(&lpfc_cmd->list, 3992 &prev_iobufp->list); 3993 else 3994 list_add(&lpfc_cmd->list, cbuf); 3995 inserted = 1; 3996 break; 3997 } 3998 prev_iobufp = iobufp; 3999 } 4000 if (!inserted) 4001 list_add_tail(&lpfc_cmd->list, cbuf); 4002 } 4003 return cnt; 4004 } 4005 4006 int 4007 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 4008 { 4009 struct lpfc_sli4_hdw_queue *qp; 4010 struct lpfc_io_buf *lpfc_cmd; 4011 int idx, cnt; 4012 4013 qp = phba->sli4_hba.hdwq; 4014 cnt = 0; 4015 while (!list_empty(cbuf)) { 4016 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4017 list_remove_head(cbuf, lpfc_cmd, 4018 struct lpfc_io_buf, list); 4019 if (!lpfc_cmd) 4020 return cnt; 4021 cnt++; 4022 qp = &phba->sli4_hba.hdwq[idx]; 4023 lpfc_cmd->hdwq_no = idx; 4024 lpfc_cmd->hdwq = qp; 4025 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 4026 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 4027 spin_lock(&qp->io_buf_list_put_lock); 4028 list_add_tail(&lpfc_cmd->list, 4029 &qp->lpfc_io_buf_list_put); 4030 qp->put_io_bufs++; 4031 qp->total_io_bufs++; 4032 spin_unlock(&qp->io_buf_list_put_lock); 4033 } 4034 } 4035 return cnt; 4036 } 4037 4038 /** 4039 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 4040 * @phba: pointer to lpfc hba data structure. 4041 * 4042 * This routine first calculates the sizes of the current els and allocated 4043 * scsi sgl lists, and then goes through all sgls to updates the physical 4044 * XRIs assigned due to port function reset. During port initialization, the 4045 * current els and allocated scsi sgl lists are 0s. 
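 *
 * Internally (a sketch of the flow implemented below):
 * lpfc_io_buf_flush() drains every hardware queue onto a single list
 * sorted by XRI, since POST_SGL posts sequential XRI ranges to the
 * firmware, and lpfc_io_buf_replenish() then deals the buffers back
 * out round-robin across the hardware queues.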
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate memory
 **/
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t io_xri_cnt, io_xri_max;
	LIST_HEAD(io_sgl_list);
	int rc, cnt;

	/*
	 * update on pci function's allocated nvme xri-sgl list
	 */

	/* maximum number of xris available for nvme buffers */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.io_xri_max = io_xri_max;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated XRI sgl count:%d, "
			"maximum XRI count:%d\n",
			phba->sli4_hba.io_xri_cnt,
			phba->sli4_hba.io_xri_max);

	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
		/* max nvme xri shrunk below the allocated nvme buffers */
		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
			     phba->sli4_hba.io_xri_max;
		/* release the extra allocated nvme buffers */
		for (i = 0; i < io_xri_cnt; i++) {
			list_remove_head(&io_sgl_list, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
	}

	/* update xris associated to remaining allocated nvme buffers */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	phba->sli4_hba.io_xri_cnt = cnt;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &io_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
	return 0;

out_free_mem:
	lpfc_io_free(phba);
	return rc;
}

/**
 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
 * @phba: Pointer to lpfc hba data structure.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates nvme buffers for a device with the SLI-4
 * interface spec; an nvme buffer contains all the necessary information
 * needed to initiate an I/O. After allocating up to @num_to_alloc IO
 * buffers and putting them on a list, it posts them to the port using an
 * SGL block post.
 *
 * Return codes:
 *   int - number of IO buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
int
lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_iocbq *pwqeq;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(post_nblist);
	LIST_HEAD(nvme_nblist);

	phba->sli4_hba.io_xri_cnt = 0;
	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
		if (!lpfc_ncmd)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O.
The DMA buffer includes the 4153 * number of SGE's necessary to support the sg_tablesize. 4154 */ 4155 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4156 GFP_KERNEL, 4157 &lpfc_ncmd->dma_handle); 4158 if (!lpfc_ncmd->data) { 4159 kfree(lpfc_ncmd); 4160 break; 4161 } 4162 4163 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4164 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4165 } else { 4166 /* 4167 * 4K Page alignment is CRITICAL to BlockGuard, double 4168 * check to be sure. 4169 */ 4170 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4171 (((unsigned long)(lpfc_ncmd->data) & 4172 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4173 lpfc_printf_log(phba, KERN_ERR, 4174 LOG_TRACE_EVENT, 4175 "3369 Memory alignment err: " 4176 "addr=%lx\n", 4177 (unsigned long)lpfc_ncmd->data); 4178 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4179 lpfc_ncmd->data, 4180 lpfc_ncmd->dma_handle); 4181 kfree(lpfc_ncmd); 4182 break; 4183 } 4184 } 4185 4186 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4187 4188 lxri = lpfc_sli4_next_xritag(phba); 4189 if (lxri == NO_XRI) { 4190 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4191 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4192 kfree(lpfc_ncmd); 4193 break; 4194 } 4195 pwqeq = &lpfc_ncmd->cur_iocbq; 4196 4197 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4198 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4199 if (iotag == 0) { 4200 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4201 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4202 kfree(lpfc_ncmd); 4203 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4204 "6121 Failed to allocate IOTAG for" 4205 " XRI:0x%x\n", lxri); 4206 lpfc_sli4_free_xri(phba, lxri); 4207 break; 4208 } 4209 pwqeq->sli4_lxritag = lxri; 4210 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4211 pwqeq->context1 = lpfc_ncmd; 4212 4213 /* Initialize local short-hand pointers. 
		 */
		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
		lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
		spin_lock_init(&lpfc_ncmd->buf_lock);

		/* add the nvme buffer to a post list */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
		phba->sli4_hba.io_xri_cnt++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6114 Allocate %d out of %d requested new NVME "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_sli4_post_io_sgl_list(
				phba, &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}

static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
	uint64_t wwn;
	int rc;
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool,
					      GFP_KERNEL);
	if (!mboxq)
		return (uint64_t)-1;

	/* First get WWN of HBA instance */
	lpfc_read_nv(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6019 Mailbox failed, mbxCmd x%x "
				"READ_NV, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t)-1;
	}
	mb = &mboxq->u.mb;
	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
	/* wwn is WWPN of HBA instance */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		return be64_to_cpu(wwn);
	else
		return rol64(wwn, 32);
}

/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates an FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
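 *
 * Minimal usage sketch (hypothetical caller; error handling elided and
 * the instance source is illustrative):
 *
 *   struct lpfc_vport *vport;
 *
 *   vport = lpfc_create_port(phba, lpfc_get_instance(),
 *                            &phba->pcidev->dev);
 *   if (!vport)
 *           return -ENOMEM;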
4287 **/ 4288 struct lpfc_vport * 4289 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4290 { 4291 struct lpfc_vport *vport; 4292 struct Scsi_Host *shost = NULL; 4293 struct scsi_host_template *template; 4294 int error = 0; 4295 int i; 4296 uint64_t wwn; 4297 bool use_no_reset_hba = false; 4298 int rc; 4299 4300 if (lpfc_no_hba_reset_cnt) { 4301 if (phba->sli_rev < LPFC_SLI_REV4 && 4302 dev == &phba->pcidev->dev) { 4303 /* Reset the port first */ 4304 lpfc_sli_brdrestart(phba); 4305 rc = lpfc_sli_chipset_init(phba); 4306 if (rc) 4307 return NULL; 4308 } 4309 wwn = lpfc_get_wwpn(phba); 4310 } 4311 4312 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4313 if (wwn == lpfc_no_hba_reset[i]) { 4314 lpfc_printf_log(phba, KERN_ERR, 4315 LOG_TRACE_EVENT, 4316 "6020 Setting use_no_reset port=%llx\n", 4317 wwn); 4318 use_no_reset_hba = true; 4319 break; 4320 } 4321 } 4322 4323 /* Seed template for SCSI host registration */ 4324 if (dev == &phba->pcidev->dev) { 4325 template = &phba->port_template; 4326 4327 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4328 /* Seed physical port template */ 4329 memcpy(template, &lpfc_template, sizeof(*template)); 4330 4331 if (use_no_reset_hba) 4332 /* template is for a no reset SCSI Host */ 4333 template->eh_host_reset_handler = NULL; 4334 4335 /* Template for all vports this physical port creates */ 4336 memcpy(&phba->vport_template, &lpfc_template, 4337 sizeof(*template)); 4338 phba->vport_template.shost_attrs = lpfc_vport_attrs; 4339 phba->vport_template.eh_bus_reset_handler = NULL; 4340 phba->vport_template.eh_host_reset_handler = NULL; 4341 phba->vport_template.vendor_id = 0; 4342 4343 /* Initialize the host templates with updated value */ 4344 if (phba->sli_rev == LPFC_SLI_REV4) { 4345 template->sg_tablesize = phba->cfg_scsi_seg_cnt; 4346 phba->vport_template.sg_tablesize = 4347 phba->cfg_scsi_seg_cnt; 4348 } else { 4349 template->sg_tablesize = phba->cfg_sg_seg_cnt; 4350 phba->vport_template.sg_tablesize = 4351 phba->cfg_sg_seg_cnt; 4352 } 4353 4354 } else { 4355 /* NVMET is for physical port only */ 4356 memcpy(template, &lpfc_template_nvme, 4357 sizeof(*template)); 4358 } 4359 } else { 4360 template = &phba->vport_template; 4361 } 4362 4363 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4364 if (!shost) 4365 goto out; 4366 4367 vport = (struct lpfc_vport *) shost->hostdata; 4368 vport->phba = phba; 4369 vport->load_flag |= FC_LOADING; 4370 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4371 vport->fc_rscn_flush = 0; 4372 lpfc_get_vport_cfgparam(vport); 4373 4374 /* Adjust value in vport */ 4375 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4376 4377 shost->unique_id = instance; 4378 shost->max_id = LPFC_MAX_TARGET; 4379 shost->max_lun = vport->cfg_max_luns; 4380 shost->this_id = -1; 4381 shost->max_cmd_len = 16; 4382 4383 if (phba->sli_rev == LPFC_SLI_REV4) { 4384 if (!phba->cfg_fcp_mq_threshold || 4385 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4386 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4387 4388 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4389 phba->cfg_fcp_mq_threshold); 4390 4391 shost->dma_boundary = 4392 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4393 4394 if (phba->cfg_xpsgl && !phba->nvmet_support) 4395 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4396 else 4397 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4398 } else 4399 /* SLI-3 has a limited number of hardware queues (3), 4400 * thus there is only one for FCP processing. 
4401 */ 4402 shost->nr_hw_queues = 1; 4403 4404 /* 4405 * Set initial can_queue value since 0 is no longer supported and 4406 * scsi_add_host will fail. This will be adjusted later based on the 4407 * max xri value determined in hba setup. 4408 */ 4409 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4410 if (dev != &phba->pcidev->dev) { 4411 shost->transportt = lpfc_vport_transport_template; 4412 vport->port_type = LPFC_NPIV_PORT; 4413 } else { 4414 shost->transportt = lpfc_transport_template; 4415 vport->port_type = LPFC_PHYSICAL_PORT; 4416 } 4417 4418 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4419 "9081 CreatePort TMPLATE type %x TBLsize %d " 4420 "SEGcnt %d/%d\n", 4421 vport->port_type, shost->sg_tablesize, 4422 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4423 4424 /* Initialize all internally managed lists. */ 4425 INIT_LIST_HEAD(&vport->fc_nodes); 4426 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4427 spin_lock_init(&vport->work_port_lock); 4428 4429 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4430 4431 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4432 4433 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4434 4435 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4436 lpfc_setup_bg(phba, shost); 4437 4438 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4439 if (error) 4440 goto out_put_shost; 4441 4442 spin_lock_irq(&phba->port_list_lock); 4443 list_add_tail(&vport->listentry, &phba->port_list); 4444 spin_unlock_irq(&phba->port_list_lock); 4445 return vport; 4446 4447 out_put_shost: 4448 scsi_host_put(shost); 4449 out: 4450 return NULL; 4451 } 4452 4453 /** 4454 * destroy_port - destroy an FC port 4455 * @vport: pointer to an lpfc virtual N_Port data structure. 4456 * 4457 * This routine destroys a FC port from the upper layer protocol. All the 4458 * resources associated with the port are released. 4459 **/ 4460 void 4461 destroy_port(struct lpfc_vport *vport) 4462 { 4463 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4464 struct lpfc_hba *phba = vport->phba; 4465 4466 lpfc_debugfs_terminate(vport); 4467 fc_remove_host(shost); 4468 scsi_remove_host(shost); 4469 4470 spin_lock_irq(&phba->port_list_lock); 4471 list_del_init(&vport->listentry); 4472 spin_unlock_irq(&phba->port_list_lock); 4473 4474 lpfc_cleanup(vport); 4475 return; 4476 } 4477 4478 /** 4479 * lpfc_get_instance - Get a unique integer ID 4480 * 4481 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4482 * uses the kernel idr facility to perform the task. 4483 * 4484 * Return codes: 4485 * instance - a unique integer ID allocated as the new instance. 4486 * -1 - lpfc get instance failed. 4487 **/ 4488 int 4489 lpfc_get_instance(void) 4490 { 4491 int ret; 4492 4493 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4494 return ret < 0 ? -1 : ret; 4495 } 4496 4497 /** 4498 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4499 * @shost: pointer to SCSI host data structure. 4500 * @time: elapsed time of the scan in jiffies. 4501 * 4502 * This routine is called by the SCSI layer with a SCSI host to determine 4503 * whether the scan host is finished. 4504 * 4505 * Note: there is no scan_start function as adapter initialization will have 4506 * asynchronously kicked off the link initialization. 4507 * 4508 * Return codes 4509 * 0 - SCSI host scan is not over yet. 4510 * 1 - SCSI host scan is over. 
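 *
 * The routine is plugged into the SCSI host template (sketch; the
 * driver's real template sets many more fields):
 *
 *   static struct scsi_host_template lpfc_template = {
 *           ...
 *           .scan_finished = lpfc_scan_finished,
 *   };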
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	fc_host_supported_speeds(shost) = 0;
	/*
	 * Avoid reporting supported link speed for FCoE as it can't be
	 * controlled via FCoE.
	 */
	if (phba->hba_flag & HBA_FCOE_MODE)
		return;

	if (phba->lmt & LMT_128Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
	if (phba->lmt & LMT_64Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
	if (phba->lmt & LMT_32Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host's attributes on an FC port.
 * The SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
4602 */ 4603 4604 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4605 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4606 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4607 4608 memset(fc_host_supported_fc4s(shost), 0, 4609 sizeof(fc_host_supported_fc4s(shost))); 4610 fc_host_supported_fc4s(shost)[2] = 1; 4611 fc_host_supported_fc4s(shost)[7] = 1; 4612 4613 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4614 sizeof fc_host_symbolic_name(shost)); 4615 4616 lpfc_host_supported_speeds_set(shost); 4617 4618 fc_host_maxframe_size(shost) = 4619 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4620 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4621 4622 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4623 4624 /* This value is also unchanging */ 4625 memset(fc_host_active_fc4s(shost), 0, 4626 sizeof(fc_host_active_fc4s(shost))); 4627 fc_host_active_fc4s(shost)[2] = 1; 4628 fc_host_active_fc4s(shost)[7] = 1; 4629 4630 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4631 spin_lock_irq(shost->host_lock); 4632 vport->load_flag &= ~FC_LOADING; 4633 spin_unlock_irq(shost->host_lock); 4634 } 4635 4636 /** 4637 * lpfc_stop_port_s3 - Stop SLI3 device port 4638 * @phba: pointer to lpfc hba data structure. 4639 * 4640 * This routine is invoked to stop an SLI3 device port, it stops the device 4641 * from generating interrupts and stops the device driver's timers for the 4642 * device. 4643 **/ 4644 static void 4645 lpfc_stop_port_s3(struct lpfc_hba *phba) 4646 { 4647 /* Clear all interrupt enable conditions */ 4648 writel(0, phba->HCregaddr); 4649 readl(phba->HCregaddr); /* flush */ 4650 /* Clear all pending interrupts */ 4651 writel(0xffffffff, phba->HAregaddr); 4652 readl(phba->HAregaddr); /* flush */ 4653 4654 /* Reset some HBA SLI setup states */ 4655 lpfc_stop_hba_timers(phba); 4656 phba->pport->work_port_events = 0; 4657 } 4658 4659 /** 4660 * lpfc_stop_port_s4 - Stop SLI4 device port 4661 * @phba: pointer to lpfc hba data structure. 4662 * 4663 * This routine is invoked to stop an SLI4 device port, it stops the device 4664 * from generating interrupts and stops the device driver's timers for the 4665 * device. 4666 **/ 4667 static void 4668 lpfc_stop_port_s4(struct lpfc_hba *phba) 4669 { 4670 /* Reset some HBA SLI4 setup states */ 4671 lpfc_stop_hba_timers(phba); 4672 if (phba->pport) 4673 phba->pport->work_port_events = 0; 4674 phba->sli4_hba.intr_enable = 0; 4675 } 4676 4677 /** 4678 * lpfc_stop_port - Wrapper function for stopping hba port 4679 * @phba: Pointer to HBA context object. 4680 * 4681 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4682 * the API jump table function pointer from the lpfc_hba struct. 4683 **/ 4684 void 4685 lpfc_stop_port(struct lpfc_hba *phba) 4686 { 4687 phba->lpfc_stop_port(phba); 4688 4689 if (phba->wq) 4690 flush_workqueue(phba->wq); 4691 } 4692 4693 /** 4694 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4695 * @phba: Pointer to hba for which this call is being executed. 4696 * 4697 * This routine starts the timer waiting for the FCF rediscovery to complete. 
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when the wait for FCF table rediscovery times
 * out. If new FCF record(s) has (have) been discovered during the wait
 * period, a new FCF event shall be added to the FCOE async event list,
 * and then the worker thread shall be woken up for processing from the
 * worker thread context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code.
 **/
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
	case LPFC_ASYNC_LINK_FAULT_LR_LRR:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0398 Unknown link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		break;
	}
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
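 *
 * Mapping (as implemented in the switch below): DOWN and LOGICAL_DOWN
 * map to LPFC_ATT_LINK_DOWN, LOGICAL_UP maps to LPFC_ATT_LINK_UP, and
 * a physical UP maps to LPFC_ATT_RESERVED so it is ignored until the
 * logical link comes up.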
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

/**
 * lpfc_sli_port_speed_get - Convert sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
			      phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}

/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * the value of Mbps for the link speed.
 *
 * Return: link speed in terms of Mbps.
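 *
 * For example (sketch), a 32G FC link attention code parses as:
 *
 *   lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
 *                              LPFC_FC_LA_SPEED_32G) == 32000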
4868 **/ 4869 static uint32_t 4870 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4871 uint8_t speed_code) 4872 { 4873 uint32_t port_speed; 4874 4875 switch (evt_code) { 4876 case LPFC_TRAILER_CODE_LINK: 4877 switch (speed_code) { 4878 case LPFC_ASYNC_LINK_SPEED_ZERO: 4879 port_speed = 0; 4880 break; 4881 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4882 port_speed = 10; 4883 break; 4884 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4885 port_speed = 100; 4886 break; 4887 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4888 port_speed = 1000; 4889 break; 4890 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4891 port_speed = 10000; 4892 break; 4893 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4894 port_speed = 20000; 4895 break; 4896 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4897 port_speed = 25000; 4898 break; 4899 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4900 port_speed = 40000; 4901 break; 4902 case LPFC_ASYNC_LINK_SPEED_100GBPS: 4903 port_speed = 100000; 4904 break; 4905 default: 4906 port_speed = 0; 4907 } 4908 break; 4909 case LPFC_TRAILER_CODE_FC: 4910 switch (speed_code) { 4911 case LPFC_FC_LA_SPEED_UNKNOWN: 4912 port_speed = 0; 4913 break; 4914 case LPFC_FC_LA_SPEED_1G: 4915 port_speed = 1000; 4916 break; 4917 case LPFC_FC_LA_SPEED_2G: 4918 port_speed = 2000; 4919 break; 4920 case LPFC_FC_LA_SPEED_4G: 4921 port_speed = 4000; 4922 break; 4923 case LPFC_FC_LA_SPEED_8G: 4924 port_speed = 8000; 4925 break; 4926 case LPFC_FC_LA_SPEED_10G: 4927 port_speed = 10000; 4928 break; 4929 case LPFC_FC_LA_SPEED_16G: 4930 port_speed = 16000; 4931 break; 4932 case LPFC_FC_LA_SPEED_32G: 4933 port_speed = 32000; 4934 break; 4935 case LPFC_FC_LA_SPEED_64G: 4936 port_speed = 64000; 4937 break; 4938 case LPFC_FC_LA_SPEED_128G: 4939 port_speed = 128000; 4940 break; 4941 default: 4942 port_speed = 0; 4943 } 4944 break; 4945 default: 4946 port_speed = 0; 4947 } 4948 return port_speed; 4949 } 4950 4951 /** 4952 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4953 * @phba: pointer to lpfc hba data structure. 4954 * @acqe_link: pointer to the async link completion queue entry. 4955 * 4956 * This routine is to handle the SLI4 asynchronous FCoE link event. 
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have processed this link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
					bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
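	 *
	 * The faked completion below reuses the normal
	 * lpfc_mbx_cmpl_read_topology() callback, so the FCoE path
	 * funnels into the same link-attention handling as a real
	 * READ_TOPOLOGY completion.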
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse port fault information field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology.
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * the value of a Read topology link speed.
 *
 * Return: link speed in terms of Read topology.
 **/
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:
		port_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_FC_LA_SPEED_2G:
		port_speed = LPFC_LINK_SPEED_2GHZ;
		break;
	case LPFC_FC_LA_SPEED_4G:
		port_speed = LPFC_LINK_SPEED_4GHZ;
		break;
	case LPFC_FC_LA_SPEED_8G:
		port_speed = LPFC_LINK_SPEED_8GHZ;
		break;
	case LPFC_FC_LA_SPEED_16G:
		port_speed = LPFC_LINK_SPEED_16GHZ;
		break;
	case LPFC_FC_LA_SPEED_32G:
		port_speed = LPFC_LINK_SPEED_32GHZ;
		break;
	case LPFC_FC_LA_SPEED_64G:
		port_speed = LPFC_LINK_SPEED_64GHZ;
		break;
	case LPFC_FC_LA_SPEED_128G:
		port_speed = LPFC_LINK_SPEED_128GHZ;
		break;
	case LPFC_FC_LA_SPEED_256G:
		port_speed = LPFC_LINK_SPEED_256GHZ;
		break;
	default:
		port_speed = 0;
		break;
	}

	return port_speed;
}

#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Did port __idx report an error */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? \
"YES" : "NO") : "NA" 5142 5143 static void 5144 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 5145 struct lpfc_acqe_fc_la *acqe_fc) 5146 { 5147 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 5148 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 5149 5150 phba->sli4_hba.link_state.speed = 5151 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5152 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5153 5154 phba->sli4_hba.link_state.logical_speed = 5155 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5156 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 5157 phba->fc_linkspeed = 5158 lpfc_async_link_speed_to_read_top( 5159 phba, 5160 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5161 5162 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 5163 phba->trunk_link.link0.state = 5164 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 5165 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5166 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 5167 } 5168 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 5169 phba->trunk_link.link1.state = 5170 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 5171 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5172 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 5173 } 5174 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 5175 phba->trunk_link.link2.state = 5176 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 5177 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5178 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 5179 } 5180 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 5181 phba->trunk_link.link3.state = 5182 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 5183 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5184 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 5185 } 5186 5187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5188 "2910 Async FC Trunking Event - Speed:%d\n" 5189 "\tLogical speed:%d " 5190 "port0: %s port1: %s port2: %s port3: %s\n", 5191 phba->sli4_hba.link_state.speed, 5192 phba->sli4_hba.link_state.logical_speed, 5193 trunk_link_status(0), trunk_link_status(1), 5194 trunk_link_status(2), trunk_link_status(3)); 5195 5196 if (port_fault) 5197 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5198 "3202 trunk error:0x%x (%s) seen on port0:%s " 5199 /* 5200 * SLI-4: We have only 0xA error codes 5201 * defined as of now. print an appropriate 5202 * message in case driver needs to be updated. 5203 */ 5204 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 5205 "UNDEFINED. update driver." : trunk_errmsg[err], 5206 trunk_port_fault(0), trunk_port_fault(1), 5207 trunk_port_fault(2), trunk_port_fault(3)); 5208 } 5209 5210 5211 /** 5212 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 5213 * @phba: pointer to lpfc hba data structure. 5214 * @acqe_fc: pointer to the async fc completion queue entry. 5215 * 5216 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 5217 * that the event was received and then issue a read_topology mailbox command so 5218 * that the rest of the driver will treat it the same as SLI3. 
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
		lpfc_update_trunk_link_status(phba, acqe_fc);
		return;
	}

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
					bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_LINK_DOWN)
		phba->sli4_hba.link_state.logical_speed = 0;
	else if (!phba->sli4_hba.conf_trunk)
		phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have processed this link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
			phba->link_flag |= LS_MDS_LINK_DOWN;
break; 5317 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 5318 phba->link_flag |= LS_MDS_LOOPBACK; 5319 break; 5320 default: 5321 break; 5322 } 5323 5324 /* Initialize completion status */ 5325 mb = &pmb->u.mb; 5326 mb->mbxStatus = MBX_SUCCESS; 5327 5328 /* Parse port fault information field */ 5329 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 5330 5331 /* Parse and translate link attention fields */ 5332 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 5333 la->eventTag = acqe_fc->event_tag; 5334 5335 if (phba->sli4_hba.link_state.status == 5336 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 5337 bf_set(lpfc_mbx_read_top_att_type, la, 5338 LPFC_FC_LA_TYPE_UNEXP_WWPN); 5339 } else { 5340 bf_set(lpfc_mbx_read_top_att_type, la, 5341 LPFC_FC_LA_TYPE_LINK_DOWN); 5342 } 5343 /* Invoke the mailbox command callback function */ 5344 lpfc_mbx_cmpl_read_topology(phba, pmb); 5345 5346 return; 5347 } 5348 5349 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5350 if (rc == MBX_NOT_FINISHED) 5351 goto out_free_dmabuf; 5352 return; 5353 5354 out_free_dmabuf: 5355 kfree(mp); 5356 out_free_pmb: 5357 mempool_free(pmb, phba->mbox_mem_pool); 5358 } 5359 5360 /** 5361 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 5362 * @phba: pointer to lpfc hba data structure. 5363 * @acqe_sli: pointer to the async SLI completion queue entry. 5364 * 5365 * This routine is to handle the SLI4 asynchronous SLI events. 5366 **/ 5367 static void 5368 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 5369 { 5370 char port_name; 5371 char message[128]; 5372 uint8_t status; 5373 uint8_t evt_type; 5374 uint8_t operational = 0; 5375 struct temp_event temp_event_data; 5376 struct lpfc_acqe_misconfigured_event *misconfigured; 5377 struct Scsi_Host *shost; 5378 struct lpfc_vport **vports; 5379 int rc, i; 5380 5381 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 5382 5383 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5384 "2901 Async SLI event - Type:%d, Event Data: x%08x " 5385 "x%08x x%08x x%08x\n", evt_type, 5386 acqe_sli->event_data1, acqe_sli->event_data2, 5387 acqe_sli->reserved, acqe_sli->trailer); 5388 5389 port_name = phba->Port[0]; 5390 if (port_name == 0x00) 5391 port_name = '?'; /* get port name is empty */ 5392 5393 switch (evt_type) { 5394 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 5395 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5396 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 5397 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5398 5399 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5400 "3190 Over Temperature:%d Celsius- Port Name %c\n", 5401 acqe_sli->event_data1, port_name); 5402 5403 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 5404 shost = lpfc_shost_from_vport(phba->pport); 5405 fc_host_post_vendor_event(shost, fc_get_event_number(), 5406 sizeof(temp_event_data), 5407 (char *)&temp_event_data, 5408 SCSI_NL_VID_TYPE_PCI 5409 | PCI_VENDOR_ID_EMULEX); 5410 break; 5411 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 5412 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5413 temp_event_data.event_code = LPFC_NORMAL_TEMP; 5414 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5415 5416 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5417 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 5418 acqe_sli->event_data1, port_name); 5419 5420 shost = lpfc_shost_from_vport(phba->pport); 5421 fc_host_post_vendor_event(shost, fc_get_event_number(), 5422 sizeof(temp_event_data), 5423 (char *)&temp_event_data, 5424 SCSI_NL_VID_TYPE_PCI 5425 | PCI_VENDOR_ID_EMULEX); 5426 
break; 5427 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 5428 misconfigured = (struct lpfc_acqe_misconfigured_event *) 5429 &acqe_sli->event_data1; 5430 5431 /* fetch the status for this port */ 5432 switch (phba->sli4_hba.lnk_info.lnk_no) { 5433 case LPFC_LINK_NUMBER_0: 5434 status = bf_get(lpfc_sli_misconfigured_port0_state, 5435 &misconfigured->theEvent); 5436 operational = bf_get(lpfc_sli_misconfigured_port0_op, 5437 &misconfigured->theEvent); 5438 break; 5439 case LPFC_LINK_NUMBER_1: 5440 status = bf_get(lpfc_sli_misconfigured_port1_state, 5441 &misconfigured->theEvent); 5442 operational = bf_get(lpfc_sli_misconfigured_port1_op, 5443 &misconfigured->theEvent); 5444 break; 5445 case LPFC_LINK_NUMBER_2: 5446 status = bf_get(lpfc_sli_misconfigured_port2_state, 5447 &misconfigured->theEvent); 5448 operational = bf_get(lpfc_sli_misconfigured_port2_op, 5449 &misconfigured->theEvent); 5450 break; 5451 case LPFC_LINK_NUMBER_3: 5452 status = bf_get(lpfc_sli_misconfigured_port3_state, 5453 &misconfigured->theEvent); 5454 operational = bf_get(lpfc_sli_misconfigured_port3_op, 5455 &misconfigured->theEvent); 5456 break; 5457 default: 5458 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5459 "3296 " 5460 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 5461 "event: Invalid link %d", 5462 phba->sli4_hba.lnk_info.lnk_no); 5463 return; 5464 } 5465 5466 /* Skip if optic state unchanged */ 5467 if (phba->sli4_hba.lnk_info.optic_state == status) 5468 return; 5469 5470 switch (status) { 5471 case LPFC_SLI_EVENT_STATUS_VALID: 5472 sprintf(message, "Physical Link is functional"); 5473 break; 5474 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 5475 sprintf(message, "Optics faulted/incorrectly " 5476 "installed/not installed - Reseat optics, " 5477 "if issue not resolved, replace."); 5478 break; 5479 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 5480 sprintf(message, 5481 "Optics of two types installed - Remove one " 5482 "optic or install matching pair of optics."); 5483 break; 5484 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 5485 sprintf(message, "Incompatible optics - Replace with " 5486 "compatible optics for card to function."); 5487 break; 5488 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 5489 sprintf(message, "Unqualified optics - Replace with " 5490 "Avago optics for Warranty and Technical " 5491 "Support - Link is%s operational", 5492 (operational) ? " not" : ""); 5493 break; 5494 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 5495 sprintf(message, "Uncertified optics - Replace with " 5496 "Avago-certified optics to enable link " 5497 "operation - Link is%s operational", 5498 (operational) ? 
" not" : ""); 5499 break; 5500 default: 5501 /* firmware is reporting a status we don't know about */ 5502 sprintf(message, "Unknown event status x%02x", status); 5503 break; 5504 } 5505 5506 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 5507 rc = lpfc_sli4_read_config(phba); 5508 if (rc) { 5509 phba->lmt = 0; 5510 lpfc_printf_log(phba, KERN_ERR, 5511 LOG_TRACE_EVENT, 5512 "3194 Unable to retrieve supported " 5513 "speeds, rc = 0x%x\n", rc); 5514 } 5515 vports = lpfc_create_vport_work_array(phba); 5516 if (vports != NULL) { 5517 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5518 i++) { 5519 shost = lpfc_shost_from_vport(vports[i]); 5520 lpfc_host_supported_speeds_set(shost); 5521 } 5522 } 5523 lpfc_destroy_vport_work_array(phba, vports); 5524 5525 phba->sli4_hba.lnk_info.optic_state = status; 5526 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5527 "3176 Port Name %c %s\n", port_name, message); 5528 break; 5529 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 5530 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5531 "3192 Remote DPort Test Initiated - " 5532 "Event Data1:x%08x Event Data2: x%08x\n", 5533 acqe_sli->event_data1, acqe_sli->event_data2); 5534 break; 5535 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 5536 /* Misconfigured WWN. Reports that the SLI Port is configured 5537 * to use FA-WWN, but the attached device doesn’t support it. 5538 * No driver action is required. 5539 * Event Data1 - N.A, Event Data2 - N.A 5540 */ 5541 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, 5542 "2699 Misconfigured FA-WWN - Attached device does " 5543 "not support FA-WWN\n"); 5544 break; 5545 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 5546 /* EEPROM failure. No driver action is required */ 5547 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5548 "2518 EEPROM failure - " 5549 "Event Data1: x%08x Event Data2: x%08x\n", 5550 acqe_sli->event_data1, acqe_sli->event_data2); 5551 break; 5552 default: 5553 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5554 "3193 Unrecognized SLI event, type: 0x%x", 5555 evt_type); 5556 break; 5557 } 5558 } 5559 5560 /** 5561 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 5562 * @vport: pointer to vport data structure. 5563 * 5564 * This routine is to perform Clear Virtual Link (CVL) on a vport in 5565 * response to a CVL event. 5566 * 5567 * Return the pointer to the ndlp with the vport if successful, otherwise 5568 * return NULL. 
5569 **/ 5570 static struct lpfc_nodelist * 5571 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 5572 { 5573 struct lpfc_nodelist *ndlp; 5574 struct Scsi_Host *shost; 5575 struct lpfc_hba *phba; 5576 5577 if (!vport) 5578 return NULL; 5579 phba = vport->phba; 5580 if (!phba) 5581 return NULL; 5582 ndlp = lpfc_findnode_did(vport, Fabric_DID); 5583 if (!ndlp) { 5584 /* Cannot find existing Fabric ndlp, so allocate a new one */ 5585 ndlp = lpfc_nlp_init(vport, Fabric_DID); 5586 if (!ndlp) 5587 return 0; 5588 /* Set the node type */ 5589 ndlp->nlp_type |= NLP_FABRIC; 5590 /* Put ndlp onto node list */ 5591 lpfc_enqueue_node(vport, ndlp); 5592 } 5593 if ((phba->pport->port_state < LPFC_FLOGI) && 5594 (phba->pport->port_state != LPFC_VPORT_FAILED)) 5595 return NULL; 5596 /* If virtual link is not yet instantiated ignore CVL */ 5597 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 5598 && (vport->port_state != LPFC_VPORT_FAILED)) 5599 return NULL; 5600 shost = lpfc_shost_from_vport(vport); 5601 if (!shost) 5602 return NULL; 5603 lpfc_linkdown_port(vport); 5604 lpfc_cleanup_pending_mbox(vport); 5605 spin_lock_irq(shost->host_lock); 5606 vport->fc_flag |= FC_VPORT_CVL_RCVD; 5607 spin_unlock_irq(shost->host_lock); 5608 5609 return ndlp; 5610 } 5611 5612 /** 5613 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 5614 * @phba: pointer to lpfc hba data structure. 5615 * 5616 * This routine is to perform Clear Virtual Link (CVL) on all vports in 5617 * response to a FCF dead event. 5618 **/ 5619 static void 5620 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 5621 { 5622 struct lpfc_vport **vports; 5623 int i; 5624 5625 vports = lpfc_create_vport_work_array(phba); 5626 if (vports) 5627 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 5628 lpfc_sli4_perform_vport_cvl(vports[i]); 5629 lpfc_destroy_vport_work_array(phba, vports); 5630 } 5631 5632 /** 5633 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 5634 * @phba: pointer to lpfc hba data structure. 5635 * @acqe_fip: pointer to the async fcoe completion queue entry. 5636 * 5637 * This routine is to handle the SLI4 asynchronous fcoe event. 5638 **/ 5639 static void 5640 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 5641 struct lpfc_acqe_fip *acqe_fip) 5642 { 5643 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 5644 int rc; 5645 struct lpfc_vport *vport; 5646 struct lpfc_nodelist *ndlp; 5647 int active_vlink_present; 5648 struct lpfc_vport **vports; 5649 int i; 5650 5651 phba->fc_eventTag = acqe_fip->event_tag; 5652 phba->fcoe_eventtag = acqe_fip->event_tag; 5653 switch (event_type) { 5654 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5655 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5656 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5657 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5658 "2546 New FCF event, evt_tag:x%x, " 5659 "index:x%x\n", 5660 acqe_fip->event_tag, 5661 acqe_fip->index); 5662 else 5663 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 5664 LOG_DISCOVERY, 5665 "2788 FCF param modified event, " 5666 "evt_tag:x%x, index:x%x\n", 5667 acqe_fip->event_tag, 5668 acqe_fip->index); 5669 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5670 /* 5671 * During period of FCF discovery, read the FCF 5672 * table record indexed by the event to update 5673 * FCF roundrobin failover eligible FCF bmask. 
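 * (The bmask is a standard Linux bitmap with one bit per FCF table index;
 * marking index N eligible is conceptually set_bit(N, fcf_rr_bmask), and
 * lpfc_sli4_fcf_rr_index_clear() below clears it again when an FCF dies.)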
5674 */ 5675 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5676 LOG_DISCOVERY, 5677 "2779 Read FCF (x%x) for updating " 5678 "roundrobin FCF failover bmask\n", 5679 acqe_fip->index); 5680 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5681 } 5682 5683 /* If the FCF discovery is in progress, do nothing. */ 5684 spin_lock_irq(&phba->hbalock); 5685 if (phba->hba_flag & FCF_TS_INPROG) { 5686 spin_unlock_irq(&phba->hbalock); 5687 break; 5688 } 5689 /* If fast FCF failover rescan event is pending, do nothing */ 5690 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 5691 spin_unlock_irq(&phba->hbalock); 5692 break; 5693 } 5694 5695 /* If the FCF has been in discovered state, do nothing. */ 5696 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5697 spin_unlock_irq(&phba->hbalock); 5698 break; 5699 } 5700 spin_unlock_irq(&phba->hbalock); 5701 5702 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5703 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5704 "2770 Start FCF table scan per async FCF " 5705 "event, evt_tag:x%x, index:x%x\n", 5706 acqe_fip->event_tag, acqe_fip->index); 5707 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5708 LPFC_FCOE_FCF_GET_FIRST); 5709 if (rc) 5710 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5711 "2547 Issue FCF scan read FCF mailbox " 5712 "command failed (x%x)\n", rc); 5713 break; 5714 5715 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5716 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5717 "2548 FCF Table full count 0x%x tag 0x%x\n", 5718 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5719 acqe_fip->event_tag); 5720 break; 5721 5722 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5723 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5724 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5725 "2549 FCF (x%x) disconnected from network, " 5726 "tag:x%x\n", acqe_fip->index, 5727 acqe_fip->event_tag); 5728 /* 5729 * If we are in the middle of FCF failover process, clear 5730 * the corresponding FCF bit in the roundrobin bitmap. 5731 */ 5732 spin_lock_irq(&phba->hbalock); 5733 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 5734 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 5735 spin_unlock_irq(&phba->hbalock); 5736 /* Update FLOGI FCF failover eligible FCF bmask */ 5737 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 5738 break; 5739 } 5740 spin_unlock_irq(&phba->hbalock); 5741 5742 /* If the event is not for currently used fcf do nothing */ 5743 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 5744 break; 5745 5746 /* 5747 * Otherwise, request the port to rediscover the entire FCF 5748 * table for a fast recovery from case that the current FCF 5749 * is no longer valid as we are not in the middle of FCF 5750 * failover process already. 
5751 */ 5752 spin_lock_irq(&phba->hbalock); 5753 /* Mark the fast failover process in progress */ 5754 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 5755 spin_unlock_irq(&phba->hbalock); 5756 5757 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5758 "2771 Start FCF fast failover process due to " 5759 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 5760 "\n", acqe_fip->event_tag, acqe_fip->index); 5761 rc = lpfc_sli4_redisc_fcf_table(phba); 5762 if (rc) { 5763 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5764 LOG_TRACE_EVENT, 5765 "2772 Issue FCF rediscover mailbox " 5766 "command failed, fail through to FCF " 5767 "dead event\n"); 5768 spin_lock_irq(&phba->hbalock); 5769 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 5770 spin_unlock_irq(&phba->hbalock); 5771 /* 5772 * Last resort will fail over by treating this 5773 * as a link down to FCF registration. 5774 */ 5775 lpfc_sli4_fcf_dead_failthrough(phba); 5776 } else { 5777 /* Reset FCF roundrobin bmask for new discovery */ 5778 lpfc_sli4_clear_fcf_rr_bmask(phba); 5779 /* 5780 * Handling fast FCF failover to a DEAD FCF event is 5781 * considered equivalent to receiving a CVL on all vports. 5782 */ 5783 lpfc_sli4_perform_all_vport_cvl(phba); 5784 } 5785 break; 5786 case LPFC_FIP_EVENT_TYPE_CVL: 5787 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5788 lpfc_printf_log(phba, KERN_ERR, 5789 LOG_TRACE_EVENT, 5790 "2718 Clear Virtual Link Received for VPI 0x%x" 5791 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5792 5793 vport = lpfc_find_vport_by_vpid(phba, 5794 acqe_fip->index); 5795 ndlp = lpfc_sli4_perform_vport_cvl(vport); 5796 if (!ndlp) 5797 break; 5798 active_vlink_present = 0; 5799 5800 vports = lpfc_create_vport_work_array(phba); 5801 if (vports) { 5802 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5803 i++) { 5804 if ((!(vports[i]->fc_flag & 5805 FC_VPORT_CVL_RCVD)) && 5806 (vports[i]->port_state > LPFC_FDISC)) { 5807 active_vlink_present = 1; 5808 break; 5809 } 5810 } 5811 lpfc_destroy_vport_work_array(phba, vports); 5812 } 5813 5814 /* 5815 * Don't re-instantiate if vport is marked for deletion. 5816 * If we are here first then vport_delete is going to wait 5817 * for discovery to complete. 5818 */ 5819 if (!(vport->load_flag & FC_UNLOADING) && 5820 active_vlink_present) { 5821 /* 5822 * If there are other active VLinks present, 5823 * re-instantiate the Vlink using FDISC. 5824 */ 5825 mod_timer(&ndlp->nlp_delayfunc, 5826 jiffies + msecs_to_jiffies(1000)); 5827 spin_lock_irq(&ndlp->lock); 5828 ndlp->nlp_flag |= NLP_DELAY_TMO; 5829 spin_unlock_irq(&ndlp->lock); 5830 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 5831 vport->port_state = LPFC_FDISC; 5832 } else { 5833 /* 5834 * Otherwise, we request the port to rediscover 5835 * the entire FCF table for a fast recovery 5836 * in case the current FCF 5837 * is no longer valid, if we are not already 5838 * in the FCF failover process.
5839 */ 5840 spin_lock_irq(&phba->hbalock); 5841 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5842 spin_unlock_irq(&phba->hbalock); 5843 break; 5844 } 5845 /* Mark the fast failover process in progress */ 5846 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 5847 spin_unlock_irq(&phba->hbalock); 5848 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5849 LOG_DISCOVERY, 5850 "2773 Start FCF failover per CVL, " 5851 "evt_tag:x%x\n", acqe_fip->event_tag); 5852 rc = lpfc_sli4_redisc_fcf_table(phba); 5853 if (rc) { 5854 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5855 LOG_TRACE_EVENT, 5856 "2774 Issue FCF rediscover " 5857 "mailbox command failed, fail " 5858 "through to CVL event\n"); 5859 spin_lock_irq(&phba->hbalock); 5860 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 5861 spin_unlock_irq(&phba->hbalock); 5862 /* 5863 * Last resort will be a retry on the 5864 * currently registered FCF entry. 5865 */ 5866 lpfc_retry_pport_discovery(phba); 5867 } else 5868 /* 5869 * Reset FCF roundrobin bmask for new 5870 * discovery. 5871 */ 5872 lpfc_sli4_clear_fcf_rr_bmask(phba); 5873 } 5874 break; 5875 default: 5876 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5877 "0288 Unknown FCoE event type 0x%x event tag " 5878 "0x%x\n", event_type, acqe_fip->event_tag); 5879 break; 5880 } 5881 } 5882 5883 /** 5884 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 5885 * @phba: pointer to lpfc hba data structure. 5886 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 5887 * 5888 * This routine is to handle the SLI4 asynchronous dcbx event. 5889 **/ 5890 static void 5891 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 5892 struct lpfc_acqe_dcbx *acqe_dcbx) 5893 { 5894 phba->fc_eventTag = acqe_dcbx->event_tag; 5895 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5896 "0290 The SLI4 DCBX asynchronous event is not " 5897 "handled yet\n"); 5898 } 5899 5900 /** 5901 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 5902 * @phba: pointer to lpfc hba data structure. 5903 * @acqe_grp5: pointer to the async grp5 completion queue entry. 5904 * 5905 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 5906 * is an asynchronous notification of a logical link speed change. The Port 5907 * reports the logical link speed in units of 10 Mbps (e.g. a reported value of 1000 corresponds to 10000 Mbps). 5908 **/ 5909 static void 5910 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 5911 struct lpfc_acqe_grp5 *acqe_grp5) 5912 { 5913 uint16_t prev_ll_spd; 5914 5915 phba->fc_eventTag = acqe_grp5->event_tag; 5916 phba->fcoe_eventtag = acqe_grp5->event_tag; 5917 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 5918 phba->sli4_hba.link_state.logical_speed = 5919 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 5920 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5921 "2789 GRP5 Async Event: Updating logical link speed " 5922 "from %dMbps to %dMbps\n", prev_ll_spd, 5923 phba->sli4_hba.link_state.logical_speed); 5924 } 5925 5926 /** 5927 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events 5928 * @phba: pointer to lpfc hba data structure. 5929 * 5930 * This routine is invoked by the worker thread to process all the pending 5931 * SLI4 asynchronous events.
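 *
 * Events are queued by the slow-path completion-queue handling code and
 * drained here. An illustrative producer-side sketch (assumed shape only;
 * the actual queuing lives elsewhere in the driver, in lpfc_sli.c):
 *
 *   spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
 *   list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
 *   spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
 *   ... then set ASYNC_EVENT in phba->hba_flag and wake the worker ...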
5932 **/ 5933 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 5934 { 5935 struct lpfc_cq_event *cq_event; 5936 unsigned long iflags; 5937 5938 /* First, declare the async event has been handled */ 5939 spin_lock_irqsave(&phba->hbalock, iflags); 5940 phba->hba_flag &= ~ASYNC_EVENT; 5941 spin_unlock_irqrestore(&phba->hbalock, iflags); 5942 5943 /* Now, handle all the async events */ 5944 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 5945 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 5946 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 5947 cq_event, struct lpfc_cq_event, list); 5948 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, 5949 iflags); 5950 5951 /* Process the asynchronous event */ 5952 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 5953 case LPFC_TRAILER_CODE_LINK: 5954 lpfc_sli4_async_link_evt(phba, 5955 &cq_event->cqe.acqe_link); 5956 break; 5957 case LPFC_TRAILER_CODE_FCOE: 5958 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 5959 break; 5960 case LPFC_TRAILER_CODE_DCBX: 5961 lpfc_sli4_async_dcbx_evt(phba, 5962 &cq_event->cqe.acqe_dcbx); 5963 break; 5964 case LPFC_TRAILER_CODE_GRP5: 5965 lpfc_sli4_async_grp5_evt(phba, 5966 &cq_event->cqe.acqe_grp5); 5967 break; 5968 case LPFC_TRAILER_CODE_FC: 5969 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 5970 break; 5971 case LPFC_TRAILER_CODE_SLI: 5972 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 5973 break; 5974 default: 5975 lpfc_printf_log(phba, KERN_ERR, 5976 LOG_TRACE_EVENT, 5977 "1804 Invalid asynchronous event code: " 5978 "x%x\n", bf_get(lpfc_trailer_code, 5979 &cq_event->cqe.mcqe_cmpl)); 5980 break; 5981 } 5982 5983 /* Free the completion event processed to the free pool */ 5984 lpfc_sli4_cq_event_release(phba, cq_event); 5985 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 5986 } 5987 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 5988 } 5989 5990 /** 5991 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 5992 * @phba: pointer to lpfc hba data structure. 5993 * 5994 * This routine is invoked by the worker thread to process FCF table 5995 * rediscovery pending completion event. 5996 **/ 5997 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5998 { 5999 int rc; 6000 6001 spin_lock_irq(&phba->hbalock); 6002 /* Clear FCF rediscovery timeout event */ 6003 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 6004 /* Clear driver fast failover FCF record flag */ 6005 phba->fcf.failover_rec.flag = 0; 6006 /* Set state for FCF fast failover */ 6007 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 6008 spin_unlock_irq(&phba->hbalock); 6009 6010 /* Scan FCF table from the first entry to re-discover SAN */ 6011 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6012 "2777 Start post-quiescent FCF table scan\n"); 6013 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 6014 if (rc) 6015 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6016 "2747 Issue FCF scan read FCF mailbox " 6017 "command failed 0x%x\n", rc); 6018 } 6019 6020 /** 6021 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 6022 * @phba: pointer to lpfc hba data structure. 6023 * @dev_grp: The HBA PCI-Device group number. 6024 * 6025 * This routine is invoked to set up the per HBA PCI-Device group function 6026 * API jump table entries. 
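 *
 * After setup, generic code dispatches through the table; for example
 * (illustrative):
 *
 *   phba->lpfc_stop_port(phba);
 *
 * resolves to lpfc_stop_port_s3() on LPFC_PCI_DEV_LP (SLI-3) adapters and
 * to lpfc_stop_port_s4() on LPFC_PCI_DEV_OC (SLI-4) adapters, as wired up
 * by lpfc_init_api_table_setup() below.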
6027 * 6028 * Return: 0 if success, otherwise -ENODEV 6029 **/ 6030 int 6031 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6032 { 6033 int rc; 6034 6035 /* Set up lpfc PCI-device group */ 6036 phba->pci_dev_grp = dev_grp; 6037 6038 /* The LPFC_PCI_DEV_OC uses SLI4 */ 6039 if (dev_grp == LPFC_PCI_DEV_OC) 6040 phba->sli_rev = LPFC_SLI_REV4; 6041 6042 /* Set up device INIT API function jump table */ 6043 rc = lpfc_init_api_table_setup(phba, dev_grp); 6044 if (rc) 6045 return -ENODEV; 6046 /* Set up SCSI API function jump table */ 6047 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 6048 if (rc) 6049 return -ENODEV; 6050 /* Set up SLI API function jump table */ 6051 rc = lpfc_sli_api_table_setup(phba, dev_grp); 6052 if (rc) 6053 return -ENODEV; 6054 /* Set up MBOX API function jump table */ 6055 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 6056 if (rc) 6057 return -ENODEV; 6058 6059 return 0; 6060 } 6061 6062 /** 6063 * lpfc_log_intr_mode - Log the active interrupt mode 6064 * @phba: pointer to lpfc hba data structure. 6065 * @intr_mode: active interrupt mode adopted. 6066 * 6067 * This routine is invoked to log the interrupt mode currently in use by 6068 * the device. 6069 **/ 6070 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 6071 { 6072 switch (intr_mode) { 6073 case 0: 6074 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6075 "0470 Enabled INTx interrupt mode.\n"); 6076 break; 6077 case 1: 6078 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6079 "0481 Enabled MSI interrupt mode.\n"); 6080 break; 6081 case 2: 6082 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6083 "0480 Enabled MSI-X interrupt mode.\n"); 6084 break; 6085 default: 6086 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6087 "0482 Illegal interrupt mode.\n"); 6088 break; 6089 } 6090 return; 6091 } 6092 6093 /** 6094 * lpfc_enable_pci_dev - Enable a generic PCI device. 6095 * @phba: pointer to lpfc hba data structure. 6096 * 6097 * This routine is invoked to perform the PCI-device enabling that is 6098 * common to all lpfc HBAs. 6099 * 6100 * Return codes 6101 * 0 - successful 6102 * other values - error 6103 **/ 6104 static int 6105 lpfc_enable_pci_dev(struct lpfc_hba *phba) 6106 { 6107 struct pci_dev *pdev; 6108 6109 /* Obtain PCI device reference */ 6110 if (!phba->pcidev) 6111 goto out_error; 6112 else 6113 pdev = phba->pcidev; 6114 /* Enable PCI device */ 6115 if (pci_enable_device_mem(pdev)) 6116 goto out_error; 6117 /* Request PCI resource for the device */ 6118 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 6119 goto out_disable_device; 6120 /* Set up device as PCI master and save state for EEH */ 6121 pci_set_master(pdev); 6122 pci_try_set_mwi(pdev); 6123 pci_save_state(pdev); 6124 6125 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 6126 if (pci_is_pcie(pdev)) 6127 pdev->needs_freset = 1; 6128 6129 return 0; 6130 6131 out_disable_device: 6132 pci_disable_device(pdev); 6133 out_error: 6134 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6135 "1401 Failed to enable pci device\n"); 6136 return -ENODEV; 6137 } 6138 6139 /** 6140 * lpfc_disable_pci_dev - Disable a generic PCI device. 6141 * @phba: pointer to lpfc hba data structure. 6142 * 6143 * This routine is invoked to perform the PCI-device disabling that is 6144 * common to all lpfc HBAs.
6145 **/ 6146 static void 6147 lpfc_disable_pci_dev(struct lpfc_hba *phba) 6148 { 6149 struct pci_dev *pdev; 6150 6151 /* Obtain PCI device reference */ 6152 if (!phba->pcidev) 6153 return; 6154 else 6155 pdev = phba->pcidev; 6156 /* Release PCI resource and disable PCI device */ 6157 pci_release_mem_regions(pdev); 6158 pci_disable_device(pdev); 6159 6160 return; 6161 } 6162 6163 /** 6164 * lpfc_reset_hba - Reset a hba 6165 * @phba: pointer to lpfc hba data structure. 6166 * 6167 * This routine is invoked to reset a hba device. It brings the HBA 6168 * offline, performs a board restart, and then brings the board back 6169 * online. lpfc_offline() calls lpfc_sli_hba_down(), which cleans up 6170 * any outstanding mailbox commands. 6171 **/ 6172 void 6173 lpfc_reset_hba(struct lpfc_hba *phba) 6174 { 6175 /* If resets are disabled then set error state and return. */ 6176 if (!phba->cfg_enable_hba_reset) { 6177 phba->link_state = LPFC_HBA_ERROR; 6178 return; 6179 } 6180 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 6181 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6182 else 6183 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 6184 lpfc_offline(phba); 6185 lpfc_sli_brdrestart(phba); 6186 lpfc_online(phba); 6187 lpfc_unblock_mgmt_io(phba); 6188 } 6189 6190 /** 6191 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 6192 * @phba: pointer to lpfc hba data structure. 6193 * 6194 * This function reads the device's PCI SR-IOV extended capability to 6195 * determine how many virtual functions the physical function supports. 6196 * 6197 * Returns the TotalVFs value from the SR-IOV capability, or 0 if the 6198 * device does not support SR-IOV. 6199 **/ 6200 uint16_t 6201 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 6202 { 6203 struct pci_dev *pdev = phba->pcidev; 6204 uint16_t nr_virtfn; 6205 int pos; 6206 6207 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 6208 if (pos == 0) 6209 return 0; 6210 6211 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 6212 return nr_virtfn; 6213 } 6214 6215 /** 6216 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 6217 * @phba: pointer to lpfc hba data structure. 6218 * @nr_vfn: number of virtual functions to be enabled. 6219 * 6220 * This function enables the PCI SR-IOV virtual functions on a physical 6221 * function. It invokes the PCI SR-IOV API with @nr_vfn to enable that 6222 * many virtual functions on the physical function. As 6223 * not all devices support SR-IOV, a failure return from the pci_enable_sriov() 6224 * API call is not considered an error condition for most devices.
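 *
 * Illustrative call (hypothetical values): with the module parameter
 * lpfc_sriov_nr_virtfn=4, resource setup invokes
 *
 *   rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 4);
 *
 * which returns -EINVAL if the SR-IOV capability reports fewer than
 * 4 TotalVFs, and otherwise returns the pci_enable_sriov() result.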
6225 **/ 6226 int 6227 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 6228 { 6229 struct pci_dev *pdev = phba->pcidev; 6230 uint16_t max_nr_vfn; 6231 int rc; 6232 6233 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 6234 if (nr_vfn > max_nr_vfn) { 6235 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6236 "3057 Requested vfs (%d) greater than " 6237 "supported vfs (%d)", nr_vfn, max_nr_vfn); 6238 return -EINVAL; 6239 } 6240 6241 rc = pci_enable_sriov(pdev, nr_vfn); 6242 if (rc) { 6243 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6244 "2806 Failed to enable sriov on this device " 6245 "with vfn number nr_vf:%d, rc:%d\n", 6246 nr_vfn, rc); 6247 } else 6248 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6249 "2807 Successful enable sriov on this device " 6250 "with vfn number nr_vf:%d\n", nr_vfn); 6251 return rc; 6252 } 6253 6254 /** 6255 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources. 6256 * @phba: pointer to lpfc hba data structure. 6257 * 6258 * This routine is invoked to set up the driver internal resources before the 6259 * device specific resource setup to support the HBA device it attached to. 6260 * 6261 * Return codes 6262 * 0 - successful 6263 * other values - error 6264 **/ 6265 static int 6266 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 6267 { 6268 struct lpfc_sli *psli = &phba->sli; 6269 6270 /* 6271 * Driver resources common to all SLI revisions 6272 */ 6273 atomic_set(&phba->fast_event_count, 0); 6274 atomic_set(&phba->dbg_log_idx, 0); 6275 atomic_set(&phba->dbg_log_cnt, 0); 6276 atomic_set(&phba->dbg_log_dmping, 0); 6277 spin_lock_init(&phba->hbalock); 6278 6279 /* Initialize port_list spinlock */ 6280 spin_lock_init(&phba->port_list_lock); 6281 INIT_LIST_HEAD(&phba->port_list); 6282 6283 INIT_LIST_HEAD(&phba->work_list); 6284 init_waitqueue_head(&phba->wait_4_mlo_m_q); 6285 6286 /* Initialize the wait queue head for the kernel thread */ 6287 init_waitqueue_head(&phba->work_waitq); 6288 6289 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6290 "1403 Protocols supported %s %s %s\n", 6291 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 6292 "SCSI" : " "), 6293 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 6294 "NVME" : " "), 6295 (phba->nvmet_support ?
"NVMET" : " ")); 6296 6297 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 6298 spin_lock_init(&phba->scsi_buf_list_get_lock); 6299 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 6300 spin_lock_init(&phba->scsi_buf_list_put_lock); 6301 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 6302 6303 /* Initialize the fabric iocb list */ 6304 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6305 6306 /* Initialize list to save ELS buffers */ 6307 INIT_LIST_HEAD(&phba->elsbuf); 6308 6309 /* Initialize FCF connection rec list */ 6310 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 6311 6312 /* Initialize OAS configuration list */ 6313 spin_lock_init(&phba->devicelock); 6314 INIT_LIST_HEAD(&phba->luns); 6315 6316 /* MBOX heartbeat timer */ 6317 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 6318 /* Fabric block timer */ 6319 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 6320 /* EA polling mode timer */ 6321 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 6322 /* Heartbeat timer */ 6323 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 6324 6325 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 6326 6327 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, 6328 lpfc_idle_stat_delay_work); 6329 6330 return 0; 6331 } 6332 6333 /** 6334 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 6335 * @phba: pointer to lpfc hba data structure. 6336 * 6337 * This routine is invoked to set up the driver internal resources specific to 6338 * support the SLI-3 HBA device it attached to. 6339 * 6340 * Return codes 6341 * 0 - successful 6342 * other values - error 6343 **/ 6344 static int 6345 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 6346 { 6347 int rc, entry_sz; 6348 6349 /* 6350 * Initialize timers used by driver 6351 */ 6352 6353 /* FCP polling mode timer */ 6354 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 6355 6356 /* Host attention work mask setup */ 6357 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6358 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6359 6360 /* Get all the module params for configuring this host */ 6361 lpfc_get_cfgparam(phba); 6362 /* Set up phase-1 common device driver resources */ 6363 6364 rc = lpfc_setup_driver_resource_phase1(phba); 6365 if (rc) 6366 return -ENODEV; 6367 6368 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 6369 phba->menlo_flag |= HBA_MENLO_SUPPORT; 6370 /* check for menlo minimum sg count */ 6371 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 6372 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 6373 } 6374 6375 if (!phba->sli.sli3_ring) 6376 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 6377 sizeof(struct lpfc_sli_ring), 6378 GFP_KERNEL); 6379 if (!phba->sli.sli3_ring) 6380 return -ENOMEM; 6381 6382 /* 6383 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 6384 * used to create the sg_dma_buf_pool must be dynamically calculated. 6385 */ 6386 6387 if (phba->sli_rev == LPFC_SLI_REV4) 6388 entry_sz = sizeof(struct sli4_sge); 6389 else 6390 entry_sz = sizeof(struct ulp_bde64); 6391 6392 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 6393 if (phba->cfg_enable_bg) { 6394 /* 6395 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 6396 * the FCP rsp, and a BDE for each. 
Since we have no control 6397 * over how many protection data segments the SCSI Layer 6398 * will hand us (i.e. there could be one for every block 6399 * in the IO), we just allocate enough BDEs to accommodate 6400 * our maximum and limit lpfc_sg_seg_cnt to 6401 * minimize the risk of running out. 6402 */ 6403 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6404 sizeof(struct fcp_rsp) + 6405 (LPFC_MAX_SG_SEG_CNT * entry_sz); 6406 6407 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 6408 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 6409 6410 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 6411 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 6412 } else { 6413 /* 6414 * The scsi_buf for a regular I/O will hold the FCP cmnd, 6415 * the FCP rsp, a BDE for each, and a BDE for up to 6416 * cfg_sg_seg_cnt data segments. 6417 */ 6418 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6419 sizeof(struct fcp_rsp) + 6420 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 6421 6422 /* Total BDEs in BPL for scsi_sg_list (e.g. 64 data segments -> 66 BDEs) */ 6423 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 6424 } 6425 6426 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6427 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 6428 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6429 phba->cfg_total_seg_cnt); 6430 6431 phba->max_vpi = LPFC_MAX_VPI; 6432 /* This will be set to correct value after config_port mbox */ 6433 phba->max_vports = 0; 6434 6435 /* 6436 * Initialize the SLI Layer to run with lpfc HBAs. 6437 */ 6438 lpfc_sli_setup(phba); 6439 lpfc_sli_queue_init(phba); 6440 6441 /* Allocate device driver memory */ 6442 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 6443 return -ENOMEM; 6444 6445 phba->lpfc_sg_dma_buf_pool = 6446 dma_pool_create("lpfc_sg_dma_buf_pool", 6447 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, 6448 BPL_ALIGN_SZ, 0); 6449 6450 if (!phba->lpfc_sg_dma_buf_pool) 6451 goto fail_free_mem; 6452 6453 phba->lpfc_cmd_rsp_buf_pool = 6454 dma_pool_create("lpfc_cmd_rsp_buf_pool", 6455 &phba->pcidev->dev, 6456 sizeof(struct fcp_cmnd) + 6457 sizeof(struct fcp_rsp), 6458 BPL_ALIGN_SZ, 0); 6459 6460 if (!phba->lpfc_cmd_rsp_buf_pool) 6461 goto fail_free_dma_buf_pool; 6462 6463 /* 6464 * Enable sr-iov virtual functions if supported and configured 6465 * through the module parameter. 6466 */ 6467 if (phba->cfg_sriov_nr_virtfn > 0) { 6468 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6469 phba->cfg_sriov_nr_virtfn); 6470 if (rc) { 6471 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6472 "2808 Requested number of SR-IOV " 6473 "virtual functions (%d) is not " 6474 "supported\n", 6475 phba->cfg_sriov_nr_virtfn); 6476 phba->cfg_sriov_nr_virtfn = 0; 6477 } 6478 } 6479 6480 return 0; 6481 6482 fail_free_dma_buf_pool: 6483 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 6484 phba->lpfc_sg_dma_buf_pool = NULL; 6485 fail_free_mem: 6486 lpfc_mem_free(phba); 6487 return -ENOMEM; 6488 } 6489 6490 /** 6491 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 6492 * @phba: pointer to lpfc hba data structure. 6493 * 6494 * This routine is invoked to unset the driver internal resources set up 6495 * specific for supporting the SLI-3 HBA device it attached to.
6496 **/ 6497 static void 6498 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 6499 { 6500 /* Free device driver memory allocated */ 6501 lpfc_mem_free_all(phba); 6502 6503 return; 6504 } 6505 6506 /** 6507 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 6508 * @phba: pointer to lpfc hba data structure. 6509 * 6510 * This routine is invoked to set up the driver internal resources specific to 6511 * support the SLI-4 HBA device it attached to. 6512 * 6513 * Return codes 6514 * 0 - successful 6515 * other values - error 6516 **/ 6517 static int 6518 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 6519 { 6520 LPFC_MBOXQ_t *mboxq; 6521 MAILBOX_t *mb; 6522 int rc, i, max_buf_size; 6523 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 6524 struct lpfc_mqe *mqe; 6525 int longs; 6526 int extra; 6527 uint64_t wwn; 6528 u32 if_type; 6529 u32 if_fam; 6530 6531 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 6532 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 6533 phba->sli4_hba.curr_disp_cpu = 0; 6534 6535 /* Get all the module params for configuring this host */ 6536 lpfc_get_cfgparam(phba); 6537 6538 /* Set up phase-1 common device driver resources */ 6539 rc = lpfc_setup_driver_resource_phase1(phba); 6540 if (rc) 6541 return -ENODEV; 6542 6543 /* Before proceed, wait for POST done and device ready */ 6544 rc = lpfc_sli4_post_status_check(phba); 6545 if (rc) 6546 return -ENODEV; 6547 6548 /* Allocate all driver workqueues here */ 6549 6550 /* The lpfc_wq workqueue for deferred irq use */ 6551 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6552 6553 /* 6554 * Initialize timers used by driver 6555 */ 6556 6557 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 6558 6559 /* FCF rediscover timer */ 6560 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 6561 6562 /* 6563 * Control structure for handling external multi-buffer mailbox 6564 * command pass-through. 6565 */ 6566 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 6567 sizeof(struct lpfc_mbox_ext_buf_ctx)); 6568 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 6569 6570 phba->max_vpi = LPFC_MAX_VPI; 6571 6572 /* This will be set to correct value after the read_config mbox */ 6573 phba->max_vports = 0; 6574 6575 /* Program the default value of vlan_id and fc_map */ 6576 phba->valid_vlan = 0; 6577 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 6578 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 6579 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 6580 6581 /* 6582 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 6583 * we will associate a new ring, for each EQ/CQ/WQ tuple. 6584 * The WQ create will allocate the ring. 6585 */ 6586 6587 /* Initialize buffer queue management fields */ 6588 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6589 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 6590 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 6591 6592 /* 6593 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
6594 */ 6595 /* Initialize the Abort buffer list used by driver */ 6596 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 6597 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 6598 6599 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6600 /* Initialize the Abort nvme buffer list used by driver */ 6601 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 6602 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6603 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 6604 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 6605 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 6606 } 6607 6608 /* These abort lists are used by the worker thread */ 6609 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 6610 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 6611 spin_lock_init(&phba->sli4_hba.asynce_list_lock); 6612 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); 6613 6614 /* 6615 * Initialize driver internal slow-path work queues 6616 */ 6617 6618 /* Driver internal slow-path CQ Event pool */ 6619 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 6620 /* Response IOCB work queue list */ 6621 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 6622 /* Asynchronous event CQ Event work queue list */ 6623 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 6624 /* Slow-path XRI aborted CQ Event work queue list */ 6625 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 6626 /* Receive queue CQ Event work queue list */ 6627 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 6628 6629 /* Initialize extent block lists. */ 6630 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 6631 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 6632 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6633 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6634 6635 /* Initialize mboxq lists. These must be valid even if the early 6636 * init routines fail, so that cleanup can walk them safely. 6637 */ 6638 INIT_LIST_HEAD(&phba->sli.mboxq); 6639 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6640 6641 /* initialize optic_state to 0xFF */ 6642 phba->sli4_hba.lnk_info.optic_state = 0xff; 6643 6644 /* Allocate device driver memory */ 6645 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6646 if (rc) 6647 return -ENOMEM; 6648 6649 /* IF Type 2 ports get initialized now. */ 6650 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 6651 LPFC_SLI_INTF_IF_TYPE_2) { 6652 rc = lpfc_pci_function_reset(phba); 6653 if (unlikely(rc)) { 6654 rc = -ENODEV; 6655 goto out_free_mem; 6656 } 6657 phba->temp_sensor_support = 1; 6658 } 6659 6660 /* Create the bootstrap mailbox command */ 6661 rc = lpfc_create_bootstrap_mbox(phba); 6662 if (unlikely(rc)) 6663 goto out_free_mem; 6664 6665 /* Set up the host's endian order with the device. */ 6666 rc = lpfc_setup_endian_order(phba); 6667 if (unlikely(rc)) 6668 goto out_free_bsmbx; 6669 6670 /* Set up the hba's configuration parameters. */ 6671 rc = lpfc_sli4_read_config(phba); 6672 if (unlikely(rc)) 6673 goto out_free_bsmbx; 6674 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6675 if (unlikely(rc)) 6676 goto out_free_bsmbx; 6677 6678 /* IF Type 0 ports get initialized now.
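 * (IF type 2 ports were function-reset earlier, right after lpfc_mem_alloc();
 * type 0 ports defer the PCI function reset until the bootstrap mailbox,
 * endian order, and configuration have been set up.)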
*/ 6679 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6680 LPFC_SLI_INTF_IF_TYPE_0) { 6681 rc = lpfc_pci_function_reset(phba); 6682 if (unlikely(rc)) 6683 goto out_free_bsmbx; 6684 } 6685 6686 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6687 GFP_KERNEL); 6688 if (!mboxq) { 6689 rc = -ENOMEM; 6690 goto out_free_bsmbx; 6691 } 6692 6693 /* Check for NVMET being configured */ 6694 phba->nvmet_support = 0; 6695 if (lpfc_enable_nvmet_cnt) { 6696 6697 /* First get WWN of HBA instance */ 6698 lpfc_read_nv(phba, mboxq); 6699 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6700 if (rc != MBX_SUCCESS) { 6701 lpfc_printf_log(phba, KERN_ERR, 6702 LOG_TRACE_EVENT, 6703 "6016 Mailbox failed , mbxCmd x%x " 6704 "READ_NV, mbxStatus x%x\n", 6705 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6706 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6707 mempool_free(mboxq, phba->mbox_mem_pool); 6708 rc = -EIO; 6709 goto out_free_bsmbx; 6710 } 6711 mb = &mboxq->u.mb; 6712 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6713 sizeof(uint64_t)); 6714 wwn = cpu_to_be64(wwn); 6715 phba->sli4_hba.wwnn.u.name = wwn; 6716 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6717 sizeof(uint64_t)); 6718 /* wwn is WWPN of HBA instance */ 6719 wwn = cpu_to_be64(wwn); 6720 phba->sli4_hba.wwpn.u.name = wwn; 6721 6722 /* Check to see if it matches any module parameter */ 6723 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6724 if (wwn == lpfc_enable_nvmet[i]) { 6725 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6726 if (lpfc_nvmet_mem_alloc(phba)) 6727 break; 6728 6729 phba->nvmet_support = 1; /* a match */ 6730 6731 lpfc_printf_log(phba, KERN_ERR, 6732 LOG_TRACE_EVENT, 6733 "6017 NVME Target %016llx\n", 6734 wwn); 6735 #else 6736 lpfc_printf_log(phba, KERN_ERR, 6737 LOG_TRACE_EVENT, 6738 "6021 Can't enable NVME Target." 6739 " NVME_TARGET_FC infrastructure" 6740 " is not in kernel\n"); 6741 #endif 6742 /* Not supported for NVMET */ 6743 phba->cfg_xri_rebalancing = 0; 6744 if (phba->irq_chann_mode == NHT_MODE) { 6745 phba->cfg_irq_chann = 6746 phba->sli4_hba.num_present_cpu; 6747 phba->cfg_hdw_queue = 6748 phba->sli4_hba.num_present_cpu; 6749 phba->irq_chann_mode = NORMAL_MODE; 6750 } 6751 break; 6752 } 6753 } 6754 } 6755 6756 lpfc_nvme_mod_param_dep(phba); 6757 6758 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 6759 lpfc_supported_pages(mboxq); 6760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6761 if (!rc) { 6762 mqe = &mboxq->u.mqe; 6763 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 6764 LPFC_MAX_SUPPORTED_PAGES); 6765 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 6766 switch (pn_page[i]) { 6767 case LPFC_SLI4_PARAMETERS: 6768 phba->sli4_hba.pc_sli4_params.supported = 1; 6769 break; 6770 default: 6771 break; 6772 } 6773 } 6774 /* Read the port's SLI4 Parameters capabilities if supported. */ 6775 if (phba->sli4_hba.pc_sli4_params.supported) 6776 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6777 if (rc) { 6778 mempool_free(mboxq, phba->mbox_mem_pool); 6779 rc = -EIO; 6780 goto out_free_bsmbx; 6781 } 6782 } 6783 6784 /* 6785 * Get sli4 parameters that override parameters from Port capabilities. 6786 * If this call fails, it isn't critical unless the SLI4 parameters come 6787 * back in conflict. 
6788 */ 6789 rc = lpfc_get_sli4_parameters(phba, mboxq); 6790 if (rc) { 6791 if_type = bf_get(lpfc_sli_intf_if_type, 6792 &phba->sli4_hba.sli_intf); 6793 if_fam = bf_get(lpfc_sli_intf_sli_family, 6794 &phba->sli4_hba.sli_intf); 6795 if (phba->sli4_hba.extents_in_use && 6796 phba->sli4_hba.rpi_hdrs_in_use) { 6797 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6798 "2999 Unsupported SLI4 Parameters " 6799 "Extents and RPI headers enabled.\n"); 6800 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6801 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6802 mempool_free(mboxq, phba->mbox_mem_pool); 6803 rc = -EIO; 6804 goto out_free_bsmbx; 6805 } 6806 } 6807 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6808 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 6809 mempool_free(mboxq, phba->mbox_mem_pool); 6810 rc = -EIO; 6811 goto out_free_bsmbx; 6812 } 6813 } 6814 6815 /* 6816 * 1 for cmd, 1 for rsp, NVME adds an extra one 6817 * for boundary conditions in its max_sgl_segment template. 6818 */ 6819 extra = 2; 6820 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 6821 extra++; 6822 6823 /* 6824 * Regardless of what family our adapter is in, we are 6825 * limited to 2 pages, 512 SGEs, for our SGL. 6826 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 6827 */ 6828 max_buf_size = (2 * SLI4_PAGE_SIZE); 6829 6830 /* 6831 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 6832 * used to create the sg_dma_buf_pool must be calculated. 6833 */ 6834 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 6835 /* Both cfg_enable_bg and cfg_external_dif code paths */ 6836 6837 /* 6838 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 6839 * the FCP rsp, and a SGE. Since we have no control 6840 * over how many protection segments the SCSI Layer 6841 * will hand us (i.e. there could be one for every block 6842 * in the IO), just allocate enough SGEs to accommodate 6843 * our maximum and limit lpfc_sg_seg_cnt 6844 * to minimize the risk of running out. 6845 */ 6846 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6847 sizeof(struct fcp_rsp) + max_buf_size; 6848 6849 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 6850 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 6851 6852 /* 6853 * If supporting DIF, reduce the seg count for scsi to 6854 * allow room for the DIF sges. 6855 */ 6856 if (phba->cfg_enable_bg && 6857 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 6858 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 6859 else 6860 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6861 6862 } else { 6863 /* 6864 * The scsi_buf for a regular I/O holds the FCP cmnd, 6865 * the FCP rsp, a SGE for each, and a SGE for up to 6866 * cfg_sg_seg_cnt data segments. 6867 */ 6868 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6869 sizeof(struct fcp_rsp) + 6870 ((phba->cfg_sg_seg_cnt + extra) * 6871 sizeof(struct sli4_sge)); 6872 6873 /* Total SGEs for scsi_sg_list */ 6874 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 6875 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6876 6877 /* 6878 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 6879 * need to post 1 page for the SGL.
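 * Worked example (assuming a 4096-byte SLI4_PAGE_SIZE and a 16-byte
 * struct sli4_sge): 4096 / 16 = 256 SGEs fit in one page, so any
 * (cfg_sg_seg_cnt + extra) <= 256 keeps the SGL to a single page.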
6880 */ 6881 } 6882 6883 if (phba->cfg_xpsgl && !phba->nvmet_support) 6884 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 6885 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 6886 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 6887 else 6888 phba->cfg_sg_dma_buf_size = 6889 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 6890 6891 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 6892 sizeof(struct sli4_sge); 6893 6894 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 6895 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6896 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 6897 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 6898 "6300 Reducing NVME sg segment " 6899 "cnt to %d\n", 6900 LPFC_MAX_NVME_SEG_CNT); 6901 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 6902 } else 6903 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 6904 } 6905 6906 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6907 "9087 sg_seg_cnt:%d dmabuf_size:%d " 6908 "total:%d scsi:%d nvme:%d\n", 6909 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6910 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 6911 phba->cfg_nvme_seg_cnt); 6912 6913 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 6914 i = phba->cfg_sg_dma_buf_size; 6915 else 6916 i = SLI4_PAGE_SIZE; 6917 6918 phba->lpfc_sg_dma_buf_pool = 6919 dma_pool_create("lpfc_sg_dma_buf_pool", 6920 &phba->pcidev->dev, 6921 phba->cfg_sg_dma_buf_size, 6922 i, 0); 6923 if (!phba->lpfc_sg_dma_buf_pool) 6924 goto out_free_bsmbx; 6925 6926 phba->lpfc_cmd_rsp_buf_pool = 6927 dma_pool_create("lpfc_cmd_rsp_buf_pool", 6928 &phba->pcidev->dev, 6929 sizeof(struct fcp_cmnd) + 6930 sizeof(struct fcp_rsp), 6931 i, 0); 6932 if (!phba->lpfc_cmd_rsp_buf_pool) 6933 goto out_free_sg_dma_buf; 6934 6935 mempool_free(mboxq, phba->mbox_mem_pool); 6936 6937 /* Verify OAS is supported */ 6938 lpfc_sli4_oas_verify(phba); 6939 6940 /* Verify RAS support on adapter */ 6941 lpfc_sli4_ras_init(phba); 6942 6943 /* Verify all the SLI4 queues */ 6944 rc = lpfc_sli4_queue_verify(phba); 6945 if (rc) 6946 goto out_free_cmd_rsp_buf; 6947 6948 /* Create driver internal CQE event pool */ 6949 rc = lpfc_sli4_cq_event_pool_create(phba); 6950 if (rc) 6951 goto out_free_cmd_rsp_buf; 6952 6953 /* Initialize sgl lists per host */ 6954 lpfc_init_sgl_list(phba); 6955 6956 /* Allocate and initialize active sgl array */ 6957 rc = lpfc_init_active_sgl_array(phba); 6958 if (rc) { 6959 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6960 "1430 Failed to initialize sgl list.\n"); 6961 goto out_destroy_cq_event_pool; 6962 } 6963 rc = lpfc_sli4_init_rpi_hdrs(phba); 6964 if (rc) { 6965 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6966 "1432 Failed to initialize rpi headers.\n"); 6967 goto out_free_active_sgl; 6968 } 6969 6970 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6971 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6972 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 6973 GFP_KERNEL); 6974 if (!phba->fcf.fcf_rr_bmask) { 6975 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6976 "2759 Failed allocate memory for FCF round " 6977 "robin failover bmask\n"); 6978 rc = -ENOMEM; 6979 goto out_remove_rpi_hdrs; 6980 } 6981 6982 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 6983 sizeof(struct lpfc_hba_eq_hdl), 6984 GFP_KERNEL); 6985 if (!phba->sli4_hba.hba_eq_hdl) { 6986 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6987 "2572 Failed allocate memory for " 6988 "fast-path per-EQ handle array\n"); 6989 rc = -ENOMEM; 
6990 goto out_free_fcf_rr_bmask; 6991 } 6992 6993 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 6994 sizeof(struct lpfc_vector_map_info), 6995 GFP_KERNEL); 6996 if (!phba->sli4_hba.cpu_map) { 6997 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6998 "3327 Failed allocate memory for msi-x " 6999 "interrupt vector mapping\n"); 7000 rc = -ENOMEM; 7001 goto out_free_hba_eq_hdl; 7002 } 7003 7004 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 7005 if (!phba->sli4_hba.eq_info) { 7006 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7007 "3321 Failed allocation for per_cpu stats\n"); 7008 rc = -ENOMEM; 7009 goto out_free_hba_cpu_map; 7010 } 7011 7012 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, 7013 sizeof(*phba->sli4_hba.idle_stat), 7014 GFP_KERNEL); 7015 if (!phba->sli4_hba.idle_stat) { 7016 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7017 "3390 Failed allocation for idle_stat\n"); 7018 rc = -ENOMEM; 7019 goto out_free_hba_eq_info; 7020 } 7021 7022 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 7023 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 7024 if (!phba->sli4_hba.c_stat) { 7025 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7026 "3332 Failed allocating per cpu hdwq stats\n"); 7027 rc = -ENOMEM; 7028 goto out_free_hba_idle_stat; 7029 } 7030 #endif 7031 7032 /* 7033 * Enable sr-iov virtual functions if supported and configured 7034 * through the module parameter. 7035 */ 7036 if (phba->cfg_sriov_nr_virtfn > 0) { 7037 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 7038 phba->cfg_sriov_nr_virtfn); 7039 if (rc) { 7040 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7041 "3020 Requested number of SR-IOV " 7042 "virtual functions (%d) is not " 7043 "supported\n", 7044 phba->cfg_sriov_nr_virtfn); 7045 phba->cfg_sriov_nr_virtfn = 0; 7046 } 7047 } 7048 7049 return 0; 7050 7051 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 7052 out_free_hba_idle_stat: 7053 kfree(phba->sli4_hba.idle_stat); 7054 #endif 7055 out_free_hba_eq_info: 7056 free_percpu(phba->sli4_hba.eq_info); 7057 out_free_hba_cpu_map: 7058 kfree(phba->sli4_hba.cpu_map); 7059 out_free_hba_eq_hdl: 7060 kfree(phba->sli4_hba.hba_eq_hdl); 7061 out_free_fcf_rr_bmask: 7062 kfree(phba->fcf.fcf_rr_bmask); 7063 out_remove_rpi_hdrs: 7064 lpfc_sli4_remove_rpi_hdrs(phba); 7065 out_free_active_sgl: 7066 lpfc_free_active_sgl(phba); 7067 out_destroy_cq_event_pool: 7068 lpfc_sli4_cq_event_pool_destroy(phba); 7069 out_free_cmd_rsp_buf: 7070 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 7071 phba->lpfc_cmd_rsp_buf_pool = NULL; 7072 out_free_sg_dma_buf: 7073 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 7074 phba->lpfc_sg_dma_buf_pool = NULL; 7075 out_free_bsmbx: 7076 lpfc_destroy_bootstrap_mbox(phba); 7077 out_free_mem: 7078 lpfc_mem_free(phba); 7079 return rc; 7080 } 7081 7082 /** 7083 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 7084 * @phba: pointer to lpfc hba data structure. 7085 * 7086 * This routine is invoked to unset the driver internal resources set up 7087 * specific for supporting the SLI-4 HBA device it attached to. 
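 * Teardown mirrors lpfc_sli4_driver_resource_setup(): per-CPU stats, the
 * CPU map, EQ handles, the FCF roundrobin bmask, RPI headers, sgl lists,
 * the CQ event pool, resource identifiers, the bootstrap mailbox, and the
 * SLI memory pools are released in roughly the reverse order of allocation.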
7088 **/ 7089 static void 7090 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 7091 { 7092 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 7093 7094 free_percpu(phba->sli4_hba.eq_info); 7095 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 7096 free_percpu(phba->sli4_hba.c_stat); 7097 #endif 7098 kfree(phba->sli4_hba.idle_stat); 7099 7100 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 7101 kfree(phba->sli4_hba.cpu_map); 7102 phba->sli4_hba.num_possible_cpu = 0; 7103 phba->sli4_hba.num_present_cpu = 0; 7104 phba->sli4_hba.curr_disp_cpu = 0; 7105 cpumask_clear(&phba->sli4_hba.irq_aff_mask); 7106 7107 /* Free memory allocated for fast-path work queue handles */ 7108 kfree(phba->sli4_hba.hba_eq_hdl); 7109 7110 /* Free the allocated rpi headers. */ 7111 lpfc_sli4_remove_rpi_hdrs(phba); 7112 lpfc_sli4_remove_rpis(phba); 7113 7114 /* Free eligible FCF index bmask */ 7115 kfree(phba->fcf.fcf_rr_bmask); 7116 7117 /* Free the ELS sgl list */ 7118 lpfc_free_active_sgl(phba); 7119 lpfc_free_els_sgl_list(phba); 7120 lpfc_free_nvmet_sgl_list(phba); 7121 7122 /* Free the completion queue EQ event pool */ 7123 lpfc_sli4_cq_event_release_all(phba); 7124 lpfc_sli4_cq_event_pool_destroy(phba); 7125 7126 /* Release resource identifiers. */ 7127 lpfc_sli4_dealloc_resource_identifiers(phba); 7128 7129 /* Free the bsmbx region. */ 7130 lpfc_destroy_bootstrap_mbox(phba); 7131 7132 /* Free the SLI Layer memory with SLI4 HBAs */ 7133 lpfc_mem_free_all(phba); 7134 7135 /* Free the current connect table */ 7136 list_for_each_entry_safe(conn_entry, next_conn_entry, 7137 &phba->fcf_conn_rec_list, list) { 7138 list_del_init(&conn_entry->list); 7139 kfree(conn_entry); 7140 } 7141 7142 return; 7143 } 7144 7145 /** 7146 * lpfc_init_api_table_setup - Set up init api function jump table 7147 * @phba: The hba struct for which this call is being executed. 7148 * @dev_grp: The HBA PCI-Device group number. 7149 * 7150 * This routine sets up the device INIT interface API function jump table 7151 * in @phba struct. 7152 * 7153 * Returns: 0 - success, -ENODEV - failure. 7154 **/ 7155 int 7156 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7157 { 7158 phba->lpfc_hba_init_link = lpfc_hba_init_link; 7159 phba->lpfc_hba_down_link = lpfc_hba_down_link; 7160 phba->lpfc_selective_reset = lpfc_selective_reset; 7161 switch (dev_grp) { 7162 case LPFC_PCI_DEV_LP: 7163 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 7164 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 7165 phba->lpfc_stop_port = lpfc_stop_port_s3; 7166 break; 7167 case LPFC_PCI_DEV_OC: 7168 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 7169 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 7170 phba->lpfc_stop_port = lpfc_stop_port_s4; 7171 break; 7172 default: 7173 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7174 "1431 Invalid HBA PCI-device group: 0x%x\n", 7175 dev_grp); 7176 return -ENODEV; 7177 } 7178 return 0; 7179 } 7180 7181 /** 7182 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 7183 * @phba: pointer to lpfc hba data structure. 7184 * 7185 * This routine is invoked to set up the driver internal resources after the 7186 * device specific resource setup to support the HBA device it attached to. 7187 * 7188 * Return codes 7189 * 0 - successful 7190 * other values - error 7191 **/ 7192 static int 7193 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 7194 { 7195 int error; 7196 7197 /* Startup the kernel thread for this host adapter. 
*/ 7198 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7199 "lpfc_worker_%d", phba->brd_no); 7200 if (IS_ERR(phba->worker_thread)) { 7201 error = PTR_ERR(phba->worker_thread); 7202 return error; 7203 } 7204 7205 return 0; 7206 } 7207 7208 /** 7209 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 7210 * @phba: pointer to lpfc hba data structure. 7211 * 7212 * This routine is invoked to unset the driver internal resources set up after 7213 * the device specific resource setup for supporting the HBA device it 7214 * attached to. 7215 **/ 7216 static void 7217 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 7218 { 7219 if (phba->wq) { 7220 flush_workqueue(phba->wq); 7221 destroy_workqueue(phba->wq); 7222 phba->wq = NULL; 7223 } 7224 7225 /* Stop kernel worker thread */ 7226 if (phba->worker_thread) 7227 kthread_stop(phba->worker_thread); 7228 } 7229 7230 /** 7231 * lpfc_free_iocb_list - Free iocb list. 7232 * @phba: pointer to lpfc hba data structure. 7233 * 7234 * This routine is invoked to free the driver's IOCB list and memory. 7235 **/ 7236 void 7237 lpfc_free_iocb_list(struct lpfc_hba *phba) 7238 { 7239 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 7240 7241 spin_lock_irq(&phba->hbalock); 7242 list_for_each_entry_safe(iocbq_entry, iocbq_next, 7243 &phba->lpfc_iocb_list, list) { 7244 list_del(&iocbq_entry->list); 7245 kfree(iocbq_entry); 7246 phba->total_iocbq_bufs--; 7247 } 7248 spin_unlock_irq(&phba->hbalock); 7249 7250 return; 7251 } 7252 7253 /** 7254 * lpfc_init_iocb_list - Allocate and initialize iocb list. 7255 * @phba: pointer to lpfc hba data structure. 7256 * @iocb_count: number of requested iocbs 7257 * 7258 * This routine is invoked to allocate and initialize the driver's IOCB 7259 * list and set up the IOCB tag array accordingly. 7260 * 7261 * Return codes 7262 * 0 - successful 7263 * other values - error 7264 **/ 7265 int 7266 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 7267 { 7268 struct lpfc_iocbq *iocbq_entry = NULL; 7269 uint16_t iotag; 7270 int i; 7271 7272 /* Initialize and populate the iocb list per host. */ 7273 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 7274 for (i = 0; i < iocb_count; i++) { 7275 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 7276 if (iocbq_entry == NULL) { 7277 printk(KERN_ERR "%s: only allocated %d iocbs of " 7278 "expected %d count. Unloading driver.\n", 7279 __func__, i, iocb_count); 7280 goto out_free_iocbq; 7281 } 7282 7283 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 7284 if (iotag == 0) { 7285 kfree(iocbq_entry); 7286 printk(KERN_ERR "%s: failed to allocate IOTAG. " 7287 "Unloading driver.\n", __func__); 7288 goto out_free_iocbq; 7289 } 7290 iocbq_entry->sli4_lxritag = NO_XRI; 7291 iocbq_entry->sli4_xritag = NO_XRI; 7292 7293 spin_lock_irq(&phba->hbalock); 7294 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 7295 phba->total_iocbq_bufs++; 7296 spin_unlock_irq(&phba->hbalock); 7297 } 7298 7299 return 0; 7300 7301 out_free_iocbq: 7302 lpfc_free_iocb_list(phba); 7303 7304 return -ENOMEM; 7305 } 7306 7307 /** 7308 * lpfc_free_sgl_list - Free a given sgl list. 7309 * @phba: pointer to lpfc hba data structure. 7310 * @sglq_list: pointer to the head of sgl list. 7311 * 7312 * This routine is invoked to free a given sgl list and memory.
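 * As with the iocb list teardown above, list_for_each_entry_safe() is
 * used because entries are deleted and freed while the list is being
 * walked; the _safe variant caches the next pointer before the current
 * entry is freed. Sketch of the pattern (hypothetical struct foo
 * linked through its 'list' member):
 *
 *	struct foo *cur, *next;
 *
 *	list_for_each_entry_safe(cur, next, &head, list) {
 *		list_del(&cur->list);
 *		kfree(cur);
 *	}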
7313 **/ 7314 void 7315 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 7316 { 7317 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7318 7319 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 7320 list_del(&sglq_entry->list); 7321 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 7322 kfree(sglq_entry); 7323 } 7324 } 7325 7326 /** 7327 * lpfc_free_els_sgl_list - Free els sgl list. 7328 * @phba: pointer to lpfc hba data structure. 7329 * 7330 * This routine is invoked to free the driver's els sgl list and memory. 7331 **/ 7332 static void 7333 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 7334 { 7335 LIST_HEAD(sglq_list); 7336 7337 /* Retrieve all els sgls from driver list */ 7338 spin_lock_irq(&phba->hbalock); 7339 spin_lock(&phba->sli4_hba.sgl_list_lock); 7340 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 7341 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7342 spin_unlock_irq(&phba->hbalock); 7343 7344 /* Now free the sgl list */ 7345 lpfc_free_sgl_list(phba, &sglq_list); 7346 } 7347 7348 /** 7349 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 7350 * @phba: pointer to lpfc hba data structure. 7351 * 7352 * This routine is invoked to free the driver's nvmet sgl list and memory. 7353 **/ 7354 static void 7355 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 7356 { 7357 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7358 LIST_HEAD(sglq_list); 7359 7360 /* Retrieve all nvmet sgls from driver list */ 7361 spin_lock_irq(&phba->hbalock); 7362 spin_lock(&phba->sli4_hba.sgl_list_lock); 7363 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 7364 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7365 spin_unlock_irq(&phba->hbalock); 7366 7367 /* Now free the sgl list */ 7368 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 7369 list_del(&sglq_entry->list); 7370 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 7371 kfree(sglq_entry); 7372 } 7373 7374 /* Update the nvmet_xri_cnt to reflect no current sgls. 7375 * The next initialization cycle sets the count and allocates 7376 * the sgls over again. 7377 */ 7378 phba->sli4_hba.nvmet_xri_cnt = 0; 7379 } 7380 7381 /** 7382 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 7383 * @phba: pointer to lpfc hba data structure. 7384 * 7385 * This routine is invoked to allocate the driver's active sgl memory. 7386 * This array will hold the sglq_entry's for active IOs. 7387 **/ 7388 static int 7389 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 7390 { 7391 int size; 7392 size = sizeof(struct lpfc_sglq *); 7393 size *= phba->sli4_hba.max_cfg_param.max_xri; 7394 7395 phba->sli4_hba.lpfc_sglq_active_list = 7396 kzalloc(size, GFP_KERNEL); 7397 if (!phba->sli4_hba.lpfc_sglq_active_list) 7398 return -ENOMEM; 7399 return 0; 7400 } 7401 7402 /** 7403 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 7404 * @phba: pointer to lpfc hba data structure. 7405 * 7406 * This routine is invoked to walk through the array of active sglq entries 7407 * and free all of the resources. 7408 * This is just a place holder for now. 7409 **/ 7410 static void 7411 lpfc_free_active_sgl(struct lpfc_hba *phba) 7412 { 7413 kfree(phba->sli4_hba.lpfc_sglq_active_list); 7414 } 7415 7416 /** 7417 * lpfc_init_sgl_list - Allocate and initialize sgl list. 7418 * @phba: pointer to lpfc hba data structure. 
7419 * 7420 * This routine is invoked to allocate and initialize the driver's sgl 7421 * list and set up the sgl xritag tag array accordingly. 7422 * 7423 **/ 7424 static void 7425 lpfc_init_sgl_list(struct lpfc_hba *phba) 7426 { 7427 /* Initialize and populate the sglq list per host/VF. */ 7428 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 7429 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7430 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 7431 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7432 7433 /* els xri-sgl book keeping */ 7434 phba->sli4_hba.els_xri_cnt = 0; 7435 7436 /* nvme xri-buffer book keeping */ 7437 phba->sli4_hba.io_xri_cnt = 0; 7438 } 7439 7440 /** 7441 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 7442 * @phba: pointer to lpfc hba data structure. 7443 * 7444 * This routine is invoked to post rpi header templates to the 7445 * port for those SLI4 ports that do not support extents. This routine 7446 * posts a PAGE_SIZE memory region to the port to hold up to 7447 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine 7448 * and should be called only when interrupts are disabled. 7449 * 7450 * Return codes 7451 * 0 - successful 7452 * -ERROR - otherwise. 7453 **/ 7454 int 7455 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 7456 { 7457 int rc = 0; 7458 struct lpfc_rpi_hdr *rpi_hdr; 7459 7460 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 7461 if (!phba->sli4_hba.rpi_hdrs_in_use) 7462 return rc; 7463 if (phba->sli4_hba.extents_in_use) 7464 return -EIO; 7465 7466 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 7467 if (!rpi_hdr) { 7468 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7469 "0391 Error during rpi post operation\n"); 7470 lpfc_sli4_remove_rpis(phba); 7471 rc = -ENODEV; 7472 } 7473 7474 return rc; 7475 } 7476 7477 /** 7478 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 7479 * @phba: pointer to lpfc hba data structure. 7480 * 7481 * This routine is invoked to allocate a single 4KB memory region to 7482 * support rpis and stores them in the phba. This single region 7483 * provides support for up to 64 rpis. The region is used globally 7484 * by the device. 7485 * 7486 * Returns: 7487 * A valid rpi hdr on success. 7488 * A NULL pointer on any failure. 7489 **/ 7490 struct lpfc_rpi_hdr * 7491 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 7492 { 7493 uint16_t rpi_limit, curr_rpi_range; 7494 struct lpfc_dmabuf *dmabuf; 7495 struct lpfc_rpi_hdr *rpi_hdr; 7496 7497 /* 7498 * If the SLI4 port supports extents, posting the rpi header isn't 7499 * required. Set the expected maximum count and let the actual value 7500 * get set when extents are fully allocated. 7501 */ 7502 if (!phba->sli4_hba.rpi_hdrs_in_use) 7503 return NULL; 7504 if (phba->sli4_hba.extents_in_use) 7505 return NULL; 7506 7507 /* The limit on the logical index is just the max_rpi count. */ 7508 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 7509 7510 spin_lock_irq(&phba->hbalock); 7511 /* 7512 * Establish the starting RPI in this header block. The starting 7513 * rpi is normalized to a zero base because the physical rpi is 7514 * port based. 7515 */ 7516 curr_rpi_range = phba->sli4_hba.next_rpi; 7517 spin_unlock_irq(&phba->hbalock); 7518 7519 /* Reached full RPI range */ 7520 if (curr_rpi_range == rpi_limit) 7521 return NULL; 7522 7523 /* 7524 * First allocate the protocol header region for the port. The 7525 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
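 * dma_alloc_coherent() returns memory aligned to at least the
 * smallest page order covering the requested size, so an allocation
 * of LPFC_HDR_TEMPLATE_SIZE is expected to satisfy the 4K alignment
 * requirement; the IS_ALIGNED() check below is a defensive guard
 * rather than an anticipated failure path.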
7526 */ 7527 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 7528 if (!dmabuf) 7529 return NULL; 7530 7531 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 7532 LPFC_HDR_TEMPLATE_SIZE, 7533 &dmabuf->phys, GFP_KERNEL); 7534 if (!dmabuf->virt) { 7535 rpi_hdr = NULL; 7536 goto err_free_dmabuf; 7537 } 7538 7539 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 7540 rpi_hdr = NULL; 7541 goto err_free_coherent; 7542 } 7543 7544 /* Save the rpi header data for cleanup later. */ 7545 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 7546 if (!rpi_hdr) 7547 goto err_free_coherent; 7548 7549 rpi_hdr->dmabuf = dmabuf; 7550 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 7551 rpi_hdr->page_count = 1; 7552 spin_lock_irq(&phba->hbalock); 7553 7554 /* The rpi_hdr stores the logical index only. */ 7555 rpi_hdr->start_rpi = curr_rpi_range; 7556 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 7557 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 7558 7559 spin_unlock_irq(&phba->hbalock); 7560 return rpi_hdr; 7561 7562 err_free_coherent: 7563 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 7564 dmabuf->virt, dmabuf->phys); 7565 err_free_dmabuf: 7566 kfree(dmabuf); 7567 return NULL; 7568 } 7569 7570 /** 7571 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 7572 * @phba: pointer to lpfc hba data structure. 7573 * 7574 * This routine is invoked to remove all memory resources allocated 7575 * to support rpis for SLI4 ports not supporting extents. This routine 7576 * presumes the caller has released all rpis consumed by fabric or port 7577 * logins and is prepared to have the header pages removed. 7578 **/ 7579 void 7580 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 7581 { 7582 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 7583 7584 if (!phba->sli4_hba.rpi_hdrs_in_use) 7585 goto exit; 7586 7587 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 7588 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 7589 list_del(&rpi_hdr->list); 7590 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 7591 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 7592 kfree(rpi_hdr->dmabuf); 7593 kfree(rpi_hdr); 7594 } 7595 exit: 7596 /* There are no rpis available to the port now. */ 7597 phba->sli4_hba.next_rpi = 0; 7598 } 7599 7600 /** 7601 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 7602 * @pdev: pointer to pci device data structure. 7603 * 7604 * This routine is invoked to allocate the driver hba data structure for an 7605 * HBA device. If the allocation is successful, the phba reference to the 7606 * PCI device data structure is set. 
7607 * 7608 * Return codes 7609 * pointer to @phba - successful 7610 * NULL - error 7611 **/ 7612 static struct lpfc_hba * 7613 lpfc_hba_alloc(struct pci_dev *pdev) 7614 { 7615 struct lpfc_hba *phba; 7616 7617 /* Allocate memory for HBA structure */ 7618 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 7619 if (!phba) { 7620 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 7621 return NULL; 7622 } 7623 7624 /* Set reference to PCI device in HBA structure */ 7625 phba->pcidev = pdev; 7626 7627 /* Assign an unused board number */ 7628 phba->brd_no = lpfc_get_instance(); 7629 if (phba->brd_no < 0) { 7630 kfree(phba); 7631 return NULL; 7632 } 7633 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 7634 7635 spin_lock_init(&phba->ct_ev_lock); 7636 INIT_LIST_HEAD(&phba->ct_ev_waiters); 7637 7638 return phba; 7639 } 7640 7641 /** 7642 * lpfc_hba_free - Free driver hba data structure with a device. 7643 * @phba: pointer to lpfc hba data structure. 7644 * 7645 * This routine is invoked to free the driver hba data structure with an 7646 * HBA device. 7647 **/ 7648 static void 7649 lpfc_hba_free(struct lpfc_hba *phba) 7650 { 7651 if (phba->sli_rev == LPFC_SLI_REV4) 7652 kfree(phba->sli4_hba.hdwq); 7653 7654 /* Release the driver assigned board number */ 7655 idr_remove(&lpfc_hba_index, phba->brd_no); 7656 7657 /* Free memory allocated with sli3 rings */ 7658 kfree(phba->sli.sli3_ring); 7659 phba->sli.sli3_ring = NULL; 7660 7661 kfree(phba); 7662 return; 7663 } 7664 7665 /** 7666 * lpfc_create_shost - Create hba physical port with associated scsi host. 7667 * @phba: pointer to lpfc hba data structure. 7668 * 7669 * This routine is invoked to create HBA physical port and associate a SCSI 7670 * host with it. 7671 * 7672 * Return codes 7673 * 0 - successful 7674 * other values - error 7675 **/ 7676 static int 7677 lpfc_create_shost(struct lpfc_hba *phba) 7678 { 7679 struct lpfc_vport *vport; 7680 struct Scsi_Host *shost; 7681 7682 /* Initialize HBA FC structure */ 7683 phba->fc_edtov = FF_DEF_EDTOV; 7684 phba->fc_ratov = FF_DEF_RATOV; 7685 phba->fc_altov = FF_DEF_ALTOV; 7686 phba->fc_arbtov = FF_DEF_ARBTOV; 7687 7688 atomic_set(&phba->sdev_cnt, 0); 7689 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 7690 if (!vport) 7691 return -ENODEV; 7692 7693 shost = lpfc_shost_from_vport(vport); 7694 phba->pport = vport; 7695 7696 if (phba->nvmet_support) { 7697 /* Only 1 vport (pport) will support NVME target */ 7698 phba->targetport = NULL; 7699 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 7700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 7701 "6076 NVME Target Found\n"); 7702 } 7703 7704 lpfc_debugfs_initialize(vport); 7705 /* Put reference to SCSI host to driver's device private data */ 7706 pci_set_drvdata(phba->pcidev, shost); 7707 7708 /* 7709 * At this point we are fully registered with PSA. In addition, 7710 * any initial discovery should be completed. 7711 */ 7712 vport->load_flag |= FC_ALLOW_FDMI; 7713 if (phba->cfg_enable_SmartSAN || 7714 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 7715 7716 /* Setup appropriate attribute masks */ 7717 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 7718 if (phba->cfg_enable_SmartSAN) 7719 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 7720 else 7721 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 7722 } 7723 return 0; 7724 } 7725 7726 /** 7727 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 7728 * @phba: pointer to lpfc hba data structure. 
7729 * 7730 * This routine is invoked to destroy HBA physical port and the associated 7731 * SCSI host. 7732 **/ 7733 static void 7734 lpfc_destroy_shost(struct lpfc_hba *phba) 7735 { 7736 struct lpfc_vport *vport = phba->pport; 7737 7738 /* Destroy physical port that associated with the SCSI host */ 7739 destroy_port(vport); 7740 7741 return; 7742 } 7743 7744 /** 7745 * lpfc_setup_bg - Setup Block guard structures and debug areas. 7746 * @phba: pointer to lpfc hba data structure. 7747 * @shost: the shost to be used to detect Block guard settings. 7748 * 7749 * This routine sets up the local Block guard protocol settings for @shost. 7750 * This routine also allocates memory for debugging bg buffers. 7751 **/ 7752 static void 7753 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 7754 { 7755 uint32_t old_mask; 7756 uint32_t old_guard; 7757 7758 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7759 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7760 "1478 Registering BlockGuard with the " 7761 "SCSI layer\n"); 7762 7763 old_mask = phba->cfg_prot_mask; 7764 old_guard = phba->cfg_prot_guard; 7765 7766 /* Only allow supported values */ 7767 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 7768 SHOST_DIX_TYPE0_PROTECTION | 7769 SHOST_DIX_TYPE1_PROTECTION); 7770 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 7771 SHOST_DIX_GUARD_CRC); 7772 7773 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 7774 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 7775 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 7776 7777 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7778 if ((old_mask != phba->cfg_prot_mask) || 7779 (old_guard != phba->cfg_prot_guard)) 7780 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7781 "1475 Registering BlockGuard with the " 7782 "SCSI layer: mask %d guard %d\n", 7783 phba->cfg_prot_mask, 7784 phba->cfg_prot_guard); 7785 7786 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7787 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7788 } else 7789 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7790 "1479 Not Registering BlockGuard with the SCSI " 7791 "layer, Bad protection parameters: %d %d\n", 7792 old_mask, old_guard); 7793 } 7794 } 7795 7796 /** 7797 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7798 * @phba: pointer to lpfc hba data structure. 7799 * 7800 * This routine is invoked to perform all the necessary post initialization 7801 * setup for the device. 7802 **/ 7803 static void 7804 lpfc_post_init_setup(struct lpfc_hba *phba) 7805 { 7806 struct Scsi_Host *shost; 7807 struct lpfc_adapter_event_header adapter_event; 7808 7809 /* Get the default values for Model Name and Description */ 7810 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7811 7812 /* 7813 * hba setup may have changed the hba_queue_depth so we need to 7814 * adjust the value of can_queue. 
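 * (can_queue is derived as hba_queue_depth minus a small reserve,
 * presumably so the driver always keeps a few command slots for its
 * own internal use.)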
7815 */ 7816 shost = pci_get_drvdata(phba->pcidev); 7817 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7818 7819 lpfc_host_attrib_init(shost); 7820 7821 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7822 spin_lock_irq(shost->host_lock); 7823 lpfc_poll_start_timer(phba); 7824 spin_unlock_irq(shost->host_lock); 7825 } 7826 7827 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7828 "0428 Perform SCSI scan\n"); 7829 /* Send board arrival event to upper layer */ 7830 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7831 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7832 fc_host_post_vendor_event(shost, fc_get_event_number(), 7833 sizeof(adapter_event), 7834 (char *) &adapter_event, 7835 LPFC_NL_VENDOR_ID); 7836 return; 7837 } 7838 7839 /** 7840 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7841 * @phba: pointer to lpfc hba data structure. 7842 * 7843 * This routine is invoked to set up the PCI device memory space for device 7844 * with SLI-3 interface spec. 7845 * 7846 * Return codes 7847 * 0 - successful 7848 * other values - error 7849 **/ 7850 static int 7851 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7852 { 7853 struct pci_dev *pdev = phba->pcidev; 7854 unsigned long bar0map_len, bar2map_len; 7855 int i, hbq_count; 7856 void *ptr; 7857 int error; 7858 7859 if (!pdev) 7860 return -ENODEV; 7861 7862 /* Set the device DMA mask size */ 7863 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7864 if (error) 7865 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7866 if (error) 7867 return error; 7868 error = -ENODEV; 7869 7870 /* Get the bus address of Bar0 and Bar2 and the number of bytes 7871 * required by each mapping. 7872 */ 7873 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7874 bar0map_len = pci_resource_len(pdev, 0); 7875 7876 phba->pci_bar2_map = pci_resource_start(pdev, 2); 7877 bar2map_len = pci_resource_len(pdev, 2); 7878 7879 /* Map HBA SLIM to a kernel virtual address. */ 7880 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 7881 if (!phba->slim_memmap_p) { 7882 dev_printk(KERN_ERR, &pdev->dev, 7883 "ioremap failed for SLIM memory.\n"); 7884 goto out; 7885 } 7886 7887 /* Map HBA Control Registers to a kernel virtual address. 
*/ 7888 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 7889 if (!phba->ctrl_regs_memmap_p) { 7890 dev_printk(KERN_ERR, &pdev->dev, 7891 "ioremap failed for HBA control registers.\n"); 7892 goto out_iounmap_slim; 7893 } 7894 7895 /* Allocate memory for SLI-2 structures */ 7896 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7897 &phba->slim2p.phys, GFP_KERNEL); 7898 if (!phba->slim2p.virt) 7899 goto out_iounmap; 7900 7901 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7902 phba->mbox_ext = (phba->slim2p.virt + 7903 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7904 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7905 phba->IOCBs = (phba->slim2p.virt + 7906 offsetof(struct lpfc_sli2_slim, IOCBs)); 7907 7908 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7909 lpfc_sli_hbq_size(), 7910 &phba->hbqslimp.phys, 7911 GFP_KERNEL); 7912 if (!phba->hbqslimp.virt) 7913 goto out_free_slim; 7914 7915 hbq_count = lpfc_sli_hbq_count(); 7916 ptr = phba->hbqslimp.virt; 7917 for (i = 0; i < hbq_count; ++i) { 7918 phba->hbqs[i].hbq_virt = ptr; 7919 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7920 ptr += (lpfc_hbq_defs[i]->entry_count * 7921 sizeof(struct lpfc_hbq_entry)); 7922 } 7923 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7924 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7925 7926 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7927 7928 phba->MBslimaddr = phba->slim_memmap_p; 7929 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7930 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7931 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7932 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7933 7934 return 0; 7935 7936 out_free_slim: 7937 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7938 phba->slim2p.virt, phba->slim2p.phys); 7939 out_iounmap: 7940 iounmap(phba->ctrl_regs_memmap_p); 7941 out_iounmap_slim: 7942 iounmap(phba->slim_memmap_p); 7943 out: 7944 return error; 7945 } 7946 7947 /** 7948 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7949 * @phba: pointer to lpfc hba data structure. 7950 * 7951 * This routine is invoked to unset the PCI device memory space for device 7952 * with SLI-3 interface spec. 7953 **/ 7954 static void 7955 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 7956 { 7957 struct pci_dev *pdev; 7958 7959 /* Obtain PCI device reference */ 7960 if (!phba->pcidev) 7961 return; 7962 else 7963 pdev = phba->pcidev; 7964 7965 /* Free coherent DMA memory allocated */ 7966 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7967 phba->hbqslimp.virt, phba->hbqslimp.phys); 7968 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7969 phba->slim2p.virt, phba->slim2p.phys); 7970 7971 /* I/O memory unmap */ 7972 iounmap(phba->ctrl_regs_memmap_p); 7973 iounmap(phba->slim_memmap_p); 7974 7975 return; 7976 } 7977 7978 /** 7979 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 7980 * @phba: pointer to lpfc hba data structure. 7981 * 7982 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 7983 * done and check status. 7984 * 7985 * Return 0 if successful, otherwise -ENODEV. 
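 * The wait loop below polls the port semaphore register every 10 ms
 * for at most 3000 iterations, which gives the 30 second ceiling
 * noted in the code.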
7986 **/ 7987 int 7988 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 7989 { 7990 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 7991 struct lpfc_register reg_data; 7992 int i, port_error = 0; 7993 uint32_t if_type; 7994 7995 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 7996 memset(&reg_data, 0, sizeof(reg_data)); 7997 if (!phba->sli4_hba.PSMPHRregaddr) 7998 return -ENODEV; 7999 8000 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 8001 for (i = 0; i < 3000; i++) { 8002 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 8003 &portsmphr_reg.word0) || 8004 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 8005 /* Port has a fatal POST error, break out */ 8006 port_error = -ENODEV; 8007 break; 8008 } 8009 if (LPFC_POST_STAGE_PORT_READY == 8010 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 8011 break; 8012 msleep(10); 8013 } 8014 8015 /* 8016 * If there was a port error during POST, then don't proceed with 8017 * other register reads as the data may not be valid. Just exit. 8018 */ 8019 if (port_error) { 8020 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8021 "1408 Port Failed POST - portsmphr=0x%x, " 8022 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 8023 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 8024 portsmphr_reg.word0, 8025 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 8026 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 8027 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 8028 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 8029 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 8030 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 8031 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 8032 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 8033 } else { 8034 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8035 "2534 Device Info: SLIFamily=0x%x, " 8036 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 8037 "SLIHint_2=0x%x, FT=0x%x\n", 8038 bf_get(lpfc_sli_intf_sli_family, 8039 &phba->sli4_hba.sli_intf), 8040 bf_get(lpfc_sli_intf_slirev, 8041 &phba->sli4_hba.sli_intf), 8042 bf_get(lpfc_sli_intf_if_type, 8043 &phba->sli4_hba.sli_intf), 8044 bf_get(lpfc_sli_intf_sli_hint1, 8045 &phba->sli4_hba.sli_intf), 8046 bf_get(lpfc_sli_intf_sli_hint2, 8047 &phba->sli4_hba.sli_intf), 8048 bf_get(lpfc_sli_intf_func_type, 8049 &phba->sli4_hba.sli_intf)); 8050 /* 8051 * Check for other Port errors during the initialization 8052 * process. Fail the load if the port did not come up 8053 * correctly. 8054 */ 8055 if_type = bf_get(lpfc_sli_intf_if_type, 8056 &phba->sli4_hba.sli_intf); 8057 switch (if_type) { 8058 case LPFC_SLI_INTF_IF_TYPE_0: 8059 phba->sli4_hba.ue_mask_lo = 8060 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 8061 phba->sli4_hba.ue_mask_hi = 8062 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 8063 uerrlo_reg.word0 = 8064 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 8065 uerrhi_reg.word0 = 8066 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 8067 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 8068 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 8069 lpfc_printf_log(phba, KERN_ERR, 8070 LOG_TRACE_EVENT, 8071 "1422 Unrecoverable Error " 8072 "Detected during POST " 8073 "uerr_lo_reg=0x%x, " 8074 "uerr_hi_reg=0x%x, " 8075 "ue_mask_lo_reg=0x%x, " 8076 "ue_mask_hi_reg=0x%x\n", 8077 uerrlo_reg.word0, 8078 uerrhi_reg.word0, 8079 phba->sli4_hba.ue_mask_lo, 8080 phba->sli4_hba.ue_mask_hi); 8081 port_error = -ENODEV; 8082 } 8083 break; 8084 case LPFC_SLI_INTF_IF_TYPE_2: 8085 case LPFC_SLI_INTF_IF_TYPE_6: 8086 /* Final checks. The port status should be clean.
*/ 8087 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 8088 &reg_data.word0) || 8089 (bf_get(lpfc_sliport_status_err, &reg_data) && 8090 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 8091 phba->work_status[0] = 8092 readl(phba->sli4_hba.u.if_type2. 8093 ERR1regaddr); 8094 phba->work_status[1] = 8095 readl(phba->sli4_hba.u.if_type2. 8096 ERR2regaddr); 8097 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8098 "2888 Unrecoverable port error " 8099 "following POST: port status reg " 8100 "0x%x, port_smphr reg 0x%x, " 8101 "error 1=0x%x, error 2=0x%x\n", 8102 reg_data.word0, 8103 portsmphr_reg.word0, 8104 phba->work_status[0], 8105 phba->work_status[1]); 8106 port_error = -ENODEV; 8107 } 8108 break; 8109 case LPFC_SLI_INTF_IF_TYPE_1: 8110 default: 8111 break; 8112 } 8113 } 8114 return port_error; 8115 } 8116 8117 /** 8118 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 8119 * @phba: pointer to lpfc hba data structure. 8120 * @if_type: The SLI4 interface type getting configured. 8121 * 8122 * This routine is invoked to set up SLI4 BAR0 PCI config space register 8123 * memory map. 8124 **/ 8125 static void 8126 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 8127 { 8128 switch (if_type) { 8129 case LPFC_SLI_INTF_IF_TYPE_0: 8130 phba->sli4_hba.u.if_type0.UERRLOregaddr = 8131 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 8132 phba->sli4_hba.u.if_type0.UERRHIregaddr = 8133 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 8134 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 8135 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 8136 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 8137 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 8138 phba->sli4_hba.SLIINTFregaddr = 8139 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 8140 break; 8141 case LPFC_SLI_INTF_IF_TYPE_2: 8142 phba->sli4_hba.u.if_type2.EQDregaddr = 8143 phba->sli4_hba.conf_regs_memmap_p + 8144 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8145 phba->sli4_hba.u.if_type2.ERR1regaddr = 8146 phba->sli4_hba.conf_regs_memmap_p + 8147 LPFC_CTL_PORT_ER1_OFFSET; 8148 phba->sli4_hba.u.if_type2.ERR2regaddr = 8149 phba->sli4_hba.conf_regs_memmap_p + 8150 LPFC_CTL_PORT_ER2_OFFSET; 8151 phba->sli4_hba.u.if_type2.CTRLregaddr = 8152 phba->sli4_hba.conf_regs_memmap_p + 8153 LPFC_CTL_PORT_CTL_OFFSET; 8154 phba->sli4_hba.u.if_type2.STATUSregaddr = 8155 phba->sli4_hba.conf_regs_memmap_p + 8156 LPFC_CTL_PORT_STA_OFFSET; 8157 phba->sli4_hba.SLIINTFregaddr = 8158 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 8159 phba->sli4_hba.PSMPHRregaddr = 8160 phba->sli4_hba.conf_regs_memmap_p + 8161 LPFC_CTL_PORT_SEM_OFFSET; 8162 phba->sli4_hba.RQDBregaddr = 8163 phba->sli4_hba.conf_regs_memmap_p + 8164 LPFC_ULP0_RQ_DOORBELL; 8165 phba->sli4_hba.WQDBregaddr = 8166 phba->sli4_hba.conf_regs_memmap_p + 8167 LPFC_ULP0_WQ_DOORBELL; 8168 phba->sli4_hba.CQDBregaddr = 8169 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 8170 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8171 phba->sli4_hba.MQDBregaddr = 8172 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 8173 phba->sli4_hba.BMBXregaddr = 8174 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8175 break; 8176 case LPFC_SLI_INTF_IF_TYPE_6: 8177 phba->sli4_hba.u.if_type2.EQDregaddr = 8178 phba->sli4_hba.conf_regs_memmap_p + 8179 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8180 phba->sli4_hba.u.if_type2.ERR1regaddr = 8181 phba->sli4_hba.conf_regs_memmap_p + 8182 LPFC_CTL_PORT_ER1_OFFSET; 8183 phba->sli4_hba.u.if_type2.ERR2regaddr = 8184
phba->sli4_hba.conf_regs_memmap_p + 8185 LPFC_CTL_PORT_ER2_OFFSET; 8186 phba->sli4_hba.u.if_type2.CTRLregaddr = 8187 phba->sli4_hba.conf_regs_memmap_p + 8188 LPFC_CTL_PORT_CTL_OFFSET; 8189 phba->sli4_hba.u.if_type2.STATUSregaddr = 8190 phba->sli4_hba.conf_regs_memmap_p + 8191 LPFC_CTL_PORT_STA_OFFSET; 8192 phba->sli4_hba.PSMPHRregaddr = 8193 phba->sli4_hba.conf_regs_memmap_p + 8194 LPFC_CTL_PORT_SEM_OFFSET; 8195 phba->sli4_hba.BMBXregaddr = 8196 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8197 break; 8198 case LPFC_SLI_INTF_IF_TYPE_1: 8199 default: 8200 dev_printk(KERN_ERR, &phba->pcidev->dev, 8201 "FATAL - unsupported SLI4 interface type - %d\n", 8202 if_type); 8203 break; 8204 } 8205 } 8206 8207 /** 8208 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 8209 * @phba: pointer to lpfc hba data structure. 8210 * @if_type: sli if type to operate on. 8211 * 8212 * This routine is invoked to set up SLI4 BAR1 register memory map. 8213 **/ 8214 static void 8215 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 8216 { 8217 switch (if_type) { 8218 case LPFC_SLI_INTF_IF_TYPE_0: 8219 phba->sli4_hba.PSMPHRregaddr = 8220 phba->sli4_hba.ctrl_regs_memmap_p + 8221 LPFC_SLIPORT_IF0_SMPHR; 8222 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8223 LPFC_HST_ISR0; 8224 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8225 LPFC_HST_IMR0; 8226 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8227 LPFC_HST_ISCR0; 8228 break; 8229 case LPFC_SLI_INTF_IF_TYPE_6: 8230 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8231 LPFC_IF6_RQ_DOORBELL; 8232 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8233 LPFC_IF6_WQ_DOORBELL; 8234 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8235 LPFC_IF6_CQ_DOORBELL; 8236 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8237 LPFC_IF6_EQ_DOORBELL; 8238 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8239 LPFC_IF6_MQ_DOORBELL; 8240 break; 8241 case LPFC_SLI_INTF_IF_TYPE_2: 8242 case LPFC_SLI_INTF_IF_TYPE_1: 8243 default: 8244 dev_err(&phba->pcidev->dev, 8245 "FATAL - unsupported SLI4 interface type - %d\n", 8246 if_type); 8247 break; 8248 } 8249 } 8250 8251 /** 8252 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 8253 * @phba: pointer to lpfc hba data structure. 8254 * @vf: virtual function number 8255 * 8256 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 8257 * based on the given virtual function number, @vf. 8258 * 8259 * Return 0 if successful, otherwise -ENODEV.
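 * Each virtual function owns one LPFC_VFR_PAGE_SIZE-sized window in
 * the BAR2 doorbell space, so every doorbell address computed below
 * reduces to:
 *
 *	drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + <doorbell offset>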
8260 **/ 8261 static int 8262 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 8263 { 8264 if (vf > LPFC_VIR_FUNC_MAX) 8265 return -ENODEV; 8266 8267 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8268 vf * LPFC_VFR_PAGE_SIZE + 8269 LPFC_ULP0_RQ_DOORBELL); 8270 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8271 vf * LPFC_VFR_PAGE_SIZE + 8272 LPFC_ULP0_WQ_DOORBELL); 8273 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8274 vf * LPFC_VFR_PAGE_SIZE + 8275 LPFC_EQCQ_DOORBELL); 8276 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8277 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8278 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 8279 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8280 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 8281 return 0; 8282 } 8283 8284 /** 8285 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 8286 * @phba: pointer to lpfc hba data structure. 8287 * 8288 * This routine is invoked to create the bootstrap mailbox 8289 * region consistent with the SLI-4 interface spec. This 8290 * routine allocates all memory necessary to communicate 8291 * mailbox commands to the port and sets up all alignment 8292 * needs. No locks are expected to be held when calling 8293 * this routine. 8294 * 8295 * Return codes 8296 * 0 - successful 8297 * -ENOMEM - could not allocate memory. 8298 **/ 8299 static int 8300 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 8301 { 8302 uint32_t bmbx_size; 8303 struct lpfc_dmabuf *dmabuf; 8304 struct dma_address *dma_address; 8305 uint32_t pa_addr; 8306 uint64_t phys_addr; 8307 8308 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8309 if (!dmabuf) 8310 return -ENOMEM; 8311 8312 /* 8313 * The bootstrap mailbox region is comprised of 2 parts 8314 * plus an alignment restriction of 16 bytes. 8315 */ 8316 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 8317 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 8318 &dmabuf->phys, GFP_KERNEL); 8319 if (!dmabuf->virt) { 8320 kfree(dmabuf); 8321 return -ENOMEM; 8322 } 8323 8324 /* 8325 * Initialize the bootstrap mailbox pointers now so that the register 8326 * operations are simple later. The mailbox dma address is required 8327 * to be 16-byte aligned. Also align the virtual memory as each 8328 * mailbox is copied into the bmbx mailbox region before issuing the 8329 * command to the port. 8330 */ 8331 phba->sli4_hba.bmbx.dmabuf = dmabuf; 8332 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 8333 8334 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 8335 LPFC_ALIGN_16_BYTE); 8336 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 8337 LPFC_ALIGN_16_BYTE); 8338 8339 /* 8340 * Set the high and low physical addresses now. The SLI4 alignment 8341 * requirement is 16 bytes and the mailbox is posted to the port 8342 * as two 30-bit addresses. The other data is a bit marking whether 8343 * the 30-bit address is the high or low address. 8344 * Upcast bmbx aphys to 64bits so shift instruction compiles 8345 * clean on 32 bit machines.
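 * Concretely: addr_lo carries physical address bits [33:4] and
 * addr_hi carries bits [63:34]; each 30-bit field is shifted left by
 * two so the bottom bits of the posted word can hold the high/low
 * marker.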
8346 */ 8347 dma_address = &phba->sli4_hba.bmbx.dma_address; 8348 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 8349 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 8350 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 8351 LPFC_BMBX_BIT1_ADDR_HI); 8352 8353 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 8354 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 8355 LPFC_BMBX_BIT1_ADDR_LO); 8356 return 0; 8357 } 8358 8359 /** 8360 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 8361 * @phba: pointer to lpfc hba data structure. 8362 * 8363 * This routine is invoked to tear down the bootstrap mailbox 8364 * region and release all host resources. This routine requires 8365 * the caller to ensure all mailbox commands are recovered, no 8366 * additional mailbox commands are sent, and interrupts are disabled 8367 * before calling this routine. 8368 * 8369 **/ 8370 static void 8371 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 8372 { 8373 dma_free_coherent(&phba->pcidev->dev, 8374 phba->sli4_hba.bmbx.bmbx_size, 8375 phba->sli4_hba.bmbx.dmabuf->virt, 8376 phba->sli4_hba.bmbx.dmabuf->phys); 8377 8378 kfree(phba->sli4_hba.bmbx.dmabuf); 8379 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 8380 } 8381 8382 static const char * const lpfc_topo_to_str[] = { 8383 "Loop then P2P", 8384 "Loopback", 8385 "P2P Only", 8386 "Unsupported", 8387 "Loop Only", 8388 "Unsupported", 8389 "P2P then Loop", 8390 }; 8391 8392 #define LINK_FLAGS_DEF 0x0 8393 #define LINK_FLAGS_P2P 0x1 8394 #define LINK_FLAGS_LOOP 0x2 8395 /** 8396 * lpfc_map_topology - Map the topology read from READ_CONFIG 8397 * @phba: pointer to lpfc hba data structure. 8398 * @rd_config: pointer to read config data 8399 * 8400 * This routine is invoked to map the topology values as read 8401 * from the read config mailbox command. If the persistent 8402 * topology feature is supported, the firmware will provide the 8403 * saved topology information to be used in INIT_LINK 8404 **/ 8405 static void 8406 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 8407 { 8408 u8 ptv, tf, pt; 8409 8410 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 8411 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 8412 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 8413 8414 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8415 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 8416 ptv, tf, pt); 8417 if (!ptv) { 8418 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8419 "2019 FW does not support persistent topology " 8420 "Using driver parameter defined value [%s]", 8421 lpfc_topo_to_str[phba->cfg_topology]); 8422 return; 8423 } 8424 /* FW supports persistent topology - override module parameter value */ 8425 phba->hba_flag |= HBA_PERSISTENT_TOPO; 8426 switch (phba->pcidev->device) { 8427 case PCI_DEVICE_ID_LANCER_G7_FC: 8428 case PCI_DEVICE_ID_LANCER_G6_FC: 8429 if (!tf) { 8430 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 8431 ? FLAGS_TOPOLOGY_MODE_LOOP 8432 : FLAGS_TOPOLOGY_MODE_PT_PT); 8433 } else { 8434 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 8435 } 8436 break; 8437 default: /* G5 */ 8438 if (tf) { 8439 /* If topology failover set - pt is '0' or '1' */ 8440 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 8441 FLAGS_TOPOLOGY_MODE_LOOP_PT); 8442 } else { 8443 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 8444 ?
FLAGS_TOPOLOGY_MODE_PT_PT 8445 : FLAGS_TOPOLOGY_MODE_LOOP); 8446 } 8447 break; 8448 } 8449 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 8450 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8451 "2020 Using persistent topology value [%s]", 8452 lpfc_topo_to_str[phba->cfg_topology]); 8453 } else { 8454 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8455 "2021 Invalid topology values from FW " 8456 "Using driver parameter defined value [%s]", 8457 lpfc_topo_to_str[phba->cfg_topology]); 8458 } 8459 } 8460 8461 /** 8462 * lpfc_sli4_read_config - Get the config parameters. 8463 * @phba: pointer to lpfc hba data structure. 8464 * 8465 * This routine is invoked to read the configuration parameters from the HBA. 8466 * The configuration parameters are used to set the base and maximum values 8467 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 8468 * allocation for the port. 8469 * 8470 * Return codes 8471 * 0 - successful 8472 * -ENOMEM - No available memory 8473 * -EIO - The mailbox failed to complete successfully. 8474 **/ 8475 int 8476 lpfc_sli4_read_config(struct lpfc_hba *phba) 8477 { 8478 LPFC_MBOXQ_t *pmb; 8479 struct lpfc_mbx_read_config *rd_config; 8480 union lpfc_sli4_cfg_shdr *shdr; 8481 uint32_t shdr_status, shdr_add_status; 8482 struct lpfc_mbx_get_func_cfg *get_func_cfg; 8483 struct lpfc_rsrc_desc_fcfcoe *desc; 8484 char *pdesc_0; 8485 uint16_t forced_link_speed; 8486 uint32_t if_type, qmin; 8487 int length, i, rc = 0, rc2; 8488 8489 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8490 if (!pmb) { 8491 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8492 "2011 Unable to allocate memory for issuing " 8493 "SLI_CONFIG_SPECIAL mailbox command\n"); 8494 return -ENOMEM; 8495 } 8496 8497 lpfc_read_config(phba, pmb); 8498 8499 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8500 if (rc != MBX_SUCCESS) { 8501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8502 "2012 Mailbox failed , mbxCmd x%x " 8503 "READ_CONFIG, mbxStatus x%x\n", 8504 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8505 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8506 rc = -EIO; 8507 } else { 8508 rd_config = &pmb->u.mqe.un.rd_config; 8509 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 8510 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 8511 phba->sli4_hba.lnk_info.lnk_tp = 8512 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 8513 phba->sli4_hba.lnk_info.lnk_no = 8514 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 8515 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8516 "3081 lnk_type:%d, lnk_numb:%d\n", 8517 phba->sli4_hba.lnk_info.lnk_tp, 8518 phba->sli4_hba.lnk_info.lnk_no); 8519 } else 8520 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8521 "3082 Mailbox (x%x) returned ldv:x0\n", 8522 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 8523 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 8524 phba->bbcredit_support = 1; 8525 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 8526 } 8527 8528 phba->sli4_hba.conf_trunk = 8529 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 8530 phba->sli4_hba.extents_in_use = 8531 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 8532 phba->sli4_hba.max_cfg_param.max_xri = 8533 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 8534 /* Reduce resource usage in kdump environment */ 8535 if (is_kdump_kernel() && 8536 phba->sli4_hba.max_cfg_param.max_xri > 512) 8537 phba->sli4_hba.max_cfg_param.max_xri = 512; 8538 phba->sli4_hba.max_cfg_param.xri_base = 8539 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 8540 phba->sli4_hba.max_cfg_param.max_vpi = 8541 
bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 8542 /* Limit the max we support */ 8543 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 8544 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 8545 phba->sli4_hba.max_cfg_param.vpi_base = 8546 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 8547 phba->sli4_hba.max_cfg_param.max_rpi = 8548 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 8549 phba->sli4_hba.max_cfg_param.rpi_base = 8550 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 8551 phba->sli4_hba.max_cfg_param.max_vfi = 8552 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 8553 phba->sli4_hba.max_cfg_param.vfi_base = 8554 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 8555 phba->sli4_hba.max_cfg_param.max_fcfi = 8556 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 8557 phba->sli4_hba.max_cfg_param.max_eq = 8558 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 8559 phba->sli4_hba.max_cfg_param.max_rq = 8560 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 8561 phba->sli4_hba.max_cfg_param.max_wq = 8562 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 8563 phba->sli4_hba.max_cfg_param.max_cq = 8564 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 8565 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 8566 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 8567 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 8568 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 8569 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 8570 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 8571 phba->max_vports = phba->max_vpi; 8572 lpfc_map_topology(phba, rd_config); 8573 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8574 "2003 cfg params Extents? %d " 8575 "XRI(B:%d M:%d), " 8576 "VPI(B:%d M:%d) " 8577 "VFI(B:%d M:%d) " 8578 "RPI(B:%d M:%d) " 8579 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", 8580 phba->sli4_hba.extents_in_use, 8581 phba->sli4_hba.max_cfg_param.xri_base, 8582 phba->sli4_hba.max_cfg_param.max_xri, 8583 phba->sli4_hba.max_cfg_param.vpi_base, 8584 phba->sli4_hba.max_cfg_param.max_vpi, 8585 phba->sli4_hba.max_cfg_param.vfi_base, 8586 phba->sli4_hba.max_cfg_param.max_vfi, 8587 phba->sli4_hba.max_cfg_param.rpi_base, 8588 phba->sli4_hba.max_cfg_param.max_rpi, 8589 phba->sli4_hba.max_cfg_param.max_fcfi, 8590 phba->sli4_hba.max_cfg_param.max_eq, 8591 phba->sli4_hba.max_cfg_param.max_cq, 8592 phba->sli4_hba.max_cfg_param.max_wq, 8593 phba->sli4_hba.max_cfg_param.max_rq, 8594 phba->lmt); 8595 8596 /* 8597 * Calculate queue resources based on how 8598 * many WQ/CQ/EQs are available. 8599 */ 8600 qmin = phba->sli4_hba.max_cfg_param.max_wq; 8601 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 8602 qmin = phba->sli4_hba.max_cfg_param.max_cq; 8603 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 8604 qmin = phba->sli4_hba.max_cfg_param.max_eq; 8605 /* 8606 * What's left after this can go toward NVME / FCP. 8607 * The minus 4 accounts for ELS, NVME LS, MBOX 8608 * plus one extra. When configured for 8609 * NVMET, FCP io channel WQs are not created.
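 * Illustrative (hypothetical) numbers: with max_wq=128, max_cq=120
 * and max_eq=64, the working minimum is 64, leaving qmin = 60 queues
 * available for FCP/NVME I/O after the four reserved slots.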
8610 */ 8611 qmin -= 4; 8612 8613 /* Check to see if there is enough for NVME */ 8614 if ((phba->cfg_irq_chann > qmin) || 8615 (phba->cfg_hdw_queue > qmin)) { 8616 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8617 "2005 Reducing Queues - " 8618 "FW resource limitation: " 8619 "WQ %d CQ %d EQ %d: min %d: " 8620 "IRQ %d HDWQ %d\n", 8621 phba->sli4_hba.max_cfg_param.max_wq, 8622 phba->sli4_hba.max_cfg_param.max_cq, 8623 phba->sli4_hba.max_cfg_param.max_eq, 8624 qmin, phba->cfg_irq_chann, 8625 phba->cfg_hdw_queue); 8626 8627 if (phba->cfg_irq_chann > qmin) 8628 phba->cfg_irq_chann = qmin; 8629 if (phba->cfg_hdw_queue > qmin) 8630 phba->cfg_hdw_queue = qmin; 8631 } 8632 } 8633 8634 if (rc) 8635 goto read_cfg_out; 8636 8637 /* Update link speed if forced link speed is supported */ 8638 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8639 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 8640 forced_link_speed = 8641 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 8642 if (forced_link_speed) { 8643 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 8644 8645 switch (forced_link_speed) { 8646 case LINK_SPEED_1G: 8647 phba->cfg_link_speed = 8648 LPFC_USER_LINK_SPEED_1G; 8649 break; 8650 case LINK_SPEED_2G: 8651 phba->cfg_link_speed = 8652 LPFC_USER_LINK_SPEED_2G; 8653 break; 8654 case LINK_SPEED_4G: 8655 phba->cfg_link_speed = 8656 LPFC_USER_LINK_SPEED_4G; 8657 break; 8658 case LINK_SPEED_8G: 8659 phba->cfg_link_speed = 8660 LPFC_USER_LINK_SPEED_8G; 8661 break; 8662 case LINK_SPEED_10G: 8663 phba->cfg_link_speed = 8664 LPFC_USER_LINK_SPEED_10G; 8665 break; 8666 case LINK_SPEED_16G: 8667 phba->cfg_link_speed = 8668 LPFC_USER_LINK_SPEED_16G; 8669 break; 8670 case LINK_SPEED_32G: 8671 phba->cfg_link_speed = 8672 LPFC_USER_LINK_SPEED_32G; 8673 break; 8674 case LINK_SPEED_64G: 8675 phba->cfg_link_speed = 8676 LPFC_USER_LINK_SPEED_64G; 8677 break; 8678 case 0xffff: 8679 phba->cfg_link_speed = 8680 LPFC_USER_LINK_SPEED_AUTO; 8681 break; 8682 default: 8683 lpfc_printf_log(phba, KERN_ERR, 8684 LOG_TRACE_EVENT, 8685 "0047 Unrecognized link " 8686 "speed : %d\n", 8687 forced_link_speed); 8688 phba->cfg_link_speed = 8689 LPFC_USER_LINK_SPEED_AUTO; 8690 } 8691 } 8692 } 8693 8694 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 8695 length = phba->sli4_hba.max_cfg_param.max_xri - 8696 lpfc_sli4_get_els_iocb_cnt(phba); 8697 if (phba->cfg_hba_queue_depth > length) { 8698 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8699 "3361 HBA queue depth changed from %d to %d\n", 8700 phba->cfg_hba_queue_depth, length); 8701 phba->cfg_hba_queue_depth = length; 8702 } 8703 8704 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 8705 LPFC_SLI_INTF_IF_TYPE_2) 8706 goto read_cfg_out; 8707 8708 /* get the pf# and vf# for SLI4 if_type 2 port */ 8709 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 8710 sizeof(struct lpfc_sli4_cfg_mhdr)); 8711 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 8712 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 8713 length, LPFC_SLI4_MBX_EMBED); 8714 8715 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8716 shdr = (union lpfc_sli4_cfg_shdr *) 8717 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 8718 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8719 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8720 if (rc2 || shdr_status || shdr_add_status) { 8721 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8722 "3026 Mailbox failed , mbxCmd x%x " 8723 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 8724 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8725 bf_get(lpfc_mqe_status, 
&pmb->u.mqe)); 8726 goto read_cfg_out; 8727 } 8728 8729 /* search for fc_fcoe resource descriptor */ 8730 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 8731 8732 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 8733 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 8734 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 8735 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 8736 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 8737 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 8738 goto read_cfg_out; 8739 8740 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 8741 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 8742 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 8743 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 8744 phba->sli4_hba.iov.pf_number = 8745 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 8746 phba->sli4_hba.iov.vf_number = 8747 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 8748 break; 8749 } 8750 } 8751 8752 if (i < LPFC_RSRC_DESC_MAX_NUM) 8753 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8754 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 8755 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 8756 phba->sli4_hba.iov.vf_number); 8757 else 8758 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8759 "3028 GET_FUNCTION_CONFIG: failed to find " 8760 "Resource Descriptor:x%x\n", 8761 LPFC_RSRC_DESC_TYPE_FCFCOE); 8762 8763 read_cfg_out: 8764 mempool_free(pmb, phba->mbox_mem_pool); 8765 return rc; 8766 } 8767 8768 /** 8769 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 8770 * @phba: pointer to lpfc hba data structure. 8771 * 8772 * This routine is invoked to set up the port-side endian order when 8773 * the port if_type is 0. This routine has no function for other 8774 * if_types. 8775 * 8776 * Return codes 8777 * 0 - successful 8778 * -ENOMEM - No available memory 8779 * -EIO - The mailbox failed to complete successfully. 8780 **/ 8781 static int 8782 lpfc_setup_endian_order(struct lpfc_hba *phba) 8783 { 8784 LPFC_MBOXQ_t *mboxq; 8785 uint32_t if_type, rc = 0; 8786 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 8787 HOST_ENDIAN_HIGH_WORD1}; 8788 8789 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8790 switch (if_type) { 8791 case LPFC_SLI_INTF_IF_TYPE_0: 8792 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8793 GFP_KERNEL); 8794 if (!mboxq) { 8795 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8796 "0492 Unable to allocate memory for " 8797 "issuing SLI_CONFIG_SPECIAL mailbox " 8798 "command\n"); 8799 return -ENOMEM; 8800 } 8801 8802 /* 8803 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 8804 * two words to contain special data values and no other data. 8805 */ 8806 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 8807 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8808 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8809 if (rc != MBX_SUCCESS) { 8810 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8811 "0493 SLI_CONFIG_SPECIAL mailbox " 8812 "failed with status x%x\n", 8813 rc); 8814 rc = -EIO; 8815 } 8816 mempool_free(mboxq, phba->mbox_mem_pool); 8817 break; 8818 case LPFC_SLI_INTF_IF_TYPE_6: 8819 case LPFC_SLI_INTF_IF_TYPE_2: 8820 case LPFC_SLI_INTF_IF_TYPE_1: 8821 default: 8822 break; 8823 } 8824 return rc; 8825 } 8826 8827 /** 8828 * lpfc_sli4_queue_verify - Verify and update EQ counts 8829 * @phba: pointer to lpfc hba data structure. 8830 * 8831 * This routine is invoked to check the user-settable queue counts for EQs.
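 * (For NVMET configurations, cfg_nvmet_mrq is clamped in the body to
 * at most cfg_hdw_queue and LPFC_NVMET_MRQ_MAX.)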
8832 * After this routine is called the counts will be set to valid values that 8833 * adhere to the constraints of the system's interrupt vectors and the port's 8834 * queue resources. 8835 * 8836 * Return codes 8837 * 0 - successful 8838 * -ENOMEM - No available memory 8839 **/ 8840 static int 8841 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 8842 { 8843 /* 8844 * Sanity check for configured queue parameters against the run-time 8845 * device parameters 8846 */ 8847 8848 if (phba->nvmet_support) { 8849 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 8850 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 8851 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 8852 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 8853 } 8854 8855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8856 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 8857 phba->cfg_hdw_queue, phba->cfg_irq_chann, 8858 phba->cfg_nvmet_mrq); 8859 8860 /* Get EQ depth from module parameter, fake the default for now */ 8861 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8862 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8863 8864 /* Get CQ depth from module parameter, fake the default for now */ 8865 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8866 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8867 return 0; 8868 } 8869 8870 static int 8871 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 8872 { 8873 struct lpfc_queue *qdesc; 8874 u32 wqesize; 8875 int cpu; 8876 8877 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 8878 /* Create Fast Path IO CQs */ 8879 if (phba->enab_exp_wqcq_pages) 8880 /* Increase the CQ size when WQEs contain an embedded cdb */ 8881 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8882 phba->sli4_hba.cq_esize, 8883 LPFC_CQE_EXP_COUNT, cpu); 8884 8885 else 8886 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8887 phba->sli4_hba.cq_esize, 8888 phba->sli4_hba.cq_ecount, cpu); 8889 if (!qdesc) { 8890 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8891 "0499 Failed allocate fast-path IO CQ (%d)\n", 8892 idx); 8893 return 1; 8894 } 8895 qdesc->qe_valid = 1; 8896 qdesc->hdwq = idx; 8897 qdesc->chann = cpu; 8898 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 8899 8900 /* Create Fast Path IO WQs */ 8901 if (phba->enab_exp_wqcq_pages) { 8902 /* Increase the WQ size when WQEs contain an embedded cdb */ 8903 wqesize = (phba->fcp_embed_io) ? 8904 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 8905 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8906 wqesize, 8907 LPFC_WQE_EXP_COUNT, cpu); 8908 } else 8909 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8910 phba->sli4_hba.wq_esize, 8911 phba->sli4_hba.wq_ecount, cpu); 8912 8913 if (!qdesc) { 8914 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8915 "0503 Failed allocate fast-path IO WQ (%d)\n", 8916 idx); 8917 return 1; 8918 } 8919 qdesc->hdwq = idx; 8920 qdesc->chann = cpu; 8921 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 8922 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8923 return 0; 8924 } 8925 8926 /** 8927 * lpfc_sli4_queue_create - Create all the SLI4 queues 8928 * @phba: pointer to lpfc hba data structure. 8929 * 8930 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 8931 * operation. For each SLI4 queue type, the parameters such as queue entry 8932 * count (queue depth) shall be taken from the module parameter. For now, 8933 * we just use some constant number as place holder. 
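 * Note that only one EQ is allocated per interrupt vector (the CPU
 * flagged LPFC_CPU_FIRST_IRQ); hardware queues that share a vector
 * share that EQ, as wired up in the second per-CPU loop below.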
8934 * 8935 * Return codes 8936 * 0 - successful 8937 * -ENOMEM - No available memory 8938 * -EIO - The mailbox failed to complete successfully. 8939 **/ 8940 int 8941 lpfc_sli4_queue_create(struct lpfc_hba *phba) 8942 { 8943 struct lpfc_queue *qdesc; 8944 int idx, cpu, eqcpu; 8945 struct lpfc_sli4_hdw_queue *qp; 8946 struct lpfc_vector_map_info *cpup; 8947 struct lpfc_vector_map_info *eqcpup; 8948 struct lpfc_eq_intr_info *eqi; 8949 8950 /* 8951 * Create HBA Record arrays. 8952 * Both NVME and FCP will share the same vectors / EQs 8953 */ 8954 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 8955 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 8956 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 8957 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 8958 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 8959 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 8960 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8961 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8962 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8963 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8964 8965 if (!phba->sli4_hba.hdwq) { 8966 phba->sli4_hba.hdwq = kcalloc( 8967 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 8968 GFP_KERNEL); 8969 if (!phba->sli4_hba.hdwq) { 8970 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8971 "6427 Failed allocate memory for " 8972 "fast-path Hardware Queue array\n"); 8973 goto out_error; 8974 } 8975 /* Prepare hardware queues to take IO buffers */ 8976 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8977 qp = &phba->sli4_hba.hdwq[idx]; 8978 spin_lock_init(&qp->io_buf_list_get_lock); 8979 spin_lock_init(&qp->io_buf_list_put_lock); 8980 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 8981 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 8982 qp->get_io_bufs = 0; 8983 qp->put_io_bufs = 0; 8984 qp->total_io_bufs = 0; 8985 spin_lock_init(&qp->abts_io_buf_list_lock); 8986 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 8987 qp->abts_scsi_io_bufs = 0; 8988 qp->abts_nvme_io_bufs = 0; 8989 INIT_LIST_HEAD(&qp->sgl_list); 8990 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 8991 spin_lock_init(&qp->hdwq_lock); 8992 } 8993 } 8994 8995 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8996 if (phba->nvmet_support) { 8997 phba->sli4_hba.nvmet_cqset = kcalloc( 8998 phba->cfg_nvmet_mrq, 8999 sizeof(struct lpfc_queue *), 9000 GFP_KERNEL); 9001 if (!phba->sli4_hba.nvmet_cqset) { 9002 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9003 "3121 Fail allocate memory for " 9004 "fast-path CQ set array\n"); 9005 goto out_error; 9006 } 9007 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 9008 phba->cfg_nvmet_mrq, 9009 sizeof(struct lpfc_queue *), 9010 GFP_KERNEL); 9011 if (!phba->sli4_hba.nvmet_mrq_hdr) { 9012 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9013 "3122 Fail allocate memory for " 9014 "fast-path RQ set hdr array\n"); 9015 goto out_error; 9016 } 9017 phba->sli4_hba.nvmet_mrq_data = kcalloc( 9018 phba->cfg_nvmet_mrq, 9019 sizeof(struct lpfc_queue *), 9020 GFP_KERNEL); 9021 if (!phba->sli4_hba.nvmet_mrq_data) { 9022 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9023 "3124 Fail allocate memory for " 9024 "fast-path RQ set data array\n"); 9025 goto out_error; 9026 } 9027 } 9028 } 9029 9030 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 9031 9032 /* Create HBA Event Queues (EQs) */ 9033 for_each_present_cpu(cpu) { 9034 /* We only want to create 1 EQ per vector, even though 9035 * multiple CPUs might be using that vector, so only 9036 * select the CPUs that are LPFC_CPU_FIRST_IRQ.
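 * For example, if CPUs 2 and 6 were mapped to the same vector (a
 * hypothetical mapping), only the CPU flagged LPFC_CPU_FIRST_IRQ
 * allocates the EQ here; the second loop below then points the other
 * CPU's hardware queue at that shared EQ.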
9037 */ 9038 cpup = &phba->sli4_hba.cpu_map[cpu]; 9039 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 9040 continue; 9041 9042 /* Get a ptr to the Hardware Queue associated with this CPU */ 9043 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 9044 9045 /* Allocate an EQ */ 9046 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9047 phba->sli4_hba.eq_esize, 9048 phba->sli4_hba.eq_ecount, cpu); 9049 if (!qdesc) { 9050 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9051 "0497 Failed allocate EQ (%d)\n", 9052 cpup->hdwq); 9053 goto out_error; 9054 } 9055 qdesc->qe_valid = 1; 9056 qdesc->hdwq = cpup->hdwq; 9057 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 9058 qdesc->last_cpu = qdesc->chann; 9059 9060 /* Save the allocated EQ in the Hardware Queue */ 9061 qp->hba_eq = qdesc; 9062 9063 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 9064 list_add(&qdesc->cpu_list, &eqi->list); 9065 } 9066 9067 /* Now we need to populate the other Hardware Queues, that share 9068 * an IRQ vector, with the associated EQ ptr. 9069 */ 9070 for_each_present_cpu(cpu) { 9071 cpup = &phba->sli4_hba.cpu_map[cpu]; 9072 9073 /* Check for EQ already allocated in previous loop */ 9074 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 9075 continue; 9076 9077 /* Check for multiple CPUs per hdwq */ 9078 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 9079 if (qp->hba_eq) 9080 continue; 9081 9082 /* We need to share an EQ for this hdwq */ 9083 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 9084 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 9085 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 9086 } 9087 9088 /* Allocate IO Path SLI4 CQ/WQs */ 9089 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9090 if (lpfc_alloc_io_wq_cq(phba, idx)) 9091 goto out_error; 9092 } 9093 9094 if (phba->nvmet_support) { 9095 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9096 cpu = lpfc_find_cpu_handle(phba, idx, 9097 LPFC_FIND_BY_HDWQ); 9098 qdesc = lpfc_sli4_queue_alloc(phba, 9099 LPFC_DEFAULT_PAGE_SIZE, 9100 phba->sli4_hba.cq_esize, 9101 phba->sli4_hba.cq_ecount, 9102 cpu); 9103 if (!qdesc) { 9104 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9105 "3142 Failed allocate NVME " 9106 "CQ Set (%d)\n", idx); 9107 goto out_error; 9108 } 9109 qdesc->qe_valid = 1; 9110 qdesc->hdwq = idx; 9111 qdesc->chann = cpu; 9112 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 9113 } 9114 } 9115 9116 /* 9117 * Create Slow Path Completion Queues (CQs) 9118 */ 9119 9120 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 9121 /* Create slow-path Mailbox Command Complete Queue */ 9122 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9123 phba->sli4_hba.cq_esize, 9124 phba->sli4_hba.cq_ecount, cpu); 9125 if (!qdesc) { 9126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9127 "0500 Failed allocate slow-path mailbox CQ\n"); 9128 goto out_error; 9129 } 9130 qdesc->qe_valid = 1; 9131 phba->sli4_hba.mbx_cq = qdesc; 9132 9133 /* Create slow-path ELS Complete Queue */ 9134 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9135 phba->sli4_hba.cq_esize, 9136 phba->sli4_hba.cq_ecount, cpu); 9137 if (!qdesc) { 9138 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9139 "0501 Failed allocate slow-path ELS CQ\n"); 9140 goto out_error; 9141 } 9142 qdesc->qe_valid = 1; 9143 qdesc->chann = cpu; 9144 phba->sli4_hba.els_cq = qdesc; 9145 9146 9147 /* 9148 * Create Slow Path Work Queues (WQs) 9149 */ 9150 9151 /* Create Mailbox Command Queue */ 9152 9153 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9154 phba->sli4_hba.mq_esize, 
9155 phba->sli4_hba.mq_ecount, cpu); 9156 if (!qdesc) { 9157 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9158 "0505 Failed allocate slow-path MQ\n"); 9159 goto out_error; 9160 } 9161 qdesc->chann = cpu; 9162 phba->sli4_hba.mbx_wq = qdesc; 9163 9164 /* 9165 * Create ELS Work Queues 9166 */ 9167 9168 /* Create slow-path ELS Work Queue */ 9169 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9170 phba->sli4_hba.wq_esize, 9171 phba->sli4_hba.wq_ecount, cpu); 9172 if (!qdesc) { 9173 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9174 "0504 Failed allocate slow-path ELS WQ\n"); 9175 goto out_error; 9176 } 9177 qdesc->chann = cpu; 9178 phba->sli4_hba.els_wq = qdesc; 9179 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9180 9181 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9182 /* Create NVME LS Complete Queue */ 9183 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9184 phba->sli4_hba.cq_esize, 9185 phba->sli4_hba.cq_ecount, cpu); 9186 if (!qdesc) { 9187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9188 "6079 Failed allocate NVME LS CQ\n"); 9189 goto out_error; 9190 } 9191 qdesc->chann = cpu; 9192 qdesc->qe_valid = 1; 9193 phba->sli4_hba.nvmels_cq = qdesc; 9194 9195 /* Create NVME LS Work Queue */ 9196 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9197 phba->sli4_hba.wq_esize, 9198 phba->sli4_hba.wq_ecount, cpu); 9199 if (!qdesc) { 9200 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9201 "6080 Failed allocate NVME LS WQ\n"); 9202 goto out_error; 9203 } 9204 qdesc->chann = cpu; 9205 phba->sli4_hba.nvmels_wq = qdesc; 9206 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9207 } 9208 9209 /* 9210 * Create Receive Queue (RQ) 9211 */ 9212 9213 /* Create Receive Queue for header */ 9214 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9215 phba->sli4_hba.rq_esize, 9216 phba->sli4_hba.rq_ecount, cpu); 9217 if (!qdesc) { 9218 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9219 "0506 Failed allocate receive HRQ\n"); 9220 goto out_error; 9221 } 9222 phba->sli4_hba.hdr_rq = qdesc; 9223 9224 /* Create Receive Queue for data */ 9225 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9226 phba->sli4_hba.rq_esize, 9227 phba->sli4_hba.rq_ecount, cpu); 9228 if (!qdesc) { 9229 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9230 "0507 Failed allocate receive DRQ\n"); 9231 goto out_error; 9232 } 9233 phba->sli4_hba.dat_rq = qdesc; 9234 9235 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 9236 phba->nvmet_support) { 9237 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9238 cpu = lpfc_find_cpu_handle(phba, idx, 9239 LPFC_FIND_BY_HDWQ); 9240 /* Create NVMET Receive Queue for header */ 9241 qdesc = lpfc_sli4_queue_alloc(phba, 9242 LPFC_DEFAULT_PAGE_SIZE, 9243 phba->sli4_hba.rq_esize, 9244 LPFC_NVMET_RQE_DEF_COUNT, 9245 cpu); 9246 if (!qdesc) { 9247 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9248 "3146 Failed allocate " 9249 "receive HRQ\n"); 9250 goto out_error; 9251 } 9252 qdesc->hdwq = idx; 9253 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 9254 9255 /* Only needed for header of RQ pair */ 9256 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 9257 GFP_KERNEL, 9258 cpu_to_node(cpu)); 9259 if (qdesc->rqbp == NULL) { 9260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9261 "6131 Failed allocate " 9262 "Header RQBP\n"); 9263 goto out_error; 9264 } 9265 9266 /* Put list in known state in case driver load fails. 
*/ 9267 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 9268 9269 /* Create NVMET Receive Queue for data */ 9270 qdesc = lpfc_sli4_queue_alloc(phba, 9271 LPFC_DEFAULT_PAGE_SIZE, 9272 phba->sli4_hba.rq_esize, 9273 LPFC_NVMET_RQE_DEF_COUNT, 9274 cpu); 9275 if (!qdesc) { 9276 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9277 "3156 Failed allocate " 9278 "receive DRQ\n"); 9279 goto out_error; 9280 } 9281 qdesc->hdwq = idx; 9282 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 9283 } 9284 } 9285 9286 /* Clear NVME stats */ 9287 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9288 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9289 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 9290 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 9291 } 9292 } 9293 9294 /* Clear SCSI stats */ 9295 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 9296 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9297 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 9298 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 9299 } 9300 } 9301 9302 return 0; 9303 9304 out_error: 9305 lpfc_sli4_queue_destroy(phba); 9306 return -ENOMEM; 9307 } 9308 9309 static inline void 9310 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 9311 { 9312 if (*qp != NULL) { 9313 lpfc_sli4_queue_free(*qp); 9314 *qp = NULL; 9315 } 9316 } 9317 9318 static inline void 9319 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 9320 { 9321 int idx; 9322 9323 if (*qs == NULL) 9324 return; 9325 9326 for (idx = 0; idx < max; idx++) 9327 __lpfc_sli4_release_queue(&(*qs)[idx]); 9328 9329 kfree(*qs); 9330 *qs = NULL; 9331 } 9332 9333 static inline void 9334 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 9335 { 9336 struct lpfc_sli4_hdw_queue *hdwq; 9337 struct lpfc_queue *eq; 9338 uint32_t idx; 9339 9340 hdwq = phba->sli4_hba.hdwq; 9341 9342 /* Loop thru all Hardware Queues */ 9343 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9344 /* Free the CQ/WQ corresponding to the Hardware Queue */ 9345 lpfc_sli4_queue_free(hdwq[idx].io_cq); 9346 lpfc_sli4_queue_free(hdwq[idx].io_wq); 9347 hdwq[idx].hba_eq = NULL; 9348 hdwq[idx].io_cq = NULL; 9349 hdwq[idx].io_wq = NULL; 9350 if (phba->cfg_xpsgl && !phba->nvmet_support) 9351 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 9352 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 9353 } 9354 /* Loop thru all IRQ vectors */ 9355 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 9356 /* Free the EQ corresponding to the IRQ vector */ 9357 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 9358 lpfc_sli4_queue_free(eq); 9359 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 9360 } 9361 } 9362 9363 /** 9364 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 9365 * @phba: pointer to lpfc hba data structure. 9366 * 9367 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 9368 * operation. 9369 * 9370 * Return codes 9371 * 0 - successful 9372 * -ENOMEM - No available memory 9373 * -EIO - The mailbox failed to complete successfully. 9374 **/ 9375 void 9376 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 9377 { 9378 /* 9379 * Set FREE_INIT before beginning to free the queues. 9380 * Wait until the users of queues to acknowledge to 9381 * release queues by clearing FREE_WAIT. 
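 * A queue traverser is expected to bracket its walk roughly as follows
 * (sketch only, not a verbatim excerpt of the traversal code):
 *
 *   spin_lock_irq(&phba->hbalock);
 *   if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT)
 *           back off, teardown is in progress;
 *   phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
 *   spin_unlock_irq(&phba->hbalock);
 *   ... walk the queues ...
 *   clear LPFC_QUEUE_FREE_WAIT under hbalock when done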
9382 */ 9383 spin_lock_irq(&phba->hbalock); 9384 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 9385 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 9386 spin_unlock_irq(&phba->hbalock); 9387 msleep(20); 9388 spin_lock_irq(&phba->hbalock); 9389 } 9390 spin_unlock_irq(&phba->hbalock); 9391 9392 lpfc_sli4_cleanup_poll_list(phba); 9393 9394 /* Release HBA eqs */ 9395 if (phba->sli4_hba.hdwq) 9396 lpfc_sli4_release_hdwq(phba); 9397 9398 if (phba->nvmet_support) { 9399 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 9400 phba->cfg_nvmet_mrq); 9401 9402 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 9403 phba->cfg_nvmet_mrq); 9404 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 9405 phba->cfg_nvmet_mrq); 9406 } 9407 9408 /* Release mailbox command work queue */ 9409 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 9410 9411 /* Release ELS work queue */ 9412 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 9413 9414 /* Release ELS work queue */ 9415 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 9416 9417 /* Release unsolicited receive queue */ 9418 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 9419 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 9420 9421 /* Release ELS complete queue */ 9422 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 9423 9424 /* Release NVME LS complete queue */ 9425 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 9426 9427 /* Release mailbox command complete queue */ 9428 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 9429 9430 /* Everything on this list has been freed */ 9431 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 9432 9433 /* Done with freeing the queues */ 9434 spin_lock_irq(&phba->hbalock); 9435 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 9436 spin_unlock_irq(&phba->hbalock); 9437 } 9438 9439 int 9440 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 9441 { 9442 struct lpfc_rqb *rqbp; 9443 struct lpfc_dmabuf *h_buf; 9444 struct rqb_dmabuf *rqb_buffer; 9445 9446 rqbp = rq->rqbp; 9447 while (!list_empty(&rqbp->rqb_buffer_list)) { 9448 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 9449 struct lpfc_dmabuf, list); 9450 9451 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 9452 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 9453 rqbp->buffer_count--; 9454 } 9455 return 1; 9456 } 9457 9458 static int 9459 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 9460 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 9461 int qidx, uint32_t qtype) 9462 { 9463 struct lpfc_sli_ring *pring; 9464 int rc; 9465 9466 if (!eq || !cq || !wq) { 9467 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9468 "6085 Fast-path %s (%d) not allocated\n", 9469 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 9470 return -ENOMEM; 9471 } 9472 9473 /* create the Cq first */ 9474 rc = lpfc_cq_create(phba, cq, eq, 9475 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 9476 if (rc) { 9477 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9478 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9479 qidx, (uint32_t)rc); 9480 return rc; 9481 } 9482 9483 if (qtype != LPFC_MBOX) { 9484 /* Setup cq_map for fast lookup */ 9485 if (cq_map) 9486 *cq_map = cq->queue_id; 9487 9488 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9489 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 9490 qidx, cq->queue_id, qidx, eq->queue_id); 9491 9492 /* create the wq */ 9493 rc = lpfc_wq_create(phba, wq, cq, qtype); 9494 if (rc) { 9495 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9496 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 9497 qidx, (uint32_t)rc); 9498 /* no need to tear down cq - caller will do so */ 9499 return rc; 9500 } 9501 9502 /* Bind this CQ/WQ to the NVME ring */ 9503 pring = wq->pring; 9504 pring->sli.sli4.wqp = (void *)wq; 9505 cq->pring = pring; 9506 9507 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9508 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 9509 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 9510 } else { 9511 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 9512 if (rc) { 9513 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9514 "0539 Failed setup of slow-path MQ: " 9515 "rc = 0x%x\n", rc); 9516 /* no need to tear down cq - caller will do so */ 9517 return rc; 9518 } 9519 9520 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9521 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 9522 phba->sli4_hba.mbx_wq->queue_id, 9523 phba->sli4_hba.mbx_cq->queue_id); 9524 } 9525 9526 return 0; 9527 } 9528 9529 /** 9530 * lpfc_setup_cq_lookup - Setup the CQ lookup table 9531 * @phba: pointer to lpfc hba data structure. 9532 * 9533 * This routine will populate the cq_lookup table by all 9534 * available CQ queue_id's. 9535 **/ 9536 static void 9537 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 9538 { 9539 struct lpfc_queue *eq, *childq; 9540 int qidx; 9541 9542 memset(phba->sli4_hba.cq_lookup, 0, 9543 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 9544 /* Loop thru all IRQ vectors */ 9545 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9546 /* Get the EQ corresponding to the IRQ vector */ 9547 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9548 if (!eq) 9549 continue; 9550 /* Loop through all CQs associated with that EQ */ 9551 list_for_each_entry(childq, &eq->child_list, list) { 9552 if (childq->queue_id > phba->sli4_hba.cq_max) 9553 continue; 9554 if (childq->subtype == LPFC_IO) 9555 phba->sli4_hba.cq_lookup[childq->queue_id] = 9556 childq; 9557 } 9558 } 9559 } 9560 9561 /** 9562 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 9563 * @phba: pointer to lpfc hba data structure. 9564 * 9565 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 9566 * operation. 9567 * 9568 * Return codes 9569 * 0 - successful 9570 * -ENOMEM - No available memory 9571 * -EIO - The mailbox failed to complete successfully. 
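 *
 * Note that on any failure after queue setup has begun, the routine
 * unwinds whatever it has already set up through lpfc_sli4_queue_unset(),
 * so the caller observes all-or-nothing behavior.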
9572 **/ 9573 int 9574 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 9575 { 9576 uint32_t shdr_status, shdr_add_status; 9577 union lpfc_sli4_cfg_shdr *shdr; 9578 struct lpfc_vector_map_info *cpup; 9579 struct lpfc_sli4_hdw_queue *qp; 9580 LPFC_MBOXQ_t *mboxq; 9581 int qidx, cpu; 9582 uint32_t length, usdelay; 9583 int rc = -ENOMEM; 9584 9585 /* Check for dual-ULP support */ 9586 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9587 if (!mboxq) { 9588 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9589 "3249 Unable to allocate memory for " 9590 "QUERY_FW_CFG mailbox command\n"); 9591 return -ENOMEM; 9592 } 9593 length = (sizeof(struct lpfc_mbx_query_fw_config) - 9594 sizeof(struct lpfc_sli4_cfg_mhdr)); 9595 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9596 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 9597 length, LPFC_SLI4_MBX_EMBED); 9598 9599 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9600 9601 shdr = (union lpfc_sli4_cfg_shdr *) 9602 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9603 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9604 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9605 if (shdr_status || shdr_add_status || rc) { 9606 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9607 "3250 QUERY_FW_CFG mailbox failed with status " 9608 "x%x add_status x%x, mbx status x%x\n", 9609 shdr_status, shdr_add_status, rc); 9610 if (rc != MBX_TIMEOUT) 9611 mempool_free(mboxq, phba->mbox_mem_pool); 9612 rc = -ENXIO; 9613 goto out_error; 9614 } 9615 9616 phba->sli4_hba.fw_func_mode = 9617 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 9618 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 9619 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 9620 phba->sli4_hba.physical_port = 9621 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 9622 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9623 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 9624 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 9625 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 9626 9627 if (rc != MBX_TIMEOUT) 9628 mempool_free(mboxq, phba->mbox_mem_pool); 9629 9630 /* 9631 * Set up HBA Event Queues (EQs) 9632 */ 9633 qp = phba->sli4_hba.hdwq; 9634 9635 /* Set up HBA event queue */ 9636 if (!qp) { 9637 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9638 "3147 Fast-path EQs not allocated\n"); 9639 rc = -ENOMEM; 9640 goto out_error; 9641 } 9642 9643 /* Loop thru all IRQ vectors */ 9644 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9645 /* Create HBA Event Queues (EQs) in order */ 9646 for_each_present_cpu(cpu) { 9647 cpup = &phba->sli4_hba.cpu_map[cpu]; 9648 9649 /* Look for the CPU thats using that vector with 9650 * LPFC_CPU_FIRST_IRQ set. 
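 * Exactly one present CPU carries LPFC_CPU_FIRST_IRQ for a given
 * vector, so this inner scan sets up exactly one EQ per qidx.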
9651 */ 9652 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 9653 continue; 9654 if (qidx != cpup->eq) 9655 continue; 9656 9657 /* Create an EQ for that vector */ 9658 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 9659 phba->cfg_fcp_imax); 9660 if (rc) { 9661 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9662 "0523 Failed setup of fast-path" 9663 " EQ (%d), rc = 0x%x\n", 9664 cpup->eq, (uint32_t)rc); 9665 goto out_destroy; 9666 } 9667 9668 /* Save the EQ for that vector in the hba_eq_hdl */ 9669 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 9670 qp[cpup->hdwq].hba_eq; 9671 9672 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9673 "2584 HBA EQ setup: queue[%d]-id=%d\n", 9674 cpup->eq, 9675 qp[cpup->hdwq].hba_eq->queue_id); 9676 } 9677 } 9678 9679 /* Loop thru all Hardware Queues */ 9680 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9681 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 9682 cpup = &phba->sli4_hba.cpu_map[cpu]; 9683 9684 /* Create the CQ/WQ corresponding to the Hardware Queue */ 9685 rc = lpfc_create_wq_cq(phba, 9686 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 9687 qp[qidx].io_cq, 9688 qp[qidx].io_wq, 9689 &phba->sli4_hba.hdwq[qidx].io_cq_map, 9690 qidx, 9691 LPFC_IO); 9692 if (rc) { 9693 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9694 "0535 Failed to setup fastpath " 9695 "IO WQ/CQ (%d), rc = 0x%x\n", 9696 qidx, (uint32_t)rc); 9697 goto out_destroy; 9698 } 9699 } 9700 9701 /* 9702 * Set up Slow Path Complete Queues (CQs) 9703 */ 9704 9705 /* Set up slow-path MBOX CQ/MQ */ 9706 9707 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 9708 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9709 "0528 %s not allocated\n", 9710 phba->sli4_hba.mbx_cq ? 9711 "Mailbox WQ" : "Mailbox CQ"); 9712 rc = -ENOMEM; 9713 goto out_destroy; 9714 } 9715 9716 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9717 phba->sli4_hba.mbx_cq, 9718 phba->sli4_hba.mbx_wq, 9719 NULL, 0, LPFC_MBOX); 9720 if (rc) { 9721 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9722 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 9723 (uint32_t)rc); 9724 goto out_destroy; 9725 } 9726 if (phba->nvmet_support) { 9727 if (!phba->sli4_hba.nvmet_cqset) { 9728 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9729 "3165 Fast-path NVME CQ Set " 9730 "array not allocated\n"); 9731 rc = -ENOMEM; 9732 goto out_destroy; 9733 } 9734 if (phba->cfg_nvmet_mrq > 1) { 9735 rc = lpfc_cq_create_set(phba, 9736 phba->sli4_hba.nvmet_cqset, 9737 qp, 9738 LPFC_WCQ, LPFC_NVMET); 9739 if (rc) { 9740 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9741 "3164 Failed setup of NVME CQ " 9742 "Set, rc = 0x%x\n", 9743 (uint32_t)rc); 9744 goto out_destroy; 9745 } 9746 } else { 9747 /* Set up NVMET Receive Complete Queue */ 9748 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 9749 qp[0].hba_eq, 9750 LPFC_WCQ, LPFC_NVMET); 9751 if (rc) { 9752 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9753 "6089 Failed setup NVMET CQ: " 9754 "rc = 0x%x\n", (uint32_t)rc); 9755 goto out_destroy; 9756 } 9757 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 9758 9759 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9760 "6090 NVMET CQ setup: cq-id=%d, " 9761 "parent eq-id=%d\n", 9762 phba->sli4_hba.nvmet_cqset[0]->queue_id, 9763 qp[0].hba_eq->queue_id); 9764 } 9765 } 9766 9767 /* Set up slow-path ELS WQ/CQ */ 9768 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 9769 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9770 "0530 ELS %s not allocated\n", 9771 phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); 9772 rc = -ENOMEM; 9773 goto out_destroy; 9774 } 9775 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9776 phba->sli4_hba.els_cq, 9777 phba->sli4_hba.els_wq, 9778 NULL, 0, LPFC_ELS); 9779 if (rc) { 9780 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9781 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 9782 (uint32_t)rc); 9783 goto out_destroy; 9784 } 9785 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9786 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 9787 phba->sli4_hba.els_wq->queue_id, 9788 phba->sli4_hba.els_cq->queue_id); 9789 9790 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9791 /* Set up NVME LS Complete Queue */ 9792 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9794 "6091 LS %s not allocated\n", 9795 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 9796 rc = -ENOMEM; 9797 goto out_destroy; 9798 } 9799 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9800 phba->sli4_hba.nvmels_cq, 9801 phba->sli4_hba.nvmels_wq, 9802 NULL, 0, LPFC_NVME_LS); 9803 if (rc) { 9804 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9805 "0526 Failed setup of NVVME LS WQ/CQ: " 9806 "rc = 0x%x\n", (uint32_t)rc); 9807 goto out_destroy; 9808 } 9809 9810 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9811 "6096 ELS WQ setup: wq-id=%d, " 9812 "parent cq-id=%d\n", 9813 phba->sli4_hba.nvmels_wq->queue_id, 9814 phba->sli4_hba.nvmels_cq->queue_id); 9815 } 9816 9817 /* 9818 * Create NVMET Receive Queue (RQ) 9819 */ 9820 if (phba->nvmet_support) { 9821 if ((!phba->sli4_hba.nvmet_cqset) || 9822 (!phba->sli4_hba.nvmet_mrq_hdr) || 9823 (!phba->sli4_hba.nvmet_mrq_data)) { 9824 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9825 "6130 MRQ CQ Queues not " 9826 "allocated\n"); 9827 rc = -ENOMEM; 9828 goto out_destroy; 9829 } 9830 if (phba->cfg_nvmet_mrq > 1) { 9831 rc = lpfc_mrq_create(phba, 9832 phba->sli4_hba.nvmet_mrq_hdr, 9833 phba->sli4_hba.nvmet_mrq_data, 9834 phba->sli4_hba.nvmet_cqset, 9835 LPFC_NVMET); 9836 if (rc) { 9837 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9838 "6098 Failed setup of NVMET " 9839 "MRQ: rc = 0x%x\n", 9840 (uint32_t)rc); 9841 goto out_destroy; 9842 } 9843 9844 } else { 9845 rc = lpfc_rq_create(phba, 9846 phba->sli4_hba.nvmet_mrq_hdr[0], 9847 phba->sli4_hba.nvmet_mrq_data[0], 9848 phba->sli4_hba.nvmet_cqset[0], 9849 LPFC_NVMET); 9850 if (rc) { 9851 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9852 "6057 Failed setup of NVMET " 9853 "Receive Queue: rc = 0x%x\n", 9854 (uint32_t)rc); 9855 goto out_destroy; 9856 } 9857 9858 lpfc_printf_log( 9859 phba, KERN_INFO, LOG_INIT, 9860 "6099 NVMET RQ setup: hdr-rq-id=%d, " 9861 "dat-rq-id=%d parent cq-id=%d\n", 9862 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 9863 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 9864 phba->sli4_hba.nvmet_cqset[0]->queue_id); 9865 9866 } 9867 } 9868 9869 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 9870 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9871 "0540 Receive Queue not allocated\n"); 9872 rc = -ENOMEM; 9873 goto out_destroy; 9874 } 9875 9876 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 9877 phba->sli4_hba.els_cq, LPFC_USOL); 9878 if (rc) { 9879 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9880 "0541 Failed setup of Receive Queue: " 9881 "rc = 0x%x\n", (uint32_t)rc); 9882 goto out_destroy; 9883 } 9884 9885 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9886 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 9887 "parent cq-id=%d\n", 9888 phba->sli4_hba.hdr_rq->queue_id, 9889 
phba->sli4_hba.dat_rq->queue_id, 9890 phba->sli4_hba.els_cq->queue_id); 9891 9892 if (phba->cfg_fcp_imax) 9893 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 9894 else 9895 usdelay = 0; 9896 9897 for (qidx = 0; qidx < phba->cfg_irq_chann; 9898 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 9899 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 9900 usdelay); 9901 9902 if (phba->sli4_hba.cq_max) { 9903 kfree(phba->sli4_hba.cq_lookup); 9904 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 9905 sizeof(struct lpfc_queue *), GFP_KERNEL); 9906 if (!phba->sli4_hba.cq_lookup) { 9907 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9908 "0549 Failed setup of CQ Lookup table: " 9909 "size 0x%x\n", phba->sli4_hba.cq_max); 9910 rc = -ENOMEM; 9911 goto out_destroy; 9912 } 9913 lpfc_setup_cq_lookup(phba); 9914 } 9915 return 0; 9916 9917 out_destroy: 9918 lpfc_sli4_queue_unset(phba); 9919 out_error: 9920 return rc; 9921 } 9922 9923 /** 9924 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 9925 * @phba: pointer to lpfc hba data structure. 9926 * 9927 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 9928 * operation. 9929 * 9930 * Return codes 9931 * 0 - successful 9932 * -ENOMEM - No available memory 9933 * -EIO - The mailbox failed to complete successfully. 9934 **/ 9935 void 9936 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 9937 { 9938 struct lpfc_sli4_hdw_queue *qp; 9939 struct lpfc_queue *eq; 9940 int qidx; 9941 9942 /* Unset mailbox command work queue */ 9943 if (phba->sli4_hba.mbx_wq) 9944 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 9945 9946 /* Unset NVME LS work queue */ 9947 if (phba->sli4_hba.nvmels_wq) 9948 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 9949 9950 /* Unset ELS work queue */ 9951 if (phba->sli4_hba.els_wq) 9952 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 9953 9954 /* Unset unsolicited receive queue */ 9955 if (phba->sli4_hba.hdr_rq) 9956 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 9957 phba->sli4_hba.dat_rq); 9958 9959 /* Unset mailbox command complete queue */ 9960 if (phba->sli4_hba.mbx_cq) 9961 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 9962 9963 /* Unset ELS complete queue */ 9964 if (phba->sli4_hba.els_cq) 9965 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 9966 9967 /* Unset NVME LS complete queue */ 9968 if (phba->sli4_hba.nvmels_cq) 9969 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 9970 9971 if (phba->nvmet_support) { 9972 /* Unset NVMET MRQ queue */ 9973 if (phba->sli4_hba.nvmet_mrq_hdr) { 9974 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9975 lpfc_rq_destroy( 9976 phba, 9977 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9978 phba->sli4_hba.nvmet_mrq_data[qidx]); 9979 } 9980 9981 /* Unset NVMET CQ Set complete queue */ 9982 if (phba->sli4_hba.nvmet_cqset) { 9983 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9984 lpfc_cq_destroy( 9985 phba, phba->sli4_hba.nvmet_cqset[qidx]); 9986 } 9987 } 9988 9989 /* Unset fast-path SLI4 queues */ 9990 if (phba->sli4_hba.hdwq) { 9991 /* Loop thru all Hardware Queues */ 9992 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9993 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 9994 qp = &phba->sli4_hba.hdwq[qidx]; 9995 lpfc_wq_destroy(phba, qp->io_wq); 9996 lpfc_cq_destroy(phba, qp->io_cq); 9997 } 9998 /* Loop thru all IRQ vectors */ 9999 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 10000 /* Destroy the EQ corresponding to the IRQ vector */ 10001 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 10002 lpfc_eq_destroy(phba, eq); 10003 } 10004 } 10005 10006 
kfree(phba->sli4_hba.cq_lookup); 10007 phba->sli4_hba.cq_lookup = NULL; 10008 phba->sli4_hba.cq_max = 0; 10009 } 10010 10011 /** 10012 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 10013 * @phba: pointer to lpfc hba data structure. 10014 * 10015 * This routine is invoked to allocate and set up a pool of completion queue 10016 * events. The body of the completion queue event is a completion queue entry 10017 * CQE. For now, this pool is used for the interrupt service routine to queue 10018 * the following HBA completion queue events for the worker thread to process: 10019 * - Mailbox asynchronous events 10020 * - Receive queue completion unsolicited events 10021 * Later, this can be used for all the slow-path events. 10022 * 10023 * Return codes 10024 * 0 - successful 10025 * -ENOMEM - No available memory 10026 **/ 10027 static int 10028 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 10029 { 10030 struct lpfc_cq_event *cq_event; 10031 int i; 10032 10033 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 10034 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 10035 if (!cq_event) 10036 goto out_pool_create_fail; 10037 list_add_tail(&cq_event->list, 10038 &phba->sli4_hba.sp_cqe_event_pool); 10039 } 10040 return 0; 10041 10042 out_pool_create_fail: 10043 lpfc_sli4_cq_event_pool_destroy(phba); 10044 return -ENOMEM; 10045 } 10046 10047 /** 10048 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 10049 * @phba: pointer to lpfc hba data structure. 10050 * 10051 * This routine is invoked to free the pool of completion queue events at 10052 * driver unload time. Note that, it is the responsibility of the driver 10053 * cleanup routine to free all the outstanding completion-queue events 10054 * allocated from this pool back into the pool before invoking this routine 10055 * to destroy the pool. 10056 **/ 10057 static void 10058 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 10059 { 10060 struct lpfc_cq_event *cq_event, *next_cq_event; 10061 10062 list_for_each_entry_safe(cq_event, next_cq_event, 10063 &phba->sli4_hba.sp_cqe_event_pool, list) { 10064 list_del(&cq_event->list); 10065 kfree(cq_event); 10066 } 10067 } 10068 10069 /** 10070 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 10071 * @phba: pointer to lpfc hba data structure. 10072 * 10073 * This routine is the lock free version of the API invoked to allocate a 10074 * completion-queue event from the free pool. 10075 * 10076 * Return: Pointer to the newly allocated completion-queue event if successful 10077 * NULL otherwise. 10078 **/ 10079 struct lpfc_cq_event * 10080 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 10081 { 10082 struct lpfc_cq_event *cq_event = NULL; 10083 10084 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 10085 struct lpfc_cq_event, list); 10086 return cq_event; 10087 } 10088 10089 /** 10090 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 10091 * @phba: pointer to lpfc hba data structure. 10092 * 10093 * This routine is the lock version of the API invoked to allocate a 10094 * completion-queue event from the free pool. 10095 * 10096 * Return: Pointer to the newly allocated completion-queue event if successful 10097 * NULL otherwise. 
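 * A typical caller pairs this with lpfc_sli4_cq_event_release(); sketch
 * only, with hypothetical surrounding logic:
 *
 *   cq_event = lpfc_sli4_cq_event_alloc(phba);
 *   if (!cq_event)
 *           report the failure to the interrupt handler;
 *   copy the CQE payload into cq_event and queue it for the worker;
 *   on any error path, lpfc_sli4_cq_event_release(phba, cq_event);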
10098 **/ 10099 struct lpfc_cq_event * 10100 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 10101 { 10102 struct lpfc_cq_event *cq_event; 10103 unsigned long iflags; 10104 10105 spin_lock_irqsave(&phba->hbalock, iflags); 10106 cq_event = __lpfc_sli4_cq_event_alloc(phba); 10107 spin_unlock_irqrestore(&phba->hbalock, iflags); 10108 return cq_event; 10109 } 10110 10111 /** 10112 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 10113 * @phba: pointer to lpfc hba data structure. 10114 * @cq_event: pointer to the completion queue event to be freed. 10115 * 10116 * This routine is the lock free version of the API invoked to release a 10117 * completion-queue event back into the free pool. 10118 **/ 10119 void 10120 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 10121 struct lpfc_cq_event *cq_event) 10122 { 10123 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 10124 } 10125 10126 /** 10127 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 10128 * @phba: pointer to lpfc hba data structure. 10129 * @cq_event: pointer to the completion queue event to be freed. 10130 * 10131 * This routine is the lock version of the API invoked to release a 10132 * completion-queue event back into the free pool. 10133 **/ 10134 void 10135 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 10136 struct lpfc_cq_event *cq_event) 10137 { 10138 unsigned long iflags; 10139 spin_lock_irqsave(&phba->hbalock, iflags); 10140 __lpfc_sli4_cq_event_release(phba, cq_event); 10141 spin_unlock_irqrestore(&phba->hbalock, iflags); 10142 } 10143 10144 /** 10145 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 10146 * @phba: pointer to lpfc hba data structure. 10147 * 10148 * This routine frees all the pending completion-queue events back 10149 * into the free pool for device reset. 10150 **/ 10151 static void 10152 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 10153 { 10154 LIST_HEAD(cq_event_list); 10155 struct lpfc_cq_event *cq_event; 10156 unsigned long iflags; 10157 10158 /* Retrieve all the pending WCQEs from pending WCQE lists */ 10159 10160 /* Pending ELS XRI abort events */ 10161 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 10162 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 10163 &cq_event_list); 10164 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 10165 10166 /* Pending async events */ 10167 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 10168 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 10169 &cq_event_list); 10170 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 10171 10172 while (!list_empty(&cq_event_list)) { 10173 list_remove_head(&cq_event_list, cq_event, 10174 struct lpfc_cq_event, list); 10175 lpfc_sli4_cq_event_release(phba, cq_event); 10176 } 10177 } 10178 10179 /** 10180 * lpfc_pci_function_reset - Reset pci function. 10181 * @phba: pointer to lpfc hba data structure. 10182 * 10183 * This routine is invoked to request a PCI function reset. It destroys 10184 * all resources assigned to the PCI function that originates this request. 10185 * 10186 * Return codes 10187 * 0 - successful 10188 * -ENOMEM - No available memory 10189 * -EIO - The mailbox failed to complete successfully.
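 *
 * Note: for if_type 0 the reset is requested through the FUNCTION_RESET
 * mailbox command, while for if_type 2/6 it is driven through the SLIPORT
 * control register (INIT_PORT), polling up to roughly 30 seconds
 * (1500 iterations x 20 ms) for the port to report RDY.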
10190 **/ 10191 int 10192 lpfc_pci_function_reset(struct lpfc_hba *phba) 10193 { 10194 LPFC_MBOXQ_t *mboxq; 10195 uint32_t rc = 0, if_type; 10196 uint32_t shdr_status, shdr_add_status; 10197 uint32_t rdy_chk; 10198 uint32_t port_reset = 0; 10199 union lpfc_sli4_cfg_shdr *shdr; 10200 struct lpfc_register reg_data; 10201 uint16_t devid; 10202 10203 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10204 switch (if_type) { 10205 case LPFC_SLI_INTF_IF_TYPE_0: 10206 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10207 GFP_KERNEL); 10208 if (!mboxq) { 10209 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10210 "0494 Unable to allocate memory for " 10211 "issuing SLI_FUNCTION_RESET mailbox " 10212 "command\n"); 10213 return -ENOMEM; 10214 } 10215 10216 /* Setup PCI function reset mailbox-ioctl command */ 10217 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10218 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 10219 LPFC_SLI4_MBX_EMBED); 10220 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10221 shdr = (union lpfc_sli4_cfg_shdr *) 10222 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 10223 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10224 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 10225 &shdr->response); 10226 if (rc != MBX_TIMEOUT) 10227 mempool_free(mboxq, phba->mbox_mem_pool); 10228 if (shdr_status || shdr_add_status || rc) { 10229 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10230 "0495 SLI_FUNCTION_RESET mailbox " 10231 "failed with status x%x add_status x%x," 10232 " mbx status x%x\n", 10233 shdr_status, shdr_add_status, rc); 10234 rc = -ENXIO; 10235 } 10236 break; 10237 case LPFC_SLI_INTF_IF_TYPE_2: 10238 case LPFC_SLI_INTF_IF_TYPE_6: 10239 wait: 10240 /* 10241 * Poll the Port Status Register and wait for RDY for 10242 * up to 30 seconds. If the port doesn't respond, treat 10243 * it as an error. 10244 */ 10245 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 10246 if (lpfc_readl(phba->sli4_hba.u.if_type2. 10247 STATUSregaddr, ®_data.word0)) { 10248 rc = -ENODEV; 10249 goto out; 10250 } 10251 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 10252 break; 10253 msleep(20); 10254 } 10255 10256 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 10257 phba->work_status[0] = readl( 10258 phba->sli4_hba.u.if_type2.ERR1regaddr); 10259 phba->work_status[1] = readl( 10260 phba->sli4_hba.u.if_type2.ERR2regaddr); 10261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10262 "2890 Port not ready, port status reg " 10263 "0x%x error 1=0x%x, error 2=0x%x\n", 10264 reg_data.word0, 10265 phba->work_status[0], 10266 phba->work_status[1]); 10267 rc = -ENODEV; 10268 goto out; 10269 } 10270 10271 if (!port_reset) { 10272 /* 10273 * Reset the port now 10274 */ 10275 reg_data.word0 = 0; 10276 bf_set(lpfc_sliport_ctrl_end, ®_data, 10277 LPFC_SLIPORT_LITTLE_ENDIAN); 10278 bf_set(lpfc_sliport_ctrl_ip, ®_data, 10279 LPFC_SLIPORT_INIT_PORT); 10280 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 10281 CTRLregaddr); 10282 /* flush */ 10283 pci_read_config_word(phba->pcidev, 10284 PCI_DEVICE_ID, &devid); 10285 10286 port_reset = 1; 10287 msleep(20); 10288 goto wait; 10289 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 10290 rc = -ENODEV; 10291 goto out; 10292 } 10293 break; 10294 10295 case LPFC_SLI_INTF_IF_TYPE_1: 10296 default: 10297 break; 10298 } 10299 10300 out: 10301 /* Catch the not-ready port failure after a port reset. 
*/ 10302 if (rc) { 10303 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10304 "3317 HBA not functional: IP Reset Failed " 10305 "try: echo fw_reset > board_mode\n"); 10306 rc = -ENODEV; 10307 } 10308 10309 return rc; 10310 } 10311 10312 /** 10313 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 10314 * @phba: pointer to lpfc hba data structure. 10315 * 10316 * This routine is invoked to set up the PCI device memory space for device 10317 * with SLI-4 interface spec. 10318 * 10319 * Return codes 10320 * 0 - successful 10321 * other values - error 10322 **/ 10323 static int 10324 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 10325 { 10326 struct pci_dev *pdev = phba->pcidev; 10327 unsigned long bar0map_len, bar1map_len, bar2map_len; 10328 int error; 10329 uint32_t if_type; 10330 10331 if (!pdev) 10332 return -ENODEV; 10333 10334 /* Set the device DMA mask size */ 10335 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10336 if (error) 10337 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10338 if (error) 10339 return error; 10340 10341 /* 10342 * The BARs and register set definitions and offset locations are 10343 * dependent on the if_type. 10344 */ 10345 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 10346 &phba->sli4_hba.sli_intf.word0)) { 10347 return -ENODEV; 10348 } 10349 10350 /* There is no SLI3 failback for SLI4 devices. */ 10351 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 10352 LPFC_SLI_INTF_VALID) { 10353 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10354 "2894 SLI_INTF reg contents invalid " 10355 "sli_intf reg 0x%x\n", 10356 phba->sli4_hba.sli_intf.word0); 10357 return -ENODEV; 10358 } 10359 10360 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10361 /* 10362 * Get the bus address of SLI4 device Bar regions and the 10363 * number of bytes required by each mapping. The mapping of the 10364 * particular PCI BARs regions is dependent on the type of 10365 * SLI4 device. 
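 * Summarizing the mappings performed below: the region at PCI_64BIT_BAR0
 * always holds the SLI4 config registers; for if_type 0, PCI_64BIT_BAR2
 * holds the control registers and PCI_64BIT_BAR4 the doorbells; for
 * if_type 6, PCI_64BIT_BAR2 holds the doorbells and PCI_64BIT_BAR4 the
 * optional DPP region.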
10366 */ 10367 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 10368 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 10369 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 10370 10371 /* 10372 * Map SLI4 PCI Config Space Register base to a kernel virtual 10373 * addr 10374 */ 10375 phba->sli4_hba.conf_regs_memmap_p = 10376 ioremap(phba->pci_bar0_map, bar0map_len); 10377 if (!phba->sli4_hba.conf_regs_memmap_p) { 10378 dev_printk(KERN_ERR, &pdev->dev, 10379 "ioremap failed for SLI4 PCI config " 10380 "registers.\n"); 10381 return -ENODEV; 10382 } 10383 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 10384 /* Set up BAR0 PCI config space register memory map */ 10385 lpfc_sli4_bar0_register_memmap(phba, if_type); 10386 } else { 10387 phba->pci_bar0_map = pci_resource_start(pdev, 1); 10388 bar0map_len = pci_resource_len(pdev, 1); 10389 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10390 dev_printk(KERN_ERR, &pdev->dev, 10391 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 10392 return -ENODEV; 10393 } 10394 phba->sli4_hba.conf_regs_memmap_p = 10395 ioremap(phba->pci_bar0_map, bar0map_len); 10396 if (!phba->sli4_hba.conf_regs_memmap_p) { 10397 dev_printk(KERN_ERR, &pdev->dev, 10398 "ioremap failed for SLI4 PCI config " 10399 "registers.\n"); 10400 return -ENODEV; 10401 } 10402 lpfc_sli4_bar0_register_memmap(phba, if_type); 10403 } 10404 10405 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10406 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 10407 /* 10408 * Map SLI4 if type 0 HBA Control Register base to a 10409 * kernel virtual address and setup the registers. 10410 */ 10411 phba->pci_bar1_map = pci_resource_start(pdev, 10412 PCI_64BIT_BAR2); 10413 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10414 phba->sli4_hba.ctrl_regs_memmap_p = 10415 ioremap(phba->pci_bar1_map, 10416 bar1map_len); 10417 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 10418 dev_err(&pdev->dev, 10419 "ioremap failed for SLI4 HBA " 10420 "control registers.\n"); 10421 error = -ENOMEM; 10422 goto out_iounmap_conf; 10423 } 10424 phba->pci_bar2_memmap_p = 10425 phba->sli4_hba.ctrl_regs_memmap_p; 10426 lpfc_sli4_bar1_register_memmap(phba, if_type); 10427 } else { 10428 error = -ENOMEM; 10429 goto out_iounmap_conf; 10430 } 10431 } 10432 10433 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 10434 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 10435 /* 10436 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 10437 * virtual address and setup the registers. 10438 */ 10439 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 10440 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10441 phba->sli4_hba.drbl_regs_memmap_p = 10442 ioremap(phba->pci_bar1_map, bar1map_len); 10443 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10444 dev_err(&pdev->dev, 10445 "ioremap failed for SLI4 HBA doorbell registers.\n"); 10446 error = -ENOMEM; 10447 goto out_iounmap_conf; 10448 } 10449 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 10450 lpfc_sli4_bar1_register_memmap(phba, if_type); 10451 } 10452 10453 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10454 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10455 /* 10456 * Map SLI4 if type 0 HBA Doorbell Register base to 10457 * a kernel virtual address and setup the registers. 
10458 */ 10459 phba->pci_bar2_map = pci_resource_start(pdev, 10460 PCI_64BIT_BAR4); 10461 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10462 phba->sli4_hba.drbl_regs_memmap_p = 10463 ioremap(phba->pci_bar2_map, 10464 bar2map_len); 10465 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10466 dev_err(&pdev->dev, 10467 "ioremap failed for SLI4 HBA" 10468 " doorbell registers.\n"); 10469 error = -ENOMEM; 10470 goto out_iounmap_ctrl; 10471 } 10472 phba->pci_bar4_memmap_p = 10473 phba->sli4_hba.drbl_regs_memmap_p; 10474 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 10475 if (error) 10476 goto out_iounmap_all; 10477 } else { 10478 error = -ENOMEM; 10479 goto out_iounmap_all; 10480 } 10481 } 10482 10483 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 10484 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10485 /* 10486 * Map SLI4 if type 6 HBA DPP Register base to a kernel 10487 * virtual address and setup the registers. 10488 */ 10489 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 10490 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10491 phba->sli4_hba.dpp_regs_memmap_p = 10492 ioremap(phba->pci_bar2_map, bar2map_len); 10493 if (!phba->sli4_hba.dpp_regs_memmap_p) { 10494 dev_err(&pdev->dev, 10495 "ioremap failed for SLI4 HBA dpp registers.\n"); 10496 error = -ENOMEM; 10497 goto out_iounmap_ctrl; 10498 } 10499 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 10500 } 10501 10502 /* Set up the EQ/CQ register handling functions now */ 10503 switch (if_type) { 10504 case LPFC_SLI_INTF_IF_TYPE_0: 10505 case LPFC_SLI_INTF_IF_TYPE_2: 10506 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 10507 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 10508 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 10509 break; 10510 case LPFC_SLI_INTF_IF_TYPE_6: 10511 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 10512 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 10513 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 10514 break; 10515 default: 10516 break; 10517 } 10518 10519 return 0; 10520 10521 out_iounmap_all: 10522 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10523 out_iounmap_ctrl: 10524 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10525 out_iounmap_conf: 10526 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10527 10528 return error; 10529 } 10530 10531 /** 10532 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 10533 * @phba: pointer to lpfc hba data structure. 10534 * 10535 * This routine is invoked to unset the PCI device memory space for device 10536 * with SLI-4 interface spec.
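 * Each iounmap() performed here mirrors an ioremap() done for the same
 * if_type in lpfc_sli4_pci_mem_setup().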
10537 **/ 10538 static void 10539 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 10540 { 10541 uint32_t if_type; 10542 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10543 10544 switch (if_type) { 10545 case LPFC_SLI_INTF_IF_TYPE_0: 10546 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10547 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10548 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10549 break; 10550 case LPFC_SLI_INTF_IF_TYPE_2: 10551 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10552 break; 10553 case LPFC_SLI_INTF_IF_TYPE_6: 10554 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10555 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10556 if (phba->sli4_hba.dpp_regs_memmap_p) 10557 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 10558 break; 10559 case LPFC_SLI_INTF_IF_TYPE_1: 10560 default: 10561 dev_printk(KERN_ERR, &phba->pcidev->dev, 10562 "FATAL - unsupported SLI4 interface type - %d\n", 10563 if_type); 10564 break; 10565 } 10566 } 10567 10568 /** 10569 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 10570 * @phba: pointer to lpfc hba data structure. 10571 * 10572 * This routine is invoked to enable the MSI-X interrupt vectors to device 10573 * with SLI-3 interface specs. 10574 * 10575 * Return codes 10576 * 0 - successful 10577 * other values - error 10578 **/ 10579 static int 10580 lpfc_sli_enable_msix(struct lpfc_hba *phba) 10581 { 10582 int rc; 10583 LPFC_MBOXQ_t *pmb; 10584 10585 /* Set up MSI-X multi-message vectors */ 10586 rc = pci_alloc_irq_vectors(phba->pcidev, 10587 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 10588 if (rc < 0) { 10589 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10590 "0420 PCI enable MSI-X failed (%d)\n", rc); 10591 goto vec_fail_out; 10592 } 10593 10594 /* 10595 * Assign MSI-X vectors to interrupt handlers 10596 */ 10597 10598 /* vector-0 is associated to slow-path handler */ 10599 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 10600 &lpfc_sli_sp_intr_handler, 0, 10601 LPFC_SP_DRIVER_HANDLER_NAME, phba); 10602 if (rc) { 10603 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10604 "0421 MSI-X slow-path request_irq failed " 10605 "(%d)\n", rc); 10606 goto msi_fail_out; 10607 } 10608 10609 /* vector-1 is associated to fast-path handler */ 10610 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 10611 &lpfc_sli_fp_intr_handler, 0, 10612 LPFC_FP_DRIVER_HANDLER_NAME, phba); 10613 10614 if (rc) { 10615 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10616 "0429 MSI-X fast-path request_irq failed " 10617 "(%d)\n", rc); 10618 goto irq_fail_out; 10619 } 10620 10621 /* 10622 * Configure HBA MSI-X attention conditions to messages 10623 */ 10624 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10625 10626 if (!pmb) { 10627 rc = -ENOMEM; 10628 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10629 "0474 Unable to allocate memory for issuing " 10630 "MBOX_CONFIG_MSI command\n"); 10631 goto mem_fail_out; 10632 } 10633 rc = lpfc_config_msi(phba, pmb); 10634 if (rc) 10635 goto mbx_fail_out; 10636 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10637 if (rc != MBX_SUCCESS) { 10638 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 10639 "0351 Config MSI mailbox command failed, " 10640 "mbxCmd x%x, mbxStatus x%x\n", 10641 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 10642 goto mbx_fail_out; 10643 } 10644 10645 /* Free memory allocated for mailbox command */ 10646 mempool_free(pmb, phba->mbox_mem_pool); 10647 return rc; 10648 10649 mbx_fail_out: 10650 /* Free memory allocated for mailbox command */ 10651 mempool_free(pmb, 
phba->mbox_mem_pool); 10652 10653 mem_fail_out: 10654 /* free the irq already requested */ 10655 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 10656 10657 irq_fail_out: 10658 /* free the irq already requested */ 10659 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 10660 10661 msi_fail_out: 10662 /* Unconfigure MSI-X capability structure */ 10663 pci_free_irq_vectors(phba->pcidev); 10664 10665 vec_fail_out: 10666 return rc; 10667 } 10668 10669 /** 10670 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 10671 * @phba: pointer to lpfc hba data structure. 10672 * 10673 * This routine is invoked to enable the MSI interrupt mode to device with 10674 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 10675 * enable the MSI vector. The device driver is responsible for calling 10676 * request_irq() to register the MSI vector with an interrupt handler, which 10677 * is done in this function. 10678 * 10679 * Return codes 10680 * 0 - successful 10681 * other values - error 10682 */ 10683 static int 10684 lpfc_sli_enable_msi(struct lpfc_hba *phba) 10685 { 10686 int rc; 10687 10688 rc = pci_enable_msi(phba->pcidev); 10689 if (!rc) 10690 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10691 "0462 PCI enable MSI mode success.\n"); 10692 else { 10693 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10694 "0471 PCI enable MSI mode failed (%d)\n", rc); 10695 return rc; 10696 } 10697 10698 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10699 0, LPFC_DRIVER_NAME, phba); 10700 if (rc) { 10701 pci_disable_msi(phba->pcidev); 10702 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10703 "0478 MSI request_irq failed (%d)\n", rc); 10704 } 10705 return rc; 10706 } 10707 10708 /** 10709 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 10710 * @phba: pointer to lpfc hba data structure. 10711 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 10712 * 10713 * This routine is invoked to enable device interrupt and associate driver's 10714 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 10715 * spec. Depending on the interrupt mode configured for the driver, the driver 10716 * will try to fall back from the configured interrupt mode to an interrupt 10717 * mode which is supported by the platform, kernel, and device in the order 10718 * of: 10719 * MSI-X -> MSI -> IRQ.
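 * For example, with cfg_mode == 2 the routine first attempts MSI-X; if
 * pci_alloc_irq_vectors() or request_irq() fails there, it falls back
 * to MSI (cfg_mode >= 1), and finally to a shared INTx line.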
10720 * 10721 * Return codes 10722 * 0 - successful 10723 * other values - error 10724 **/ 10725 static uint32_t 10726 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10727 { 10728 uint32_t intr_mode = LPFC_INTR_ERROR; 10729 int retval; 10730 10731 if (cfg_mode == 2) { 10732 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 10733 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 10734 if (!retval) { 10735 /* Now, try to enable MSI-X interrupt mode */ 10736 retval = lpfc_sli_enable_msix(phba); 10737 if (!retval) { 10738 /* Indicate initialization to MSI-X mode */ 10739 phba->intr_type = MSIX; 10740 intr_mode = 2; 10741 } 10742 } 10743 } 10744 10745 /* Fallback to MSI if MSI-X initialization failed */ 10746 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10747 retval = lpfc_sli_enable_msi(phba); 10748 if (!retval) { 10749 /* Indicate initialization to MSI mode */ 10750 phba->intr_type = MSI; 10751 intr_mode = 1; 10752 } 10753 } 10754 10755 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 10756 if (phba->intr_type == NONE) { 10757 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10758 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10759 if (!retval) { 10760 /* Indicate initialization to INTx mode */ 10761 phba->intr_type = INTx; 10762 intr_mode = 0; 10763 } 10764 } 10765 return intr_mode; 10766 } 10767 10768 /** 10769 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 10770 * @phba: pointer to lpfc hba data structure. 10771 * 10772 * This routine is invoked to disable device interrupt and disassociate the 10773 * driver's interrupt handler(s) from interrupt vector(s) to device with 10774 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 10775 * release the interrupt vector(s) for the message signaled interrupt. 10776 **/ 10777 static void 10778 lpfc_sli_disable_intr(struct lpfc_hba *phba) 10779 { 10780 int nr_irqs, i; 10781 10782 if (phba->intr_type == MSIX) 10783 nr_irqs = LPFC_MSIX_VECTORS; 10784 else 10785 nr_irqs = 1; 10786 10787 for (i = 0; i < nr_irqs; i++) 10788 free_irq(pci_irq_vector(phba->pcidev, i), phba); 10789 pci_free_irq_vectors(phba->pcidev); 10790 10791 /* Reset interrupt management states */ 10792 phba->intr_type = NONE; 10793 phba->sli.slistat.sli_intr = 0; 10794 } 10795 10796 /** 10797 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 10798 * @phba: pointer to lpfc hba data structure. 10799 * @id: EQ vector index or Hardware Queue index 10800 * @match: LPFC_FIND_BY_EQ = match by EQ 10801 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 10802 * Return the CPU that matches the selection criteria 10803 */ 10804 static uint16_t 10805 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 10806 { 10807 struct lpfc_vector_map_info *cpup; 10808 int cpu; 10809 10810 /* Loop through all CPUs */ 10811 for_each_present_cpu(cpu) { 10812 cpup = &phba->sli4_hba.cpu_map[cpu]; 10813 10814 /* If we are matching by EQ, there may be multiple CPUs 10815 * using the same vector, so select the one with 10816 * LPFC_CPU_FIRST_IRQ set.
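 * For example (hypothetical), a call with match == LPFC_FIND_BY_EQ and
 * id == 3 returns the first present CPU whose cpup->eq is 3 and which
 * carries LPFC_CPU_FIRST_IRQ.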
10817 */ 10818 if ((match == LPFC_FIND_BY_EQ) && 10819 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 10820 (cpup->eq == id)) 10821 return cpu; 10822 10823 /* If matching by HDWQ, select the first CPU that matches */ 10824 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 10825 return cpu; 10826 } 10827 return 0; 10828 } 10829 10830 #ifdef CONFIG_X86 10831 /** 10832 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 10833 * @phba: pointer to lpfc hba data structure. 10834 * @cpu: CPU map index 10835 * @phys_id: CPU package physical id 10836 * @core_id: CPU core id 10837 */ 10838 static int 10839 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 10840 uint16_t phys_id, uint16_t core_id) 10841 { 10842 struct lpfc_vector_map_info *cpup; 10843 int idx; 10844 10845 for_each_present_cpu(idx) { 10846 cpup = &phba->sli4_hba.cpu_map[idx]; 10847 /* Does the cpup match the one we are looking for */ 10848 if ((cpup->phys_id == phys_id) && 10849 (cpup->core_id == core_id) && 10850 (cpu != idx)) 10851 return 1; 10852 } 10853 return 0; 10854 } 10855 #endif 10856 10857 /* 10858 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 10859 * @phba: pointer to lpfc hba data structure. 10860 * @eqidx: index for eq and irq vector 10861 * @flag: flags to set for vector_map structure 10862 * @cpu: cpu used to index vector_map structure 10863 * 10864 * The routine assigns eq info into vector_map structure 10865 */ 10866 static inline void 10867 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 10868 unsigned int cpu) 10869 { 10870 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 10871 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 10872 10873 cpup->eq = eqidx; 10874 cpup->flag |= flag; 10875 10876 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10877 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 10878 cpu, eqhdl->irq, cpup->eq, cpup->flag); 10879 } 10880 10881 /** 10882 * lpfc_cpu_map_array_init - Initialize cpu_map structure 10883 * @phba: pointer to lpfc hba data structure. 10884 * 10885 * The routine initializes the cpu_map array structure 10886 */ 10887 static void 10888 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 10889 { 10890 struct lpfc_vector_map_info *cpup; 10891 struct lpfc_eq_intr_info *eqi; 10892 int cpu; 10893 10894 for_each_possible_cpu(cpu) { 10895 cpup = &phba->sli4_hba.cpu_map[cpu]; 10896 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 10897 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 10898 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 10899 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 10900 cpup->flag = 0; 10901 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 10902 INIT_LIST_HEAD(&eqi->list); 10903 eqi->icnt = 0; 10904 } 10905 } 10906 10907 /** 10908 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 10909 * @phba: pointer to lpfc hba data structure. 10910 * 10911 * The routine initializes the hba_eq_hdl array structure 10912 */ 10913 static void 10914 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 10915 { 10916 struct lpfc_hba_eq_hdl *eqhdl; 10917 int i; 10918 10919 for (i = 0; i < phba->cfg_irq_chann; i++) { 10920 eqhdl = lpfc_get_eq_hdl(i); 10921 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; 10922 eqhdl->phba = phba; 10923 } 10924 } 10925 10926 /** 10927 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 10928 * @phba: pointer to lpfc hba data structure. 10929 * @vectors: number of msix vectors allocated. 10930 * 10931 * The routine will figure out the CPU affinity assignment for every 10932 * MSI-X vector allocated for the HBA. 
10933  * In addition, the CPU to IO channel mapping will be calculated
10934  * and the phba->sli4_hba.cpu_map array will reflect this.
10935  */
10936 static void
10937 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10938 {
10939 	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10940 	int max_phys_id, min_phys_id;
10941 	int max_core_id, min_core_id;
10942 	struct lpfc_vector_map_info *cpup;
10943 	struct lpfc_vector_map_info *new_cpup;
10944 #ifdef CONFIG_X86
10945 	struct cpuinfo_x86 *cpuinfo;
10946 #endif
10947 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
10948 	struct lpfc_hdwq_stat *c_stat;
10949 #endif
10950
10951 	max_phys_id = 0;
10952 	min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10953 	max_core_id = 0;
10954 	min_core_id = LPFC_VECTOR_MAP_EMPTY;
10955
10956 	/* Update CPU map with physical id and core id of each CPU */
10957 	for_each_present_cpu(cpu) {
10958 		cpup = &phba->sli4_hba.cpu_map[cpu];
10959 #ifdef CONFIG_X86
10960 		cpuinfo = &cpu_data(cpu);
10961 		cpup->phys_id = cpuinfo->phys_proc_id;
10962 		cpup->core_id = cpuinfo->cpu_core_id;
10963 		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10964 			cpup->flag |= LPFC_CPU_MAP_HYPER;
10965 #else
10966 		/* No distinction between CPUs for other platforms */
10967 		cpup->phys_id = 0;
10968 		cpup->core_id = cpu;
10969 #endif
10970
10971 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10972 				"3328 CPU %d physid %d coreid %d flag x%x\n",
10973 				cpu, cpup->phys_id, cpup->core_id, cpup->flag);
10974
10975 		if (cpup->phys_id > max_phys_id)
10976 			max_phys_id = cpup->phys_id;
10977 		if (cpup->phys_id < min_phys_id)
10978 			min_phys_id = cpup->phys_id;
10979
10980 		if (cpup->core_id > max_core_id)
10981 			max_core_id = cpup->core_id;
10982 		if (cpup->core_id < min_core_id)
10983 			min_core_id = cpup->core_id;
10984 	}
10985
10986 	/* After looking at each irq vector assigned to this pcidev, it's
10987 	 * possible to see that not ALL CPUs have been accounted for.
10988 	 * Next we will set any unassigned (unaffinitized) cpu map
10989 	 * entries to an IRQ on the same phys_id.
10990 	 */
10991 	first_cpu = cpumask_first(cpu_present_mask);
10992 	start_cpu = first_cpu;
10993
10994 	for_each_present_cpu(cpu) {
10995 		cpup = &phba->sli4_hba.cpu_map[cpu];
10996
10997 		/* Is this CPU entry unassigned */
10998 		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10999 			/* Mark CPU as IRQ not assigned by the kernel */
11000 			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11001
11002 			/* If so, find a new_cpup that's on the SAME
11003 			 * phys_id as cpup. start_cpu will start where we
11004 			 * left off so all unassigned entries don't get
11005 			 * assigned the IRQ of the first entry.
11006 			 */
11007 			new_cpu = start_cpu;
11008 			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11009 				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11010 				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11011 				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
11012 				    (new_cpup->phys_id == cpup->phys_id))
11013 					goto found_same;
11014 				new_cpu = cpumask_next(
11015 						new_cpu, cpu_present_mask);
11016 				if (new_cpu == nr_cpumask_bits)
11017 					new_cpu = first_cpu;
11018 			}
11019 			/* At this point, we leave the CPU as unassigned */
11020 			continue;
11021 found_same:
11022 			/* We found a matching phys_id, so copy the IRQ info */
11023 			cpup->eq = new_cpup->eq;
11024
11025 			/* Bump start_cpu to the next slot to minimize the
11026 			 * chance of having multiple unassigned CPU entries
11027 			 * selecting the same IRQ.
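			 *
			 * (cpumask_next() returns nr_cpumask_bits once the
			 * mask is exhausted, which is why start_cpu wraps
			 * back to first_cpu just below.)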
11028 			 */
11029 			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11030 			if (start_cpu == nr_cpumask_bits)
11031 				start_cpu = first_cpu;
11032
11033 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11034 					"3337 Set Affinity: CPU %d "
11035 					"eq %d from peer cpu %d same "
11036 					"phys_id (%d)\n",
11037 					cpu, cpup->eq, new_cpu,
11038 					cpup->phys_id);
11039 		}
11040 	}
11041
11042 	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
11043 	start_cpu = first_cpu;
11044
11045 	for_each_present_cpu(cpu) {
11046 		cpup = &phba->sli4_hba.cpu_map[cpu];
11047
11048 		/* Is this entry unassigned */
11049 		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11050 			/* Mark it as IRQ not assigned by the kernel */
11051 			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11052
11053 			/* If so, find a new_cpup that's on ANY phys_id
11054 			 * as the cpup. start_cpu will start where we
11055 			 * left off so all unassigned entries don't get
11056 			 * assigned the IRQ of the first entry.
11057 			 */
11058 			new_cpu = start_cpu;
11059 			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11060 				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11061 				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11062 				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
11063 					goto found_any;
11064 				new_cpu = cpumask_next(
11065 						new_cpu, cpu_present_mask);
11066 				if (new_cpu == nr_cpumask_bits)
11067 					new_cpu = first_cpu;
11068 			}
11069 			/* We should never leave an entry unassigned */
11070 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11071 					"3339 Set Affinity: CPU %d "
11072 					"eq %d UNASSIGNED\n",
11073 					cpu, cpup->eq);
11074 			continue;
11075 found_any:
11076 			/* We found an available entry, copy the IRQ info */
11077 			cpup->eq = new_cpup->eq;
11078
11079 			/* Bump start_cpu to the next slot to minimize the
11080 			 * chance of having multiple unassigned CPU entries
11081 			 * selecting the same IRQ.
11082 			 */
11083 			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11084 			if (start_cpu == nr_cpumask_bits)
11085 				start_cpu = first_cpu;
11086
11087 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11088 					"3338 Set Affinity: CPU %d "
11089 					"eq %d from peer cpu %d (%d/%d)\n",
11090 					cpu, cpup->eq, new_cpu,
11091 					new_cpup->phys_id, new_cpup->core_id);
11092 		}
11093 	}
11094
11095 	/* Assign hdwq indices that are unique across all cpus in the map
11096 	 * that are also FIRST_CPUs.
11097 	 */
11098 	idx = 0;
11099 	for_each_present_cpu(cpu) {
11100 		cpup = &phba->sli4_hba.cpu_map[cpu];
11101
11102 		/* Only FIRST IRQs get a hdwq index assignment. */
11103 		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11104 			continue;
11105
11106 		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
11107 		cpup->hdwq = idx;
11108 		idx++;
11109 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11110 				"3333 Set Affinity: CPU %d (phys %d core %d): "
11111 				"hdwq %d eq %d flg x%x\n",
11112 				cpu, cpup->phys_id, cpup->core_id,
11113 				cpup->hdwq, cpup->eq, cpup->flag);
11114 	}
11115 	/* Associate a hdwq with each cpu_map entry.
11116 	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
11117 	 * hardware queues than CPUs. For that case we will just round-robin
11118 	 * the available hardware queues as they get assigned to CPUs.
11119 	 * The next_idx is the idx from the FIRST_CPU loop above to account
11120 	 * for irq_chann < hdwq. The idx is used for round-robin assignments
11121 	 * and needs to start at 0.
11122 	 */
11123 	next_idx = idx;
11124 	start_cpu = 0;
11125 	idx = 0;
11126 	for_each_present_cpu(cpu) {
11127 		cpup = &phba->sli4_hba.cpu_map[cpu];
11128
11129 		/* FIRST cpus are already mapped.
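		 * (Their hdwq index was assigned in the FIRST_CPU loop
		 * just above.)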
*/ 11130 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 11131 continue; 11132 11133 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 11134 * of the unassigned cpus to the next idx so that all 11135 * hdw queues are fully utilized. 11136 */ 11137 if (next_idx < phba->cfg_hdw_queue) { 11138 cpup->hdwq = next_idx; 11139 next_idx++; 11140 continue; 11141 } 11142 11143 /* Not a First CPU and all hdw_queues are used. Reuse a 11144 * Hardware Queue for another CPU, so be smart about it 11145 * and pick one that has its IRQ/EQ mapped to the same phys_id 11146 * (CPU package) and core_id. 11147 */ 11148 new_cpu = start_cpu; 11149 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11150 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11151 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 11152 new_cpup->phys_id == cpup->phys_id && 11153 new_cpup->core_id == cpup->core_id) { 11154 goto found_hdwq; 11155 } 11156 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 11157 if (new_cpu == nr_cpumask_bits) 11158 new_cpu = first_cpu; 11159 } 11160 11161 /* If we can't match both phys_id and core_id, 11162 * settle for just a phys_id match. 11163 */ 11164 new_cpu = start_cpu; 11165 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11166 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11167 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 11168 new_cpup->phys_id == cpup->phys_id) 11169 goto found_hdwq; 11170 11171 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 11172 if (new_cpu == nr_cpumask_bits) 11173 new_cpu = first_cpu; 11174 } 11175 11176 /* Otherwise just round robin on cfg_hdw_queue */ 11177 cpup->hdwq = idx % phba->cfg_hdw_queue; 11178 idx++; 11179 goto logit; 11180 found_hdwq: 11181 /* We found an available entry, copy the IRQ info */ 11182 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 11183 if (start_cpu == nr_cpumask_bits) 11184 start_cpu = first_cpu; 11185 cpup->hdwq = new_cpup->hdwq; 11186 logit: 11187 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11188 "3335 Set Affinity: CPU %d (phys %d core %d): " 11189 "hdwq %d eq %d flg x%x\n", 11190 cpu, cpup->phys_id, cpup->core_id, 11191 cpup->hdwq, cpup->eq, cpup->flag); 11192 } 11193 11194 /* 11195 * Initialize the cpu_map slots for not-present cpus in case 11196 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 11197 */ 11198 idx = 0; 11199 for_each_possible_cpu(cpu) { 11200 cpup = &phba->sli4_hba.cpu_map[cpu]; 11201 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 11202 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 11203 c_stat->hdwq_no = cpup->hdwq; 11204 #endif 11205 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 11206 continue; 11207 11208 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 11209 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 11210 c_stat->hdwq_no = cpup->hdwq; 11211 #endif 11212 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11213 "3340 Set Affinity: not present " 11214 "CPU %d hdwq %d\n", 11215 cpu, cpup->hdwq); 11216 } 11217 11218 /* The cpu_map array will be used later during initialization 11219 * when EQ / CQ / WQs are allocated and configured. 11220 */ 11221 return; 11222 } 11223 11224 /** 11225 * lpfc_cpuhp_get_eq 11226 * 11227 * @phba: pointer to lpfc hba data structure. 
11228  * @cpu: cpu going offline
11229  * @eqlist: eq list to append to
11230  */
11231 static int
11232 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11233 		  struct list_head *eqlist)
11234 {
11235 	const struct cpumask *maskp;
11236 	struct lpfc_queue *eq;
11237 	struct cpumask *tmp;
11238 	u16 idx;
11239
11240 	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11241 	if (!tmp)
11242 		return -ENOMEM;
11243
11244 	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11245 		maskp = pci_irq_get_affinity(phba->pcidev, idx);
11246 		if (!maskp)
11247 			continue;
11248 		/*
11249 		 * if irq is not affinitized to the cpu going offline,
11250 		 * then we don't need to poll the eq attached
11251 		 * to it.
11252 		 */
11253 		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
11254 			continue;
11255 		/* Get the cpus that are online and are affinitized
11256 		 * to this irq vector. If the count is more than 1,
11257 		 * then cpuhp is not going to shut down this vector.
11258 		 * Since this cpu has not gone offline yet, we need
11259 		 * >1.
11260 		 */
11261 		cpumask_and(tmp, maskp, cpu_online_mask);
11262 		if (cpumask_weight(tmp) > 1)
11263 			continue;
11264
11265 		/* Now that we have an irq to shutdown, get the eq
11266 		 * mapped to this irq. Note: multiple hdwq's in
11267 		 * the software can share an eq, but eventually
11268 		 * only one eq will be mapped to this vector.
11269 		 */
11270 		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
11271 		list_add(&eq->_poll_list, eqlist);
11272 	}
11273 	kfree(tmp);
11274 	return 0;
11275 }
11276
11277 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11278 {
11279 	if (phba->sli_rev != LPFC_SLI_REV4)
11280 		return;
11281
11282 	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11283 					    &phba->cpuhp);
11284 	/*
11285 	 * unregistering the instance doesn't stop the polling
11286 	 * timer. Wait for the poll timer to retire.
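	 *
	 * (synchronize_rcu() pairs with the rcu_read_lock() section in
	 * lpfc_cpuhp_add() below: once it returns, no concurrent reader
	 * can still be arming cpuhp_poll_timer, so the del_timer_sync()
	 * that follows is final.)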
11287 */ 11288 synchronize_rcu(); 11289 del_timer_sync(&phba->cpuhp_poll_timer); 11290 } 11291 11292 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 11293 { 11294 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 11295 return; 11296 11297 __lpfc_cpuhp_remove(phba); 11298 } 11299 11300 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 11301 { 11302 if (phba->sli_rev != LPFC_SLI_REV4) 11303 return; 11304 11305 rcu_read_lock(); 11306 11307 if (!list_empty(&phba->poll_list)) 11308 mod_timer(&phba->cpuhp_poll_timer, 11309 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 11310 11311 rcu_read_unlock(); 11312 11313 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 11314 &phba->cpuhp); 11315 } 11316 11317 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 11318 { 11319 if (phba->pport->load_flag & FC_UNLOADING) { 11320 *retval = -EAGAIN; 11321 return true; 11322 } 11323 11324 if (phba->sli_rev != LPFC_SLI_REV4) { 11325 *retval = 0; 11326 return true; 11327 } 11328 11329 /* proceed with the hotplug */ 11330 return false; 11331 } 11332 11333 /** 11334 * lpfc_irq_set_aff - set IRQ affinity 11335 * @eqhdl: EQ handle 11336 * @cpu: cpu to set affinity 11337 * 11338 **/ 11339 static inline void 11340 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 11341 { 11342 cpumask_clear(&eqhdl->aff_mask); 11343 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 11344 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 11345 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); 11346 } 11347 11348 /** 11349 * lpfc_irq_clear_aff - clear IRQ affinity 11350 * @eqhdl: EQ handle 11351 * 11352 **/ 11353 static inline void 11354 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 11355 { 11356 cpumask_clear(&eqhdl->aff_mask); 11357 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 11358 } 11359 11360 /** 11361 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 11362 * @phba: pointer to HBA context object. 11363 * @cpu: cpu going offline/online 11364 * @offline: true, cpu is going offline. false, cpu is coming online. 11365 * 11366 * If cpu is going offline, we'll try our best effort to find the next 11367 * online cpu on the phba's original_mask and migrate all offlining IRQ 11368 * affinities. 11369 * 11370 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. 11371 * 11372 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on 11373 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
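 *
 * Illustrative example (cpu numbers hypothetical): if irq_aff_mask spans
 * cpus 0-3 and cpu 2, a FIRST_IRQ cpu, goes offline, every eqhdl whose
 * aff_mask contains cpu 2 is re-pinned via lpfc_irq_set_aff() to the next
 * online cpu in the mask, e.g. cpu 3. If no cpu in the mask remains
 * online, the affinity hints are cleared and irqbalance takes over.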
11374 * 11375 **/ 11376 static void 11377 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 11378 { 11379 struct lpfc_vector_map_info *cpup; 11380 struct cpumask *aff_mask; 11381 unsigned int cpu_select, cpu_next, idx; 11382 const struct cpumask *orig_mask; 11383 11384 if (phba->irq_chann_mode == NORMAL_MODE) 11385 return; 11386 11387 orig_mask = &phba->sli4_hba.irq_aff_mask; 11388 11389 if (!cpumask_test_cpu(cpu, orig_mask)) 11390 return; 11391 11392 cpup = &phba->sli4_hba.cpu_map[cpu]; 11393 11394 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11395 return; 11396 11397 if (offline) { 11398 /* Find next online CPU on original mask */ 11399 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); 11400 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); 11401 11402 /* Found a valid CPU */ 11403 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 11404 /* Go through each eqhdl and ensure offlining 11405 * cpu aff_mask is migrated 11406 */ 11407 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11408 aff_mask = lpfc_get_aff_mask(idx); 11409 11410 /* Migrate affinity */ 11411 if (cpumask_test_cpu(cpu, aff_mask)) 11412 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 11413 cpu_select); 11414 } 11415 } else { 11416 /* Rely on irqbalance if no online CPUs left on NUMA */ 11417 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 11418 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 11419 } 11420 } else { 11421 /* Migrate affinity back to this CPU */ 11422 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 11423 } 11424 } 11425 11426 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 11427 { 11428 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 11429 struct lpfc_queue *eq, *next; 11430 LIST_HEAD(eqlist); 11431 int retval; 11432 11433 if (!phba) { 11434 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 11435 return 0; 11436 } 11437 11438 if (__lpfc_cpuhp_checks(phba, &retval)) 11439 return retval; 11440 11441 lpfc_irq_rebalance(phba, cpu, true); 11442 11443 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 11444 if (retval) 11445 return retval; 11446 11447 /* start polling on these eq's */ 11448 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 11449 list_del_init(&eq->_poll_list); 11450 lpfc_sli4_start_polling(eq); 11451 } 11452 11453 return 0; 11454 } 11455 11456 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 11457 { 11458 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 11459 struct lpfc_queue *eq, *next; 11460 unsigned int n; 11461 int retval; 11462 11463 if (!phba) { 11464 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 11465 return 0; 11466 } 11467 11468 if (__lpfc_cpuhp_checks(phba, &retval)) 11469 return retval; 11470 11471 lpfc_irq_rebalance(phba, cpu, false); 11472 11473 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 11474 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 11475 if (n == cpu) 11476 lpfc_sli4_stop_polling(eq); 11477 } 11478 11479 return 0; 11480 } 11481 11482 /** 11483 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 11484 * @phba: pointer to lpfc hba data structure. 11485 * 11486 * This routine is invoked to enable the MSI-X interrupt vectors to device 11487 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 11488 * to cpus on the system. 
11489 * 11490 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 11491 * the number of cpus on the same numa node as this adapter. The vectors are 11492 * allocated without requesting OS affinity mapping. A vector will be 11493 * allocated and assigned to each online and offline cpu. If the cpu is 11494 * online, then affinity will be set to that cpu. If the cpu is offline, then 11495 * affinity will be set to the nearest peer cpu within the numa node that is 11496 * online. If there are no online cpus within the numa node, affinity is not 11497 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 11498 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 11499 * configured. 11500 * 11501 * If numa mode is not enabled and there is more than 1 vector allocated, then 11502 * the driver relies on the managed irq interface where the OS assigns vector to 11503 * cpu affinity. The driver will then use that affinity mapping to setup its 11504 * cpu mapping table. 11505 * 11506 * Return codes 11507 * 0 - successful 11508 * other values - error 11509 **/ 11510 static int 11511 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 11512 { 11513 int vectors, rc, index; 11514 char *name; 11515 const struct cpumask *aff_mask = NULL; 11516 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 11517 struct lpfc_vector_map_info *cpup; 11518 struct lpfc_hba_eq_hdl *eqhdl; 11519 const struct cpumask *maskp; 11520 unsigned int flags = PCI_IRQ_MSIX; 11521 11522 /* Set up MSI-X multi-message vectors */ 11523 vectors = phba->cfg_irq_chann; 11524 11525 if (phba->irq_chann_mode != NORMAL_MODE) 11526 aff_mask = &phba->sli4_hba.irq_aff_mask; 11527 11528 if (aff_mask) { 11529 cpu_cnt = cpumask_weight(aff_mask); 11530 vectors = min(phba->cfg_irq_chann, cpu_cnt); 11531 11532 /* cpu: iterates over aff_mask including offline or online 11533 * cpu_select: iterates over online aff_mask to set affinity 11534 */ 11535 cpu = cpumask_first(aff_mask); 11536 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 11537 } else { 11538 flags |= PCI_IRQ_AFFINITY; 11539 } 11540 11541 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 11542 if (rc < 0) { 11543 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11544 "0484 PCI enable MSI-X failed (%d)\n", rc); 11545 goto vec_fail_out; 11546 } 11547 vectors = rc; 11548 11549 /* Assign MSI-X vectors to interrupt handlers */ 11550 for (index = 0; index < vectors; index++) { 11551 eqhdl = lpfc_get_eq_hdl(index); 11552 name = eqhdl->handler_name; 11553 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 11554 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 11555 LPFC_DRIVER_HANDLER_NAME"%d", index); 11556 11557 eqhdl->idx = index; 11558 rc = request_irq(pci_irq_vector(phba->pcidev, index), 11559 &lpfc_sli4_hba_intr_handler, 0, 11560 name, eqhdl); 11561 if (rc) { 11562 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11563 "0486 MSI-X fast-path (%d) " 11564 "request_irq failed (%d)\n", index, rc); 11565 goto cfg_fail_out; 11566 } 11567 11568 eqhdl->irq = pci_irq_vector(phba->pcidev, index); 11569 11570 if (aff_mask) { 11571 /* If found a neighboring online cpu, set affinity */ 11572 if (cpu_select < nr_cpu_ids) 11573 lpfc_irq_set_aff(eqhdl, cpu_select); 11574 11575 /* Assign EQ to cpu_map */ 11576 lpfc_assign_eq_map_info(phba, index, 11577 LPFC_CPU_FIRST_IRQ, 11578 cpu); 11579 11580 /* Iterate to next offline or online cpu in aff_mask */ 11581 cpu = cpumask_next(cpu, aff_mask); 11582 11583 /* Find next online cpu in aff_mask to set 
affinity */
11584 			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11585 		} else if (vectors == 1) {
11586 			cpu = cpumask_first(cpu_present_mask);
11587 			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
11588 						cpu);
11589 		} else {
11590 			maskp = pci_irq_get_affinity(phba->pcidev, index);
11591
11592 			/* Loop through all CPUs associated with vector index */
11593 			for_each_cpu_and(cpu, maskp, cpu_present_mask) {
11594 				cpup = &phba->sli4_hba.cpu_map[cpu];
11595
11596 				/* If this is the first CPU that's assigned to
11597 				 * this vector, set LPFC_CPU_FIRST_IRQ.
11598 				 *
11599 				 * With certain platforms it's possible that irq
11600 				 * vectors are affinitized to all the cpus.
11601 				 * This can result in each cpu_map.eq being set
11602 				 * to the last vector, resulting in overwrite
11603 				 * of all the previous cpu_map.eq. Ensure that
11604 				 * each vector receives a place in cpu_map.
11605 				 * Later call to lpfc_cpu_affinity_check will
11606 				 * ensure we are nicely balanced out.
11607 				 */
11608 				if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
11609 					continue;
11610 				lpfc_assign_eq_map_info(phba, index,
11611 							LPFC_CPU_FIRST_IRQ,
11612 							cpu);
11613 				break;
11614 			}
11615 		}
11616 	}
11617
11618 	if (vectors != phba->cfg_irq_chann) {
11619 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11620 				"3238 Reducing IO channels to match number of "
11621 				"MSI-X vectors, requested %d got %d\n",
11622 				phba->cfg_irq_chann, vectors);
11623 		if (phba->cfg_irq_chann > vectors)
11624 			phba->cfg_irq_chann = vectors;
11625 	}
11626
11627 	return rc;
11628
11629 cfg_fail_out:
11630 	/* free the irq already requested */
11631 	for (--index; index >= 0; index--) {
11632 		eqhdl = lpfc_get_eq_hdl(index);
11633 		lpfc_irq_clear_aff(eqhdl);
11634 		irq_set_affinity_hint(eqhdl->irq, NULL);
11635 		free_irq(eqhdl->irq, eqhdl);
11636 	}
11637
11638 	/* Unconfigure MSI-X capability structure */
11639 	pci_free_irq_vectors(phba->pcidev);
11640
11641 vec_fail_out:
11642 	return rc;
11643 }
11644
11645 /**
11646  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
11647  * @phba: pointer to lpfc hba data structure.
11648  *
11649  * This routine is invoked to enable the MSI interrupt mode to a device with
11650  * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
11651  * called to enable the MSI vector. The device driver is responsible for
11652  * calling request_irq() to register the MSI vector with an interrupt
11653  * handler, which is done in this function.
11654  *
11655  * Return codes
11656  *	0 - successful
11657  *	other values - error
11658  **/
11659 static int
11660 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11661 {
11662 	int rc, index;
11663 	unsigned int cpu;
11664 	struct lpfc_hba_eq_hdl *eqhdl;
11665
11666 	rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11667 				   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11668 	if (rc > 0)
11669 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11670 				"0487 PCI enable MSI mode success.\n");
11671 	else {
11672 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11673 				"0488 PCI enable MSI mode failed (%d)\n", rc);
11674 		return rc ? rc : -1;
11675 	}
11676
11677 	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11678 			 0, LPFC_DRIVER_NAME, phba);
11679 	if (rc) {
11680 		pci_free_irq_vectors(phba->pcidev);
11681 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11682 				"0490 MSI request_irq failed (%d)\n", rc);
11683 		return rc;
11684 	}
11685
11686 	eqhdl = lpfc_get_eq_hdl(0);
11687 	eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11688
11689 	cpu = cpumask_first(cpu_present_mask);
11690 	lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11691
11692 	for (index = 0; index < phba->cfg_irq_chann; index++) {
11693 		eqhdl = lpfc_get_eq_hdl(index);
11694 		eqhdl->idx = index;
11695 	}
11696
11697 	return 0;
11698 }
11699
11700 /**
11701  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
11702  * @phba: pointer to lpfc hba data structure.
11703  * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
11704  *
11705  * This routine is invoked to enable device interrupt and associate driver's
11706  * interrupt handler(s) to interrupt vector(s) to a device with SLI-4
11707  * interface spec. Depending on the interrupt mode configured for the
11708  * driver, the driver will try to fall back from the configured interrupt
11709  * mode to an interrupt mode which is supported by the platform, kernel,
11710  * and device, in the order of:
11711  *	MSI-X -> MSI -> IRQ.
11712  *
11713  * Return codes
11714  *	0 - successful
11715  *	other values - error
11716  **/
11717 static uint32_t
11718 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11719 {
11720 	uint32_t intr_mode = LPFC_INTR_ERROR;
11721 	int retval, idx;
11722
11723 	if (cfg_mode == 2) {
11724 		/* Preparation before conf_msi mbox cmd */
11725 		retval = 0;
11726 		if (!retval) {
11727 			/* Now, try to enable MSI-X interrupt mode */
11728 			retval = lpfc_sli4_enable_msix(phba);
11729 			if (!retval) {
11730 				/* Indicate initialization to MSI-X mode */
11731 				phba->intr_type = MSIX;
11732 				intr_mode = 2;
11733 			}
11734 		}
11735 	}
11736
11737 	/* Fallback to MSI if MSI-X initialization failed */
11738 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
11739 		retval = lpfc_sli4_enable_msi(phba);
11740 		if (!retval) {
11741 			/* Indicate initialization to MSI mode */
11742 			phba->intr_type = MSI;
11743 			intr_mode = 1;
11744 		}
11745 	}
11746
11747 	/* Fallback to INTx if both MSI-X/MSI initialization failed */
11748 	if (phba->intr_type == NONE) {
11749 		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11750 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11751 		if (!retval) {
11752 			struct lpfc_hba_eq_hdl *eqhdl;
11753 			unsigned int cpu;
11754
11755 			/* Indicate initialization to INTx mode */
11756 			phba->intr_type = INTx;
11757 			intr_mode = 0;
11758
11759 			eqhdl = lpfc_get_eq_hdl(0);
11760 			eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11761
11762 			cpu = cpumask_first(cpu_present_mask);
11763 			lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11764 						cpu);
11765 			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11766 				eqhdl = lpfc_get_eq_hdl(idx);
11767 				eqhdl->idx = idx;
11768 			}
11769 		}
11770 	}
11771 	return intr_mode;
11772 }
11773
11774 /**
11775  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
11776  * @phba: pointer to lpfc hba data structure.
11777  *
11778  * This routine is invoked to disable device interrupt and disassociate
11779  * the driver's interrupt handler(s) from interrupt vector(s) of a device
11780  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
11781  * will release the interrupt vector(s) for the message signaled interrupt.
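 *
 * (Teardown order matters here: each vector's free_irq() must precede
 * the final pci_free_irq_vectors(), which is the order the code below
 * follows.)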
11782  **/
11783 static void
11784 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11785 {
11786 	/* Disable the currently initialized interrupt mode */
11787 	if (phba->intr_type == MSIX) {
11788 		int index;
11789 		struct lpfc_hba_eq_hdl *eqhdl;
11790
11791 		/* Free up MSI-X multi-message vectors */
11792 		for (index = 0; index < phba->cfg_irq_chann; index++) {
11793 			eqhdl = lpfc_get_eq_hdl(index);
11794 			lpfc_irq_clear_aff(eqhdl);
11795 			irq_set_affinity_hint(eqhdl->irq, NULL);
11796 			free_irq(eqhdl->irq, eqhdl);
11797 		}
11798 	} else {
11799 		free_irq(phba->pcidev->irq, phba);
11800 	}
11801
11802 	pci_free_irq_vectors(phba->pcidev);
11803
11804 	/* Reset interrupt management states */
11805 	phba->intr_type = NONE;
11806 	phba->sli.slistat.sli_intr = 0;
11807 }
11808
11809 /**
11810  * lpfc_unset_hba - Unset SLI3 hba device initialization
11811  * @phba: pointer to lpfc hba data structure.
11812  *
11813  * This routine is invoked to unset the HBA device initialization steps for
11814  * a device with SLI-3 interface spec.
11815  **/
11816 static void
11817 lpfc_unset_hba(struct lpfc_hba *phba)
11818 {
11819 	struct lpfc_vport *vport = phba->pport;
11820 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11821
11822 	spin_lock_irq(shost->host_lock);
11823 	vport->load_flag |= FC_UNLOADING;
11824 	spin_unlock_irq(shost->host_lock);
11825
11826 	kfree(phba->vpi_bmask);
11827 	kfree(phba->vpi_ids);
11828
11829 	lpfc_stop_hba_timers(phba);
11830
11831 	phba->pport->work_port_events = 0;
11832
11833 	lpfc_sli_hba_down(phba);
11834
11835 	lpfc_sli_brdrestart(phba);
11836
11837 	lpfc_sli_disable_intr(phba);
11838
11839 	return;
11840 }
11841
11842 /**
11843  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
11844  * @phba: Pointer to HBA context object.
11845  *
11846  * This function is called in the SLI4 code path to wait for completion
11847  * of the device's XRI exchange busy conditions. It will check the XRI
11848  * exchange busy on outstanding FCP and ELS I/Os every 10ms for up to 10
11849  * seconds; after that, it will check the XRI exchange busy on outstanding
11850  * FCP and ELS I/Os every 30 seconds, log an error message, and wait
11851  * forever. Only when all XRI exchange busy conditions have completed will
11852  * the driver unload proceed with invoking the function reset ioctl mailbox
11853  * command to the CNA and the rest of the driver unload resource release.
11854  **/
11855 static void
11856 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11857 {
11858 	struct lpfc_sli4_hdw_queue *qp;
11859 	int idx, ccnt;
11860 	int wait_time = 0;
11861 	int io_xri_cmpl = 1;
11862 	int nvmet_xri_cmpl = 1;
11863 	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11864
11865 	/* Driver just aborted IOs during the hba_unset process. Pause
11866 	 * here to give the HBA time to complete the IO and get entries
11867 	 * into the abts lists.
11868 	 */
11869 	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11870
11871 	/* Wait for NVME pending IO to flush back to transport.
*/ 11872 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 11873 lpfc_nvme_wait_for_io_drain(phba); 11874 11875 ccnt = 0; 11876 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11877 qp = &phba->sli4_hba.hdwq[idx]; 11878 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 11879 if (!io_xri_cmpl) /* if list is NOT empty */ 11880 ccnt++; 11881 } 11882 if (ccnt) 11883 io_xri_cmpl = 0; 11884 11885 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11886 nvmet_xri_cmpl = 11887 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11888 } 11889 11890 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 11891 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 11892 if (!nvmet_xri_cmpl) 11893 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11894 "6424 NVMET XRI exchange busy " 11895 "wait time: %d seconds.\n", 11896 wait_time/1000); 11897 if (!io_xri_cmpl) 11898 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11899 "6100 IO XRI exchange busy " 11900 "wait time: %d seconds.\n", 11901 wait_time/1000); 11902 if (!els_xri_cmpl) 11903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11904 "2878 ELS XRI exchange busy " 11905 "wait time: %d seconds.\n", 11906 wait_time/1000); 11907 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 11908 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 11909 } else { 11910 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 11911 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 11912 } 11913 11914 ccnt = 0; 11915 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11916 qp = &phba->sli4_hba.hdwq[idx]; 11917 io_xri_cmpl = list_empty( 11918 &qp->lpfc_abts_io_buf_list); 11919 if (!io_xri_cmpl) /* if list is NOT empty */ 11920 ccnt++; 11921 } 11922 if (ccnt) 11923 io_xri_cmpl = 0; 11924 11925 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11926 nvmet_xri_cmpl = list_empty( 11927 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11928 } 11929 els_xri_cmpl = 11930 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11931 11932 } 11933 } 11934 11935 /** 11936 * lpfc_sli4_hba_unset - Unset the fcoe hba 11937 * @phba: Pointer to HBA context object. 11938 * 11939 * This function is called in the SLI4 code path to reset the HBA's FCoE 11940 * function. The caller is not required to hold any lock. This routine 11941 * issues PCI function reset mailbox command to reset the FCoE function. 11942 * At the end of the function, it calls lpfc_hba_down_post function to 11943 * free any pending commands. 11944 **/ 11945 static void 11946 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 11947 { 11948 int wait_cnt = 0; 11949 LPFC_MBOXQ_t *mboxq; 11950 struct pci_dev *pdev = phba->pcidev; 11951 11952 lpfc_stop_hba_timers(phba); 11953 if (phba->pport) 11954 phba->sli4_hba.intr_enable = 0; 11955 11956 /* 11957 * Gracefully wait out the potential current outstanding asynchronous 11958 * mailbox command. 
11959 	 */
11960
11961 	/* First, block any pending async mailbox command from being posted */
11962 	spin_lock_irq(&phba->hbalock);
11963 	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11964 	spin_unlock_irq(&phba->hbalock);
11965 	/* Now, try to wait it out if we can */
11966 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11967 		msleep(10);
11968 		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
11969 			break;
11970 	}
11971 	/* Forcefully release the outstanding mailbox command if timed out */
11972 	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11973 		spin_lock_irq(&phba->hbalock);
11974 		mboxq = phba->sli.mbox_active;
11975 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
11976 		__lpfc_mbox_cmpl_put(phba, mboxq);
11977 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11978 		phba->sli.mbox_active = NULL;
11979 		spin_unlock_irq(&phba->hbalock);
11980 	}
11981
11982 	/* Abort all iocbs associated with the hba */
11983 	lpfc_sli_hba_iocb_abort(phba);
11984
11985 	/* Wait for completion of device XRI exchange busy */
11986 	lpfc_sli4_xri_exchange_busy_wait(phba);
11987
11988 	/* per-phba callback de-registration for hotplug event */
11989 	if (phba->pport)
11990 		lpfc_cpuhp_remove(phba);
11991
11992 	/* Disable PCI subsystem interrupt */
11993 	lpfc_sli4_disable_intr(phba);
11994
11995 	/* Disable SR-IOV if enabled */
11996 	if (phba->cfg_sriov_nr_virtfn)
11997 		pci_disable_sriov(pdev);
11998
11999 	/* Stop kthread signal shall trigger work_done one more time */
12000 	kthread_stop(phba->worker_thread);
12001
12002 	/* Disable FW logging to host memory */
12003 	lpfc_ras_stop_fwlog(phba);
12004
12005 	/* Unset the queues shared with the hardware then release all
12006 	 * allocated resources.
12007 	 */
12008 	lpfc_sli4_queue_unset(phba);
12009 	lpfc_sli4_queue_destroy(phba);
12010
12011 	/* Reset SLI4 HBA FCoE function */
12012 	lpfc_pci_function_reset(phba);
12013
12014 	/* Free RAS DMA memory */
12015 	if (phba->ras_fwlog.ras_enabled)
12016 		lpfc_sli4_ras_dma_free(phba);
12017
12018 	/* Stop the SLI4 device port */
12019 	if (phba->pport)
12020 		phba->pport->work_port_events = 0;
12021 }
12022
12023 /**
12024  * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
12025  * @phba: Pointer to HBA context object.
12026  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
12027  *
12028  * This function is called in the SLI4 code path to read the port's
12029  * sli4 capabilities.
12030  *
12031  * This function may be called from any context that can block-wait
12032  * for the completion. The expectation is that this routine is called
12033  * typically from probe_one or from the online routine.
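 *
 * Illustrative call pattern (a sketch only; the mailbox pool usage
 * mirrors the rest of this file):
 *
 *	LPFC_MBOXQ_t *mboxq = mempool_alloc(phba->mbox_mem_pool,
 *					    GFP_KERNEL);
 *	if (mboxq) {
 *		if (lpfc_pc_sli4_params_get(phba, mboxq))
 *			;	/* fall back to default parameters */
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *	}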
12034  **/
12035 int
12036 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12037 {
12038 	int rc;
12039 	struct lpfc_mqe *mqe;
12040 	struct lpfc_pc_sli4_params *sli4_params;
12041 	uint32_t mbox_tmo;
12042
12043 	rc = 0;
12044 	mqe = &mboxq->u.mqe;
12045
12046 	/* Read the port's SLI4 Parameters port capabilities */
12047 	lpfc_pc_sli4_params(mboxq);
12048 	if (!phba->sli4_hba.intr_enable)
12049 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
12050 	else {
12051 		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12052 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12053 	}
12054
12055 	if (unlikely(rc))
12056 		return 1;
12057
12058 	sli4_params = &phba->sli4_hba.pc_sli4_params;
12059 	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
12060 	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
12061 	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
12062 	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
12063 					     &mqe->un.sli4_params);
12064 	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
12065 					     &mqe->un.sli4_params);
12066 	sli4_params->proto_types = mqe->un.sli4_params.word3;
12067 	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
12068 	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
12069 	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
12070 	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
12071 	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
12072 	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
12073 	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
12074 	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
12075 	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
12076 	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
12077 	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
12078 	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
12079 	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
12080 	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
12081 	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
12082 	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
12083 	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
12084 	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
12085 	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
12086 	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
12087
12088 	/* Make sure that sge_supp_len can be handled by the driver */
12089 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
12090 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12091
12092 	return rc;
12093 }
12094
12095 /**
12096  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
12097  * @phba: Pointer to HBA context object.
12098  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
12099  *
12100  * This function is called in the SLI4 code path to read the port's
12101  * sli4 capabilities.
12102  *
12103  * This function may be called from any context that can block-wait
12104  * for the completion. The expectation is that this routine is called
12105  * typically from probe_one or from the online routine.
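 *
 * Return: 0 on success; -ENODEV when NVME is the only configured FC4
 * type but the firmware does not support it; otherwise the nonzero
 * mailbox return code.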
12106 **/ 12107 int 12108 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 12109 { 12110 int rc; 12111 struct lpfc_mqe *mqe = &mboxq->u.mqe; 12112 struct lpfc_pc_sli4_params *sli4_params; 12113 uint32_t mbox_tmo; 12114 int length; 12115 bool exp_wqcq_pages = true; 12116 struct lpfc_sli4_parameters *mbx_sli4_parameters; 12117 12118 /* 12119 * By default, the driver assumes the SLI4 port requires RPI 12120 * header postings. The SLI4_PARAM response will correct this 12121 * assumption. 12122 */ 12123 phba->sli4_hba.rpi_hdrs_in_use = 1; 12124 12125 /* Read the port's SLI4 Config Parameters */ 12126 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 12127 sizeof(struct lpfc_sli4_cfg_mhdr)); 12128 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 12129 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 12130 length, LPFC_SLI4_MBX_EMBED); 12131 if (!phba->sli4_hba.intr_enable) 12132 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 12133 else { 12134 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 12135 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 12136 } 12137 if (unlikely(rc)) 12138 return rc; 12139 sli4_params = &phba->sli4_hba.pc_sli4_params; 12140 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 12141 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 12142 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 12143 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 12144 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 12145 mbx_sli4_parameters); 12146 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 12147 mbx_sli4_parameters); 12148 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 12149 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 12150 else 12151 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 12152 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 12153 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 12154 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 12155 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 12156 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 12157 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 12158 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 12159 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 12160 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 12161 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 12162 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 12163 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 12164 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 12165 mbx_sli4_parameters); 12166 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 12167 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 12168 mbx_sli4_parameters); 12169 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 12170 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 12171 12172 /* Check for Extended Pre-Registered SGL support */ 12173 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 12174 12175 /* Check for firmware nvme support */ 12176 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 12177 bf_get(cfg_xib, mbx_sli4_parameters)); 12178 12179 if (rc) { 12180 /* Save this to indicate the Firmware supports NVME */ 12181 sli4_params->nvme = 1; 12182 12183 /* Firmware NVME support, check driver FC4 NVME support */ 12184 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 12185 lpfc_printf_log(phba, 
KERN_INFO, LOG_INIT | LOG_NVME, 12186 "6133 Disabling NVME support: " 12187 "FC4 type not supported: x%x\n", 12188 phba->cfg_enable_fc4_type); 12189 goto fcponly; 12190 } 12191 } else { 12192 /* No firmware NVME support, check driver FC4 NVME support */ 12193 sli4_params->nvme = 0; 12194 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 12196 "6101 Disabling NVME support: Not " 12197 "supported by firmware (%d %d) x%x\n", 12198 bf_get(cfg_nvme, mbx_sli4_parameters), 12199 bf_get(cfg_xib, mbx_sli4_parameters), 12200 phba->cfg_enable_fc4_type); 12201 fcponly: 12202 phba->nvme_support = 0; 12203 phba->nvmet_support = 0; 12204 phba->cfg_nvmet_mrq = 0; 12205 phba->cfg_nvme_seg_cnt = 0; 12206 12207 /* If no FC4 type support, move to just SCSI support */ 12208 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 12209 return -ENODEV; 12210 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 12211 } 12212 } 12213 12214 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 12215 * accommodate 512K and 1M IOs in a single nvme buf. 12216 */ 12217 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12218 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 12219 12220 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ 12221 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 12222 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) 12223 phba->cfg_enable_pbde = 0; 12224 12225 /* 12226 * To support Suppress Response feature we must satisfy 3 conditions. 12227 * lpfc_suppress_rsp module parameter must be set (default). 12228 * In SLI4-Parameters Descriptor: 12229 * Extended Inline Buffers (XIB) must be supported. 12230 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 12231 * (double negative). 12232 */ 12233 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 12234 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 12235 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 12236 else 12237 phba->cfg_suppress_rsp = 0; 12238 12239 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 12240 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 12241 12242 /* Make sure that sge_supp_len can be handled by the driver */ 12243 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 12244 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 12245 12246 /* 12247 * Check whether the adapter supports an embedded copy of the 12248 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 12249 * to use this option, 128-byte WQEs must be used. 
12250 	 */
12251 	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
12252 		phba->fcp_embed_io = 1;
12253 	else
12254 		phba->fcp_embed_io = 0;
12255
12256 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12257 			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
12258 			bf_get(cfg_xib, mbx_sli4_parameters),
12259 			phba->cfg_enable_pbde,
12260 			phba->fcp_embed_io, phba->nvme_support,
12261 			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
12262
12263 	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
12264 	    LPFC_SLI_INTF_IF_TYPE_2) &&
12265 	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
12266 	     LPFC_SLI_INTF_FAMILY_LNCR_A0))
12267 		exp_wqcq_pages = false;
12268
12269 	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
12270 	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
12271 	    exp_wqcq_pages &&
12272 	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
12273 		phba->enab_exp_wqcq_pages = 1;
12274 	else
12275 		phba->enab_exp_wqcq_pages = 0;
12276 	/*
12277 	 * Check if the SLI port supports MDS Diagnostics
12278 	 */
12279 	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
12280 		phba->mds_diags_support = 1;
12281 	else
12282 		phba->mds_diags_support = 0;
12283
12284 	/*
12285 	 * Check if the SLI port supports NSLER
12286 	 */
12287 	if (bf_get(cfg_nsler, mbx_sli4_parameters))
12288 		phba->nsler = 1;
12289 	else
12290 		phba->nsler = 0;
12291
12292 	/* Save PB info for use during HBA setup */
12293 	sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
12294 	sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
12295 	sli4_params->mib_size = mbx_sli4_parameters->mib_size;
12296 	sli4_params->mi_value = LPFC_DFLT_MIB_VAL;
12297
12298 	/* Next we check for Vendor MIB support */
12299 	if (sli4_params->mi_ver && phba->cfg_enable_mi)
12300 		phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
12301
12302 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12303 			"6461 MIB attr %d enable %d FDMI %d buf %d:%d\n",
12304 			sli4_params->mi_ver, phba->cfg_enable_mi,
12305 			sli4_params->mi_value, sli4_params->mib_bde_cnt,
12306 			sli4_params->mib_size);
12307 	return 0;
12308 }
12309
12310 /**
12311  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
12312  * @pdev: pointer to PCI device
12313  * @pid: pointer to PCI device identifier
12314  *
12315  * This routine is to be called to attach a device with SLI-3 interface spec
12316  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12317  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
12318  * information of the device and driver to see if the driver states that it
12319  * can support this kind of device. If the match is successful, the driver
12320  * core invokes this routine. If this routine determines it can claim the
12321  * HBA, it does all the initialization that it needs to do to handle the HBA properly.
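 *
 * (The interrupt bring-up loop in the body below retries
 * lpfc_sli_enable_intr() with progressively lower cfg_mode values,
 * 2 = MSI-X, 1 = MSI, 0 = INTx, until the active-interrupt test passes.)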
12322 * 12323 * Return code 12324 * 0 - driver can claim the device 12325 * negative value - driver can not claim the device 12326 **/ 12327 static int 12328 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 12329 { 12330 struct lpfc_hba *phba; 12331 struct lpfc_vport *vport = NULL; 12332 struct Scsi_Host *shost = NULL; 12333 int error; 12334 uint32_t cfg_mode, intr_mode; 12335 12336 /* Allocate memory for HBA structure */ 12337 phba = lpfc_hba_alloc(pdev); 12338 if (!phba) 12339 return -ENOMEM; 12340 12341 /* Perform generic PCI device enabling operation */ 12342 error = lpfc_enable_pci_dev(phba); 12343 if (error) 12344 goto out_free_phba; 12345 12346 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 12347 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 12348 if (error) 12349 goto out_disable_pci_dev; 12350 12351 /* Set up SLI-3 specific device PCI memory space */ 12352 error = lpfc_sli_pci_mem_setup(phba); 12353 if (error) { 12354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12355 "1402 Failed to set up pci memory space.\n"); 12356 goto out_disable_pci_dev; 12357 } 12358 12359 /* Set up SLI-3 specific device driver resources */ 12360 error = lpfc_sli_driver_resource_setup(phba); 12361 if (error) { 12362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12363 "1404 Failed to set up driver resource.\n"); 12364 goto out_unset_pci_mem_s3; 12365 } 12366 12367 /* Initialize and populate the iocb list per host */ 12368 12369 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 12370 if (error) { 12371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12372 "1405 Failed to initialize iocb list.\n"); 12373 goto out_unset_driver_resource_s3; 12374 } 12375 12376 /* Set up common device driver resources */ 12377 error = lpfc_setup_driver_resource_phase2(phba); 12378 if (error) { 12379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12380 "1406 Failed to set up driver resource.\n"); 12381 goto out_free_iocb_list; 12382 } 12383 12384 /* Get the default values for Model Name and Description */ 12385 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 12386 12387 /* Create SCSI host to the physical port */ 12388 error = lpfc_create_shost(phba); 12389 if (error) { 12390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12391 "1407 Failed to create scsi host.\n"); 12392 goto out_unset_driver_resource; 12393 } 12394 12395 /* Configure sysfs attributes */ 12396 vport = phba->pport; 12397 error = lpfc_alloc_sysfs_attr(vport); 12398 if (error) { 12399 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12400 "1476 Failed to allocate sysfs attr\n"); 12401 goto out_destroy_shost; 12402 } 12403 12404 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 12405 /* Now, trying to enable interrupt and bring up the device */ 12406 cfg_mode = phba->cfg_use_msi; 12407 while (true) { 12408 /* Put device to a known state before enabling interrupt */ 12409 lpfc_stop_port(phba); 12410 /* Configure and enable interrupt */ 12411 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 12412 if (intr_mode == LPFC_INTR_ERROR) { 12413 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12414 "0431 Failed to enable interrupt.\n"); 12415 error = -ENODEV; 12416 goto out_free_sysfs_attr; 12417 } 12418 /* SLI-3 HBA setup */ 12419 if (lpfc_sli_hba_setup(phba)) { 12420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12421 "1477 Failed to set up hba\n"); 12422 error = -ENODEV; 12423 goto out_remove_device; 12424 } 12425 12426 /* Wait 50ms for the interrupts of previous mailbox commands */ 12427 
msleep(50);
12428 		/* Check active interrupts on message signaled interrupts */
12429 		if (intr_mode == 0 ||
12430 		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
12431 			/* Log the current active interrupt mode */
12432 			phba->intr_mode = intr_mode;
12433 			lpfc_log_intr_mode(phba, intr_mode);
12434 			break;
12435 		} else {
12436 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12437 					"0447 Configure interrupt mode (%d) "
12438 					"failed active interrupt test.\n",
12439 					intr_mode);
12440 			/* Disable the current interrupt mode */
12441 			lpfc_sli_disable_intr(phba);
12442 			/* Try next level of interrupt mode */
12443 			cfg_mode = --intr_mode;
12444 		}
12445 	}
12446
12447 	/* Perform post initialization setup */
12448 	lpfc_post_init_setup(phba);
12449
12450 	/* Check if there are static vports to be created. */
12451 	lpfc_create_static_vport(phba);
12452
12453 	return 0;
12454
12455 out_remove_device:
12456 	lpfc_unset_hba(phba);
12457 out_free_sysfs_attr:
12458 	lpfc_free_sysfs_attr(vport);
12459 out_destroy_shost:
12460 	lpfc_destroy_shost(phba);
12461 out_unset_driver_resource:
12462 	lpfc_unset_driver_resource_phase2(phba);
12463 out_free_iocb_list:
12464 	lpfc_free_iocb_list(phba);
12465 out_unset_driver_resource_s3:
12466 	lpfc_sli_driver_resource_unset(phba);
12467 out_unset_pci_mem_s3:
12468 	lpfc_sli_pci_mem_unset(phba);
12469 out_disable_pci_dev:
12470 	lpfc_disable_pci_dev(phba);
12471 	if (shost)
12472 		scsi_host_put(shost);
12473 out_free_phba:
12474 	lpfc_hba_free(phba);
12475 	return error;
12476 }
12477
12478 /**
12479  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
12480  * @pdev: pointer to PCI device
12481  *
12482  * This routine is to be called to detach a device with SLI-3 interface
12483  * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
12484  * is removed from the PCI bus, it performs all the necessary cleanup for the
12485  * HBA device to be removed from the PCI subsystem properly.
12486  **/
12487 static void
12488 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
12489 {
12490 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
12491 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12492 	struct lpfc_vport **vports;
12493 	struct lpfc_hba *phba = vport->phba;
12494 	int i;
12495
12496 	spin_lock_irq(&phba->hbalock);
12497 	vport->load_flag |= FC_UNLOADING;
12498 	spin_unlock_irq(&phba->hbalock);
12499
12500 	lpfc_free_sysfs_attr(vport);
12501
12502 	/* Release all the vports against this physical port */
12503 	vports = lpfc_create_vport_work_array(phba);
12504 	if (vports != NULL)
12505 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12506 			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12507 				continue;
12508 			fc_vport_terminate(vports[i]->fc_vport);
12509 		}
12510 	lpfc_destroy_vport_work_array(phba, vports);
12511
12512 	/* Remove FC host with the physical port */
12513 	fc_remove_host(shost);
12514 	scsi_remove_host(shost);
12515
12516 	/* Clean up all nodes, mailboxes and IOs. */
12517 	lpfc_cleanup(vport);
12518
12519 	/*
12520 	 * Bring down the SLI Layer. This step disables all interrupts,
12521 	 * clears the rings, discards all mailbox commands, and resets
12522 	 * the HBA.
12523 */
12524
12525 /* HBA interrupt will be disabled after this call */
12526 lpfc_sli_hba_down(phba);
12527 /* Stopping the kthread will trigger work_done one more time */
12528 kthread_stop(phba->worker_thread);
12529 /* Final cleanup of txcmplq and reset the HBA */
12530 lpfc_sli_brdrestart(phba);
12531
12532 kfree(phba->vpi_bmask);
12533 kfree(phba->vpi_ids);
12534
12535 lpfc_stop_hba_timers(phba);
12536 spin_lock_irq(&phba->port_list_lock);
12537 list_del_init(&vport->listentry);
12538 spin_unlock_irq(&phba->port_list_lock);
12539
12540 lpfc_debugfs_terminate(vport);
12541
12542 /* Disable SR-IOV if enabled */
12543 if (phba->cfg_sriov_nr_virtfn)
12544 pci_disable_sriov(pdev);
12545
12546 /* Disable interrupt */
12547 lpfc_sli_disable_intr(phba);
12548
12549 scsi_host_put(shost);
12550
12551 /*
12552 * Call scsi_free before mem_free since scsi bufs are released to their
12553 * corresponding pools here.
12554 */
12555 lpfc_scsi_free(phba);
12556 lpfc_free_iocb_list(phba);
12557
12558 lpfc_mem_free_all(phba);
12559
12560 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
12561 phba->hbqslimp.virt, phba->hbqslimp.phys);
12562
12563 /* Free resources associated with SLI2 interface */
12564 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
12565 phba->slim2p.virt, phba->slim2p.phys);
12566
12567 /* unmap adapter SLIM and Control Registers */
12568 iounmap(phba->ctrl_regs_memmap_p);
12569 iounmap(phba->slim_memmap_p);
12570
12571 lpfc_hba_free(phba);
12572
12573 pci_release_mem_regions(pdev);
12574 pci_disable_device(pdev);
12575 }
12576
12577 /**
12578 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
12579 * @dev_d: pointer to device
12580 *
12581 * This routine is called from the kernel's PCI subsystem to support
12582 * system Power Management (PM) for a device with SLI-3 interface spec. When
12583 * PM invokes this method, it quiesces the device by stopping the driver's
12584 * worker thread for the device, turning off the device's interrupt and DMA,
12585 * and bringing the device offline. Note that the driver implements only the
12586 * minimum PM requirements for a power-aware driver's suspend/resume
12587 * support -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
12588 * to the suspend() method call are treated as SUSPEND, and the driver
12589 * fully reinitializes its device during the resume() method call. The
12590 * driver therefore sets the device to the PCI_D3hot state in PCI config
12591 * space instead of setting it according to the @msg provided by the PM.
12592 *
12593 * Return code
12594 * 0 - driver suspended the device
12595 * Error otherwise
12596 **/
12597 static int __maybe_unused
12598 lpfc_pci_suspend_one_s3(struct device *dev_d)
12599 {
12600 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
12601 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12602
12603 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12604 "0473 PCI device Power Management suspend.\n");
12605
12606 /* Bring down the device */
12607 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12608 lpfc_offline(phba);
12609 kthread_stop(phba->worker_thread);
12610
12611 /* Disable interrupt from device */
12612 lpfc_sli_disable_intr(phba);
12613
12614 return 0;
12615 }
12616
12617 /**
12618 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
12619 * @dev_d: pointer to device
12620 *
12621 * This routine is called from the kernel's PCI subsystem to support
12622 * system Power Management (PM) for a device with SLI-3 interface spec. When PM
12623 * invokes this method, it restores the device's PCI config space state and
12624 * fully reinitializes the device and brings it online. Note that the
12625 * driver implements only the minimum PM requirements for a power-aware
12626 * driver's suspend/resume support -- all the possible PM messages (SUSPEND,
12627 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND,
12628 * and the driver fully reinitializes its device during the resume() method
12629 * call. The device is therefore set to PCI_D0 directly in PCI config space
12630 * before restoring the state.
12631 *
12632 * Return code
12633 * 0 - driver resumed the device
12634 * Error otherwise
12635 **/
12636 static int __maybe_unused
12637 lpfc_pci_resume_one_s3(struct device *dev_d)
12638 {
12639 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
12640 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12641 uint32_t intr_mode;
12642 int error;
12643
12644 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12645 "0452 PCI device Power Management resume.\n");
12646
12647 /* Startup the kernel thread for this host adapter. */
12648 phba->worker_thread = kthread_run(lpfc_do_work, phba,
12649 "lpfc_worker_%d", phba->brd_no);
12650 if (IS_ERR(phba->worker_thread)) {
12651 error = PTR_ERR(phba->worker_thread);
12652 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12653 "0434 PM resume failed to start worker "
12654 "thread: error=x%x.\n", error);
12655 return error;
12656 }
12657
12658 /* Configure and enable interrupt */
12659 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12660 if (intr_mode == LPFC_INTR_ERROR) {
12661 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12662 "0430 PM resume Failed to enable interrupt\n");
12663 return -EIO;
12664 } else
12665 phba->intr_mode = intr_mode;
12666
12667 /* Restart HBA and bring it online */
12668 lpfc_sli_brdrestart(phba);
12669 lpfc_online(phba);
12670
12671 /* Log the current active interrupt mode */
12672 lpfc_log_intr_mode(phba, phba->intr_mode);
12673
12674 return 0;
12675 }
12676
12677 /**
12678 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recovery
12679 * @phba: pointer to lpfc hba data structure.
12680 *
12681 * This routine is called to prepare the SLI3 device for PCI slot recovery. It
12682 * aborts all the outstanding SCSI I/Os to the pci device.
12683 **/
12684 static void
12685 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12686 {
12687 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12688 "2723 PCI channel I/O abort preparing for recovery\n");
12689
12690 /*
12691 * There may be errored I/Os through the HBA; abort all I/Os on the
12692 * txcmplq and let the SCSI mid-layer retry them to recover.
12693 */
12694 lpfc_sli_abort_fcp_rings(phba);
12695 }
12696
12697 /**
12698 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
12699 * @phba: pointer to lpfc hba data structure.
12700 *
12701 * This routine is called to prepare the SLI3 device for PCI slot reset. It
12702 * disables the device interrupt and PCI device, and aborts the internal
12703 * pending FCP I/Os.
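 *
 * The ordering below mirrors a normal shutdown: management I/O is blocked
 * first, then SCSI device I/O, then the driver's outstanding I/Os are
 * flushed and the timers stopped, before the interrupt and the PCI device
 * are finally disabled.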
12704 **/ 12705 static void 12706 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 12707 { 12708 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12709 "2710 PCI channel disable preparing for reset\n"); 12710 12711 /* Block any management I/Os to the device */ 12712 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 12713 12714 /* Block all SCSI devices' I/Os on the host */ 12715 lpfc_scsi_dev_block(phba); 12716 12717 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 12718 lpfc_sli_flush_io_rings(phba); 12719 12720 /* stop all timers */ 12721 lpfc_stop_hba_timers(phba); 12722 12723 /* Disable interrupt and pci device */ 12724 lpfc_sli_disable_intr(phba); 12725 pci_disable_device(phba->pcidev); 12726 } 12727 12728 /** 12729 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 12730 * @phba: pointer to lpfc hba data structure. 12731 * 12732 * This routine is called to prepare the SLI3 device for PCI slot permanently 12733 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 12734 * pending I/Os. 12735 **/ 12736 static void 12737 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12738 { 12739 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12740 "2711 PCI channel permanent disable for failure\n"); 12741 /* Block all SCSI devices' I/Os on the host */ 12742 lpfc_scsi_dev_block(phba); 12743 12744 /* stop all timers */ 12745 lpfc_stop_hba_timers(phba); 12746 12747 /* Clean up all driver's outstanding SCSI I/Os */ 12748 lpfc_sli_flush_io_rings(phba); 12749 } 12750 12751 /** 12752 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 12753 * @pdev: pointer to PCI device. 12754 * @state: the current PCI connection state. 12755 * 12756 * This routine is called from the PCI subsystem for I/O error handling to 12757 * device with SLI-3 interface spec. This function is called by the PCI 12758 * subsystem after a PCI bus error affecting this device has been detected. 12759 * When this function is invoked, it will need to stop all the I/Os and 12760 * interrupt(s) to the device. Once that is done, it will return 12761 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 12762 * as desired. 
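 *
 * The mapping implemented below is: pci_channel_io_normal prepares for
 * recovery and returns PCI_ERS_RESULT_CAN_RECOVER, pci_channel_io_frozen
 * prepares for a slot reset and returns PCI_ERS_RESULT_NEED_RESET, and
 * pci_channel_io_perm_failure returns PCI_ERS_RESULT_DISCONNECT.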
12763 * 12764 * Return codes 12765 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 12766 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12767 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12768 **/ 12769 static pci_ers_result_t 12770 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 12771 { 12772 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12773 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12774 12775 switch (state) { 12776 case pci_channel_io_normal: 12777 /* Non-fatal error, prepare for recovery */ 12778 lpfc_sli_prep_dev_for_recover(phba); 12779 return PCI_ERS_RESULT_CAN_RECOVER; 12780 case pci_channel_io_frozen: 12781 /* Fatal error, prepare for slot reset */ 12782 lpfc_sli_prep_dev_for_reset(phba); 12783 return PCI_ERS_RESULT_NEED_RESET; 12784 case pci_channel_io_perm_failure: 12785 /* Permanent failure, prepare for device down */ 12786 lpfc_sli_prep_dev_for_perm_failure(phba); 12787 return PCI_ERS_RESULT_DISCONNECT; 12788 default: 12789 /* Unknown state, prepare and request slot reset */ 12790 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12791 "0472 Unknown PCI error state: x%x\n", state); 12792 lpfc_sli_prep_dev_for_reset(phba); 12793 return PCI_ERS_RESULT_NEED_RESET; 12794 } 12795 } 12796 12797 /** 12798 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 12799 * @pdev: pointer to PCI device. 12800 * 12801 * This routine is called from the PCI subsystem for error handling to 12802 * device with SLI-3 interface spec. This is called after PCI bus has been 12803 * reset to restart the PCI card from scratch, as if from a cold-boot. 12804 * During the PCI subsystem error recovery, after driver returns 12805 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 12806 * recovery and then call this routine before calling the .resume method 12807 * to recover the device. This function will initialize the HBA device, 12808 * enable the interrupt, but it will just put the HBA to offline state 12809 * without passing any I/O traffic. 12810 * 12811 * Return codes 12812 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12813 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12814 */ 12815 static pci_ers_result_t 12816 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 12817 { 12818 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12819 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12820 struct lpfc_sli *psli = &phba->sli; 12821 uint32_t intr_mode; 12822 12823 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 12824 if (pci_enable_device_mem(pdev)) { 12825 printk(KERN_ERR "lpfc: Cannot re-enable " 12826 "PCI device after reset.\n"); 12827 return PCI_ERS_RESULT_DISCONNECT; 12828 } 12829 12830 pci_restore_state(pdev); 12831 12832 /* 12833 * As the new kernel behavior of pci_restore_state() API call clears 12834 * device saved_state flag, need to save the restored state again. 
12835 */ 12836 pci_save_state(pdev); 12837 12838 if (pdev->is_busmaster) 12839 pci_set_master(pdev); 12840 12841 spin_lock_irq(&phba->hbalock); 12842 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 12843 spin_unlock_irq(&phba->hbalock); 12844 12845 /* Configure and enable interrupt */ 12846 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12847 if (intr_mode == LPFC_INTR_ERROR) { 12848 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12849 "0427 Cannot re-enable interrupt after " 12850 "slot reset.\n"); 12851 return PCI_ERS_RESULT_DISCONNECT; 12852 } else 12853 phba->intr_mode = intr_mode; 12854 12855 /* Take device offline, it will perform cleanup */ 12856 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12857 lpfc_offline(phba); 12858 lpfc_sli_brdrestart(phba); 12859 12860 /* Log the current active interrupt mode */ 12861 lpfc_log_intr_mode(phba, phba->intr_mode); 12862 12863 return PCI_ERS_RESULT_RECOVERED; 12864 } 12865 12866 /** 12867 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 12868 * @pdev: pointer to PCI device 12869 * 12870 * This routine is called from the PCI subsystem for error handling to device 12871 * with SLI-3 interface spec. It is called when kernel error recovery tells 12872 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 12873 * error recovery. After this call, traffic can start to flow from this device 12874 * again. 12875 */ 12876 static void 12877 lpfc_io_resume_s3(struct pci_dev *pdev) 12878 { 12879 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12880 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12881 12882 /* Bring device online, it will be no-op for non-fatal error resume */ 12883 lpfc_online(phba); 12884 } 12885 12886 /** 12887 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 12888 * @phba: pointer to lpfc hba data structure. 12889 * 12890 * returns the number of ELS/CT IOCBs to reserve 12891 **/ 12892 int 12893 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 12894 { 12895 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 12896 12897 if (phba->sli_rev == LPFC_SLI_REV4) { 12898 if (max_xri <= 100) 12899 return 10; 12900 else if (max_xri <= 256) 12901 return 25; 12902 else if (max_xri <= 512) 12903 return 50; 12904 else if (max_xri <= 1024) 12905 return 100; 12906 else if (max_xri <= 1536) 12907 return 150; 12908 else if (max_xri <= 2048) 12909 return 200; 12910 else 12911 return 250; 12912 } else 12913 return 0; 12914 } 12915 12916 /** 12917 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 12918 * @phba: pointer to lpfc hba data structure. 12919 * 12920 * returns the number of ELS/CT + NVMET IOCBs to reserve 12921 **/ 12922 int 12923 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 12924 { 12925 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 12926 12927 if (phba->nvmet_support) 12928 max_xri += LPFC_NVMET_BUF_POST; 12929 return max_xri; 12930 } 12931 12932 12933 static int 12934 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 12935 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 12936 const struct firmware *fw) 12937 { 12938 int rc; 12939 12940 /* Three cases: (1) FW was not supported on the detected adapter. 12941 * (2) FW update has been locked out administratively. 12942 * (3) Some other error during FW update. 12943 * In each case, an unmaskable message is written to the console 12944 * for admin diagnosis. 
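 * The three cases map to distinct return codes: -EINVAL for an image that
 * is not supported on the adapter, -EACCES for an administratively
 * locked-out download, and -EIO for any other download failure.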
12945 */
12946 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
12947 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12948 magic_number != MAGIC_NUMBER_G6) ||
12949 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12950 magic_number != MAGIC_NUMBER_G7)) {
12951 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12952 "3030 This firmware version is not supported on"
12953 " this HBA model. Device:%x Magic:%x Type:%x "
12954 "ID:%x Size %d %zd\n",
12955 phba->pcidev->device, magic_number, ftype, fid,
12956 fsize, fw->size);
12957 rc = -EINVAL;
12958 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
12959 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12960 "3021 Firmware downloads have been prohibited "
12961 "by a system configuration setting on "
12962 "Device:%x Magic:%x Type:%x ID:%x Size %d "
12963 "%zd\n",
12964 phba->pcidev->device, magic_number, ftype, fid,
12965 fsize, fw->size);
12966 rc = -EACCES;
12967 } else {
12968 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12969 "3022 FW Download failed. Add Status x%x "
12970 "Device:%x Magic:%x Type:%x ID:%x Size %d "
12971 "%zd\n",
12972 offset, phba->pcidev->device, magic_number,
12973 ftype, fid, fsize, fw->size);
12974 rc = -EIO;
12975 }
12976 return rc;
12977 }
12978
12979 /**
12980 * lpfc_write_firmware - attempt to write a firmware image to the port
12981 * @fw: pointer to firmware image returned from request_firmware.
12982 * @context: pointer to the lpfc hba data structure (passed as the request context).
12983 *
12984 **/
12985 static void
12986 lpfc_write_firmware(const struct firmware *fw, void *context)
12987 {
12988 struct lpfc_hba *phba = (struct lpfc_hba *)context;
12989 char fwrev[FW_REV_STR_SIZE];
12990 struct lpfc_grp_hdr *image;
12991 struct list_head dma_buffer_list;
12992 int i, rc = 0;
12993 struct lpfc_dmabuf *dmabuf, *next;
12994 uint32_t offset = 0, temp_offset = 0;
12995 uint32_t magic_number, ftype, fid, fsize;
12996
12997 /* fw can be NULL in no-wait mode; sanity check */
12998 if (!fw) {
12999 rc = -ENXIO;
13000 goto out;
13001 }
13002 image = (struct lpfc_grp_hdr *)fw->data;
13003
13004 magic_number = be32_to_cpu(image->magic_number);
13005 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
13006 fid = bf_get_be32(lpfc_grp_hdr_id, image);
13007 fsize = be32_to_cpu(image->size);
13008
13009 INIT_LIST_HEAD(&dma_buffer_list);
13010 lpfc_decode_firmware_rev(phba, fwrev, 1);
13011 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
13012 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13013 "3023 Updating Firmware, Current Version:%s "
13014 "New Version:%s\n",
13015 fwrev, image->revision);
13016 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
13017 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
13018 GFP_KERNEL);
13019 if (!dmabuf) {
13020 rc = -ENOMEM;
13021 goto release_out;
13022 }
13023 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
13024 SLI4_PAGE_SIZE,
13025 &dmabuf->phys,
13026 GFP_KERNEL);
13027 if (!dmabuf->virt) {
13028 kfree(dmabuf);
13029 rc = -ENOMEM;
13030 goto release_out;
13031 }
13032 list_add_tail(&dmabuf->list, &dma_buffer_list);
13033 }
13034 while (offset < fw->size) {
13035 temp_offset = offset;
13036 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
13037 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
13038 memcpy(dmabuf->virt,
13039 fw->data + temp_offset,
13040 fw->size - temp_offset);
13041 temp_offset = fw->size;
13042 break;
13043 }
13044 memcpy(dmabuf->virt, fw->data + temp_offset,
13045 SLI4_PAGE_SIZE);
13046 temp_offset += SLI4_PAGE_SIZE;
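/*
 * Note: each pass of this inner loop stages one SLI4_PAGE_SIZE chunk of
 * the image into a DMA buffer; once the list (up to
 * LPFC_MBX_WR_CONFIG_MAX_BDE buffers) is filled, lpfc_wr_object() below
 * ships it to the port and advances @offset, so the outer loop resumes
 * copying from wherever the download left off.
 */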
13047 }
13048 rc = lpfc_wr_object(phba, &dma_buffer_list,
13049 (fw->size - offset), &offset);
13050 if (rc) {
13051 rc = lpfc_log_write_firmware_error(phba, offset,
13052 magic_number,
13053 ftype,
13054 fid,
13055 fsize,
13056 fw);
13057 goto release_out;
13058 }
13059 }
13060 rc = offset;
13061 } else
13062 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13063 "3029 Skipped Firmware update, Current "
13064 "Version:%s New Version:%s\n",
13065 fwrev, image->revision);
13066
13067 release_out:
13068 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
13069 list_del(&dmabuf->list);
13070 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
13071 dmabuf->virt, dmabuf->phys);
13072 kfree(dmabuf);
13073 }
13074 release_firmware(fw);
13075 out:
13076 if (rc < 0)
13077 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13078 "3062 Firmware update error, status %d.\n", rc);
13079 else
13080 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13081 "3024 Firmware update success: size %d.\n", rc);
13082 }
13083
13084 /**
13085 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
13086 * @phba: pointer to lpfc hba data structure.
13087 * @fw_upgrade: which firmware to update.
13088 *
13089 * This routine is called to perform a Linux generic firmware upgrade on a
13090 * device that supports this feature.
13091 **/
13092 int
13093 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
13094 {
13095 uint8_t file_name[ELX_MODEL_NAME_SIZE];
13096 int ret;
13097 const struct firmware *fw;
13098
13099 /* Only supported on SLI4 interface type 2 for now */
13100 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
13101 LPFC_SLI_INTF_IF_TYPE_2)
13102 return -EPERM;
13103
13104 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
13105
13106 if (fw_upgrade == INT_FW_UPGRADE) {
13107 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
13108 file_name, &phba->pcidev->dev,
13109 GFP_KERNEL, (void *)phba,
13110 lpfc_write_firmware);
13111 } else if (fw_upgrade == RUN_FW_UPGRADE) {
13112 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
13113 if (!ret)
13114 lpfc_write_firmware(fw, (void *)phba);
13115 } else {
13116 ret = -EINVAL;
13117 }
13118
13119 return ret;
13120 }
13121
13122 /**
13123 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
13124 * @pdev: pointer to PCI device
13125 * @pid: pointer to PCI device identifier
13126 *
13127 * This routine is called from the kernel's PCI subsystem for a device with
13128 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
13129 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
13130 * device-specific information of the device and driver to see if the driver
13131 * can support this kind of device. If the match is successful, the driver
13132 * core invokes this routine. If this routine determines it can claim the HBA,
13133 * it does all the initialization that it needs to do to handle the HBA
13134 * properly.
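 *
 * Initialization proceeds roughly as: PCI enablement, SLI-4 PCI memory-space
 * and driver-resource setup, interrupt enablement, SCSI host creation, and
 * finally lpfc_sli4_hba_setup(); each failure path unwinds the steps that
 * preceded it.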
13135 * 13136 * Return code 13137 * 0 - driver can claim the device 13138 * negative value - driver can not claim the device 13139 **/ 13140 static int 13141 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 13142 { 13143 struct lpfc_hba *phba; 13144 struct lpfc_vport *vport = NULL; 13145 struct Scsi_Host *shost = NULL; 13146 int error; 13147 uint32_t cfg_mode, intr_mode; 13148 13149 /* Allocate memory for HBA structure */ 13150 phba = lpfc_hba_alloc(pdev); 13151 if (!phba) 13152 return -ENOMEM; 13153 13154 /* Perform generic PCI device enabling operation */ 13155 error = lpfc_enable_pci_dev(phba); 13156 if (error) 13157 goto out_free_phba; 13158 13159 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 13160 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 13161 if (error) 13162 goto out_disable_pci_dev; 13163 13164 /* Set up SLI-4 specific device PCI memory space */ 13165 error = lpfc_sli4_pci_mem_setup(phba); 13166 if (error) { 13167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13168 "1410 Failed to set up pci memory space.\n"); 13169 goto out_disable_pci_dev; 13170 } 13171 13172 /* Set up SLI-4 Specific device driver resources */ 13173 error = lpfc_sli4_driver_resource_setup(phba); 13174 if (error) { 13175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13176 "1412 Failed to set up driver resource.\n"); 13177 goto out_unset_pci_mem_s4; 13178 } 13179 13180 INIT_LIST_HEAD(&phba->active_rrq_list); 13181 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 13182 13183 /* Set up common device driver resources */ 13184 error = lpfc_setup_driver_resource_phase2(phba); 13185 if (error) { 13186 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13187 "1414 Failed to set up driver resource.\n"); 13188 goto out_unset_driver_resource_s4; 13189 } 13190 13191 /* Get the default values for Model Name and Description */ 13192 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 13193 13194 /* Now, trying to enable interrupt and bring up the device */ 13195 cfg_mode = phba->cfg_use_msi; 13196 13197 /* Put device to a known state before enabling interrupt */ 13198 phba->pport = NULL; 13199 lpfc_stop_port(phba); 13200 13201 /* Init cpu_map array */ 13202 lpfc_cpu_map_array_init(phba); 13203 13204 /* Init hba_eq_hdl array */ 13205 lpfc_hba_eq_hdl_array_init(phba); 13206 13207 /* Configure and enable interrupt */ 13208 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 13209 if (intr_mode == LPFC_INTR_ERROR) { 13210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13211 "0426 Failed to enable interrupt.\n"); 13212 error = -ENODEV; 13213 goto out_unset_driver_resource; 13214 } 13215 /* Default to single EQ for non-MSI-X */ 13216 if (phba->intr_type != MSIX) { 13217 phba->cfg_irq_chann = 1; 13218 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13219 if (phba->nvmet_support) 13220 phba->cfg_nvmet_mrq = 1; 13221 } 13222 } 13223 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 13224 13225 /* Create SCSI host to the physical port */ 13226 error = lpfc_create_shost(phba); 13227 if (error) { 13228 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13229 "1415 Failed to create scsi host.\n"); 13230 goto out_disable_intr; 13231 } 13232 vport = phba->pport; 13233 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 13234 13235 /* Configure sysfs attributes */ 13236 error = lpfc_alloc_sysfs_attr(vport); 13237 if (error) { 13238 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13239 "1416 Failed to allocate sysfs attr\n"); 13240 goto out_destroy_shost; 13241 } 13242 13243 /* Set 
up SLI-4 HBA */ 13244 if (lpfc_sli4_hba_setup(phba)) { 13245 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13246 "1421 Failed to set up hba\n"); 13247 error = -ENODEV; 13248 goto out_free_sysfs_attr; 13249 } 13250 13251 /* Log the current active interrupt mode */ 13252 phba->intr_mode = intr_mode; 13253 lpfc_log_intr_mode(phba, intr_mode); 13254 13255 /* Perform post initialization setup */ 13256 lpfc_post_init_setup(phba); 13257 13258 /* NVME support in FW earlier in the driver load corrects the 13259 * FC4 type making a check for nvme_support unnecessary. 13260 */ 13261 if (phba->nvmet_support == 0) { 13262 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13263 /* Create NVME binding with nvme_fc_transport. This 13264 * ensures the vport is initialized. If the localport 13265 * create fails, it should not unload the driver to 13266 * support field issues. 13267 */ 13268 error = lpfc_nvme_create_localport(vport); 13269 if (error) { 13270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13271 "6004 NVME registration " 13272 "failed, error x%x\n", 13273 error); 13274 } 13275 } 13276 } 13277 13278 /* check for firmware upgrade or downgrade */ 13279 if (phba->cfg_request_firmware_upgrade) 13280 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 13281 13282 /* Check if there are static vports to be created. */ 13283 lpfc_create_static_vport(phba); 13284 13285 /* Enable RAS FW log support */ 13286 lpfc_sli4_ras_setup(phba); 13287 13288 INIT_LIST_HEAD(&phba->poll_list); 13289 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 13290 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 13291 13292 return 0; 13293 13294 out_free_sysfs_attr: 13295 lpfc_free_sysfs_attr(vport); 13296 out_destroy_shost: 13297 lpfc_destroy_shost(phba); 13298 out_disable_intr: 13299 lpfc_sli4_disable_intr(phba); 13300 out_unset_driver_resource: 13301 lpfc_unset_driver_resource_phase2(phba); 13302 out_unset_driver_resource_s4: 13303 lpfc_sli4_driver_resource_unset(phba); 13304 out_unset_pci_mem_s4: 13305 lpfc_sli4_pci_mem_unset(phba); 13306 out_disable_pci_dev: 13307 lpfc_disable_pci_dev(phba); 13308 if (shost) 13309 scsi_host_put(shost); 13310 out_free_phba: 13311 lpfc_hba_free(phba); 13312 return error; 13313 } 13314 13315 /** 13316 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 13317 * @pdev: pointer to PCI device 13318 * 13319 * This routine is called from the kernel's PCI subsystem to device with 13320 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 13321 * removed from PCI bus, it performs all the necessary cleanup for the HBA 13322 * device to be removed from the PCI subsystem properly. 
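 *
 * Teardown runs in roughly the reverse order of probe: the vports and FC
 * host go first, then the SLI layer and interrupts are brought down, and
 * the PCI resources and hba structure are released last.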
13323 **/
13324 static void
13325 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
13326 {
13327 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13328 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13329 struct lpfc_vport **vports;
13330 struct lpfc_hba *phba = vport->phba;
13331 int i;
13332
13333 /* Mark the device unloading flag */
13334 spin_lock_irq(&phba->hbalock);
13335 vport->load_flag |= FC_UNLOADING;
13336 spin_unlock_irq(&phba->hbalock);
13337
13338 lpfc_free_sysfs_attr(vport);
13339
13340 /* Release all the vports against this physical port */
13341 vports = lpfc_create_vport_work_array(phba);
13342 if (vports != NULL)
13343 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13344 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13345 continue;
13346 fc_vport_terminate(vports[i]->fc_vport);
13347 }
13348 lpfc_destroy_vport_work_array(phba, vports);
13349
13350 /* Remove FC host with the physical port */
13351 fc_remove_host(shost);
13352 scsi_remove_host(shost);
13353
13354 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
13355 * localports are destroyed afterwards to clean up all transport memory.
13356 */
13357 lpfc_cleanup(vport);
13358 lpfc_nvmet_destroy_targetport(phba);
13359 lpfc_nvme_destroy_localport(vport);
13360
13361 /* De-allocate multi-XRI pools */
13362 if (phba->cfg_xri_rebalancing)
13363 lpfc_destroy_multixri_pools(phba);
13364
13365 /*
13366 * Bring down the SLI Layer. This step disables all interrupts,
13367 * clears the rings, discards all mailbox commands, and resets
13368 * the HBA FCoE function.
13369 */
13370 lpfc_debugfs_terminate(vport);
13371
13372 lpfc_stop_hba_timers(phba);
13373 spin_lock_irq(&phba->port_list_lock);
13374 list_del_init(&vport->listentry);
13375 spin_unlock_irq(&phba->port_list_lock);
13376
13377 /* Perform scsi free before driver resource_unset since scsi
13378 * buffers are released to their corresponding pools here.
13379 */
13380 lpfc_io_free(phba);
13381 lpfc_free_iocb_list(phba);
13382 lpfc_sli4_hba_unset(phba);
13383
13384 lpfc_unset_driver_resource_phase2(phba);
13385 lpfc_sli4_driver_resource_unset(phba);
13386
13387 /* Unmap adapter Control and Doorbell registers */
13388 lpfc_sli4_pci_mem_unset(phba);
13389
13390 /* Release PCI resources and disable device's PCI function */
13391 scsi_host_put(shost);
13392 lpfc_disable_pci_dev(phba);
13393
13394 /* Finally, free the driver's device data structure */
13395 lpfc_hba_free(phba);
13396
13397 return;
13398 }
13399
13400 /**
13401 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
13402 * @dev_d: pointer to device
13403 *
13404 * This routine is called from the kernel's PCI subsystem to support system
13405 * Power Management (PM) for a device with SLI-4 interface spec. When PM
13406 * invokes this method, it quiesces the device by stopping the driver's worker
13407 * thread for the device, turning off the device's interrupt and DMA, and
13408 * bringing the device offline. Note that the driver implements only the
13409 * minimum PM requirements for a power-aware driver's suspend/resume
13410 * support -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
13411 * suspend() method call are treated as SUSPEND, and the driver fully
13412 * reinitializes its device during the resume() method call. The driver
13413 * therefore sets the device to the PCI_D3hot state in PCI config space
13414 * instead of setting it according to the @msg provided by the PM.
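 *
 * Unlike the SLI-3 path, the SLI-4 suspend additionally destroys the SLI-4
 * queues via lpfc_sli4_queue_destroy() after disabling the interrupt.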
13415 *
13416 * Return code
13417 * 0 - driver suspended the device
13418 * Error otherwise
13419 **/
13420 static int __maybe_unused
13421 lpfc_pci_suspend_one_s4(struct device *dev_d)
13422 {
13423 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
13424 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13425
13426 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13427 "2843 PCI device Power Management suspend.\n");
13428
13429 /* Bring down the device */
13430 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13431 lpfc_offline(phba);
13432 kthread_stop(phba->worker_thread);
13433
13434 /* Disable interrupt from device */
13435 lpfc_sli4_disable_intr(phba);
13436 lpfc_sli4_queue_destroy(phba);
13437
13438 return 0;
13439 }
13440
13441 /**
13442 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
13443 * @dev_d: pointer to device
13444 *
13445 * This routine is called from the kernel's PCI subsystem to support system
13446 * Power Management (PM) for a device with SLI-4 interface spec. When PM
13447 * invokes this method, it restores the device's PCI config space state and
13448 * fully reinitializes the device and brings it online. Note that the driver
13449 * implements only the minimum PM requirements for a power-aware driver's
13450 * suspend/resume support -- all the possible PM messages (SUSPEND, HIBERNATE,
13451 * FREEZE) to the suspend() method call are treated as SUSPEND, and the driver
13452 * fully reinitializes its device during the resume() method call. The device
13453 * is therefore set to PCI_D0 directly in PCI config space before restoring
13454 * the state.
13455 *
13456 * Return code
13457 * 0 - driver resumed the device
13458 * Error otherwise
13459 **/
13460 static int __maybe_unused
13461 lpfc_pci_resume_one_s4(struct device *dev_d)
13462 {
13463 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
13464 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13465 uint32_t intr_mode;
13466 int error;
13467
13468 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13469 "0292 PCI device Power Management resume.\n");
13470
13471 /* Startup the kernel thread for this host adapter. */
13472 phba->worker_thread = kthread_run(lpfc_do_work, phba,
13473 "lpfc_worker_%d", phba->brd_no);
13474 if (IS_ERR(phba->worker_thread)) {
13475 error = PTR_ERR(phba->worker_thread);
13476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13477 "0293 PM resume failed to start worker "
13478 "thread: error=x%x.\n", error);
13479 return error;
13480 }
13481
13482 /* Configure and enable interrupt */
13483 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13484 if (intr_mode == LPFC_INTR_ERROR) {
13485 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13486 "0294 PM resume Failed to enable interrupt\n");
13487 return -EIO;
13488 } else
13489 phba->intr_mode = intr_mode;
13490
13491 /* Restart HBA and bring it online */
13492 lpfc_sli_brdrestart(phba);
13493 lpfc_online(phba);
13494
13495 /* Log the current active interrupt mode */
13496 lpfc_log_intr_mode(phba, phba->intr_mode);
13497
13498 return 0;
13499 }
13500
13501 /**
13502 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recovery
13503 * @phba: pointer to lpfc hba data structure.
13504 *
13505 * This routine is called to prepare the SLI4 device for PCI slot recovery. It
13506 * aborts all the outstanding SCSI I/Os to the pci device.
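 * It mirrors the SLI-3 handler: lpfc_sli_abort_fcp_rings() aborts everything
 * on the txcmplq so the SCSI mid-layer can retry those I/Os once the slot
 * recovers.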
13507 **/ 13508 static void 13509 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 13510 { 13511 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13512 "2828 PCI channel I/O abort preparing for recovery\n"); 13513 /* 13514 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 13515 * and let the SCSI mid-layer to retry them to recover. 13516 */ 13517 lpfc_sli_abort_fcp_rings(phba); 13518 } 13519 13520 /** 13521 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 13522 * @phba: pointer to lpfc hba data structure. 13523 * 13524 * This routine is called to prepare the SLI4 device for PCI slot reset. It 13525 * disables the device interrupt and pci device, and aborts the internal FCP 13526 * pending I/Os. 13527 **/ 13528 static void 13529 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 13530 { 13531 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13532 "2826 PCI channel disable preparing for reset\n"); 13533 13534 /* Block any management I/Os to the device */ 13535 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 13536 13537 /* Block all SCSI devices' I/Os on the host */ 13538 lpfc_scsi_dev_block(phba); 13539 13540 /* Flush all driver's outstanding I/Os as we are to reset */ 13541 lpfc_sli_flush_io_rings(phba); 13542 13543 /* stop all timers */ 13544 lpfc_stop_hba_timers(phba); 13545 13546 /* Disable interrupt and pci device */ 13547 lpfc_sli4_disable_intr(phba); 13548 lpfc_sli4_queue_destroy(phba); 13549 pci_disable_device(phba->pcidev); 13550 } 13551 13552 /** 13553 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 13554 * @phba: pointer to lpfc hba data structure. 13555 * 13556 * This routine is called to prepare the SLI4 device for PCI slot permanently 13557 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 13558 * pending I/Os. 13559 **/ 13560 static void 13561 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 13562 { 13563 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13564 "2827 PCI channel permanent disable for failure\n"); 13565 13566 /* Block all SCSI devices' I/Os on the host */ 13567 lpfc_scsi_dev_block(phba); 13568 13569 /* stop all timers */ 13570 lpfc_stop_hba_timers(phba); 13571 13572 /* Clean up all driver's outstanding I/Os */ 13573 lpfc_sli_flush_io_rings(phba); 13574 } 13575 13576 /** 13577 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 13578 * @pdev: pointer to PCI device. 13579 * @state: the current PCI connection state. 13580 * 13581 * This routine is called from the PCI subsystem for error handling to device 13582 * with SLI-4 interface spec. This function is called by the PCI subsystem 13583 * after a PCI bus error affecting this device has been detected. When this 13584 * function is invoked, it will need to stop all the I/Os and interrupt(s) 13585 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 13586 * for the PCI subsystem to perform proper recovery as desired. 
13587 *
13588 * Return codes
 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
13589 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13590 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13591 **/
13592 static pci_ers_result_t
13593 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
13594 {
13595 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13596 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13597
13598 switch (state) {
13599 case pci_channel_io_normal:
13600 /* Non-fatal error, prepare for recovery */
13601 lpfc_sli4_prep_dev_for_recover(phba);
13602 return PCI_ERS_RESULT_CAN_RECOVER;
13603 case pci_channel_io_frozen:
13604 /* Fatal error, prepare for slot reset */
13605 lpfc_sli4_prep_dev_for_reset(phba);
13606 return PCI_ERS_RESULT_NEED_RESET;
13607 case pci_channel_io_perm_failure:
13608 /* Permanent failure, prepare for device down */
13609 lpfc_sli4_prep_dev_for_perm_failure(phba);
13610 return PCI_ERS_RESULT_DISCONNECT;
13611 default:
13612 /* Unknown state, prepare and request slot reset */
13613 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13614 "2825 Unknown PCI error state: x%x\n", state);
13615 lpfc_sli4_prep_dev_for_reset(phba);
13616 return PCI_ERS_RESULT_NEED_RESET;
13617 }
13618 }
13619
13620 /**
13621 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
13622 * @pdev: pointer to PCI device.
13623 *
13624 * This routine is called from the PCI subsystem for error handling to device
13625 * with SLI-4 interface spec. It is called after the PCI bus has been reset to
13626 * restart the PCI card from scratch, as if from a cold-boot. During the
13627 * PCI subsystem error recovery, after the driver returns
13628 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
13629 * recovery and then call this routine before calling the .resume method to
13630 * recover the device. This function will initialize the HBA device, enable
13631 * the interrupt, but it will just put the HBA to offline state without
13632 * passing any I/O traffic.
13633 *
13634 * Return codes
13635 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
13636 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13637 */
13638 static pci_ers_result_t
13639 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
13640 {
13641 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13642 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13643 struct lpfc_sli *psli = &phba->sli;
13644 uint32_t intr_mode;
13645
13646 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
13647 if (pci_enable_device_mem(pdev)) {
13648 printk(KERN_ERR "lpfc: Cannot re-enable "
13649 "PCI device after reset.\n");
13650 return PCI_ERS_RESULT_DISCONNECT;
13651 }
13652
13653 pci_restore_state(pdev);
13654
13655 /*
13656 * As the new kernel behavior of pci_restore_state() API call clears
13657 * device saved_state flag, need to save the restored state again.
13658 */
13659 pci_save_state(pdev);
13660
13661 if (pdev->is_busmaster)
13662 pci_set_master(pdev);
13663
13664 spin_lock_irq(&phba->hbalock);
13665 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13666 spin_unlock_irq(&phba->hbalock);
13667
13668 /* Configure and enable interrupt */
13669 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13670 if (intr_mode == LPFC_INTR_ERROR) {
13671 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13672 "2824 Cannot re-enable interrupt after "
13673 "slot reset.\n");
13674 return PCI_ERS_RESULT_DISCONNECT;
13675 } else
13676 phba->intr_mode = intr_mode;
13677
13678 /* Log the current active interrupt mode */
13679 lpfc_log_intr_mode(phba, phba->intr_mode);
13680
13681 return PCI_ERS_RESULT_RECOVERED;
13682 }
13683
13684 /**
13685 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
13686 * @pdev: pointer to PCI device
13687 *
13688 * This routine is called from the PCI subsystem for error handling to device
13689 * with SLI-4 interface spec. It is called when kernel error recovery tells
13690 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
13691 * error recovery. After this call, traffic can start to flow from this device
13692 * again.
13693 **/
13694 static void
13695 lpfc_io_resume_s4(struct pci_dev *pdev)
13696 {
13697 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13698 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13699
13700 /*
13701 * In case of slot reset, as the function reset is performed through a
13702 * mailbox command which needs DMA to be enabled, this operation has to
13703 * be moved to the io resume phase. Taking the device offline will
13704 * perform the necessary cleanup.
13705 */
13706 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
13707 /* Perform device reset */
13708 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13709 lpfc_offline(phba);
13710 lpfc_sli_brdrestart(phba);
13711 /* Bring the device back online */
13712 lpfc_online(phba);
13713 }
13714 }
13715
13716 /**
13717 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
13718 * @pdev: pointer to PCI device
13719 * @pid: pointer to PCI device identifier
13720 *
13721 * This routine is to be registered to the kernel's PCI subsystem. When an
13722 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
13723 * looks at the PCI device-specific information of the device and driver to
13724 * see if the driver can support this kind of device. If the match is
13725 * successful, the driver core invokes this routine. This routine dispatches
13726 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
13727 * do all the initialization that it needs to do to handle the HBA device
13728 * properly.
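 *
 * The SLI revision is read from the LPFC_SLI_INTF register in PCI config
 * space before any per-HBA state exists, which is what allows this dispatch
 * to happen so early in probe.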
13729 * 13730 * Return code 13731 * 0 - driver can claim the device 13732 * negative value - driver can not claim the device 13733 **/ 13734 static int 13735 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 13736 { 13737 int rc; 13738 struct lpfc_sli_intf intf; 13739 13740 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 13741 return -ENODEV; 13742 13743 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 13744 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 13745 rc = lpfc_pci_probe_one_s4(pdev, pid); 13746 else 13747 rc = lpfc_pci_probe_one_s3(pdev, pid); 13748 13749 return rc; 13750 } 13751 13752 /** 13753 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 13754 * @pdev: pointer to PCI device 13755 * 13756 * This routine is to be registered to the kernel's PCI subsystem. When an 13757 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 13758 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 13759 * remove routine, which will perform all the necessary cleanup for the 13760 * device to be removed from the PCI subsystem properly. 13761 **/ 13762 static void 13763 lpfc_pci_remove_one(struct pci_dev *pdev) 13764 { 13765 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13766 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13767 13768 switch (phba->pci_dev_grp) { 13769 case LPFC_PCI_DEV_LP: 13770 lpfc_pci_remove_one_s3(pdev); 13771 break; 13772 case LPFC_PCI_DEV_OC: 13773 lpfc_pci_remove_one_s4(pdev); 13774 break; 13775 default: 13776 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13777 "1424 Invalid PCI device group: 0x%x\n", 13778 phba->pci_dev_grp); 13779 break; 13780 } 13781 return; 13782 } 13783 13784 /** 13785 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 13786 * @dev: pointer to device 13787 * 13788 * This routine is to be registered to the kernel's PCI subsystem to support 13789 * system Power Management (PM). When PM invokes this method, it dispatches 13790 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 13791 * suspend the device. 13792 * 13793 * Return code 13794 * 0 - driver suspended the device 13795 * Error otherwise 13796 **/ 13797 static int __maybe_unused 13798 lpfc_pci_suspend_one(struct device *dev) 13799 { 13800 struct Scsi_Host *shost = dev_get_drvdata(dev); 13801 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13802 int rc = -ENODEV; 13803 13804 switch (phba->pci_dev_grp) { 13805 case LPFC_PCI_DEV_LP: 13806 rc = lpfc_pci_suspend_one_s3(dev); 13807 break; 13808 case LPFC_PCI_DEV_OC: 13809 rc = lpfc_pci_suspend_one_s4(dev); 13810 break; 13811 default: 13812 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13813 "1425 Invalid PCI device group: 0x%x\n", 13814 phba->pci_dev_grp); 13815 break; 13816 } 13817 return rc; 13818 } 13819 13820 /** 13821 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 13822 * @dev: pointer to device 13823 * 13824 * This routine is to be registered to the kernel's PCI subsystem to support 13825 * system Power Management (PM). When PM invokes this method, it dispatches 13826 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 13827 * resume the device. 
13828 *
13829 * Return code
13830 * 0 - driver resumed the device
13831 * Error otherwise
13832 **/
13833 static int __maybe_unused
13834 lpfc_pci_resume_one(struct device *dev)
13835 {
13836 struct Scsi_Host *shost = dev_get_drvdata(dev);
13837 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13838 int rc = -ENODEV;
13839
13840 switch (phba->pci_dev_grp) {
13841 case LPFC_PCI_DEV_LP:
13842 rc = lpfc_pci_resume_one_s3(dev);
13843 break;
13844 case LPFC_PCI_DEV_OC:
13845 rc = lpfc_pci_resume_one_s4(dev);
13846 break;
13847 default:
13848 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13849 "1426 Invalid PCI device group: 0x%x\n",
13850 phba->pci_dev_grp);
13851 break;
13852 }
13853 return rc;
13854 }
13855
13856 /**
13857 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
13858 * @pdev: pointer to PCI device.
13859 * @state: the current PCI connection state.
13860 *
13861 * This routine is registered to the PCI subsystem for error handling. This
13862 * function is called by the PCI subsystem after a PCI bus error affecting
13863 * this device has been detected. When this routine is invoked, it dispatches
13864 * the action to the proper SLI-3 or SLI-4 device error detected handling
13865 * routine, which will perform the proper error detected operation.
13866 *
13867 * Return codes
13868 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13869 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13870 **/
13871 static pci_ers_result_t
13872 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13873 {
13874 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13875 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13876 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13877
13878 switch (phba->pci_dev_grp) {
13879 case LPFC_PCI_DEV_LP:
13880 rc = lpfc_io_error_detected_s3(pdev, state);
13881 break;
13882 case LPFC_PCI_DEV_OC:
13883 rc = lpfc_io_error_detected_s4(pdev, state);
13884 break;
13885 default:
13886 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13887 "1427 Invalid PCI device group: 0x%x\n",
13888 phba->pci_dev_grp);
13889 break;
13890 }
13891 return rc;
13892 }
13893
13894 /**
13895 * lpfc_io_slot_reset - lpfc method for restarting the PCI device from scratch
13896 * @pdev: pointer to PCI device.
13897 *
13898 * This routine is registered to the PCI subsystem for error handling. This
13899 * function is called after the PCI bus has been reset to restart the PCI card
13900 * from scratch, as if from a cold-boot. When this routine is invoked, it
13901 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
13902 * routine, which will perform the proper device reset.
13903 * 13904 * Return codes 13905 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13906 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13907 **/ 13908 static pci_ers_result_t 13909 lpfc_io_slot_reset(struct pci_dev *pdev) 13910 { 13911 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13912 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13913 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 13914 13915 switch (phba->pci_dev_grp) { 13916 case LPFC_PCI_DEV_LP: 13917 rc = lpfc_io_slot_reset_s3(pdev); 13918 break; 13919 case LPFC_PCI_DEV_OC: 13920 rc = lpfc_io_slot_reset_s4(pdev); 13921 break; 13922 default: 13923 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13924 "1428 Invalid PCI device group: 0x%x\n", 13925 phba->pci_dev_grp); 13926 break; 13927 } 13928 return rc; 13929 } 13930 13931 /** 13932 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 13933 * @pdev: pointer to PCI device 13934 * 13935 * This routine is registered to the PCI subsystem for error handling. It 13936 * is called when kernel error recovery tells the lpfc driver that it is 13937 * OK to resume normal PCI operation after PCI bus error recovery. When 13938 * this routine is invoked, it dispatches the action to the proper SLI-3 13939 * or SLI-4 device io_resume routine, which will resume the device operation. 13940 **/ 13941 static void 13942 lpfc_io_resume(struct pci_dev *pdev) 13943 { 13944 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13945 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13946 13947 switch (phba->pci_dev_grp) { 13948 case LPFC_PCI_DEV_LP: 13949 lpfc_io_resume_s3(pdev); 13950 break; 13951 case LPFC_PCI_DEV_OC: 13952 lpfc_io_resume_s4(pdev); 13953 break; 13954 default: 13955 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13956 "1429 Invalid PCI device group: 0x%x\n", 13957 phba->pci_dev_grp); 13958 break; 13959 } 13960 return; 13961 } 13962 13963 /** 13964 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 13965 * @phba: pointer to lpfc hba data structure. 13966 * 13967 * This routine checks to see if OAS is supported for this adapter. If 13968 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 13969 * the enable oas flag is cleared and the pool created for OAS device data 13970 * is destroyed. 13971 * 13972 **/ 13973 static void 13974 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 13975 { 13976 13977 if (!phba->cfg_EnableXLane) 13978 return; 13979 13980 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 13981 phba->cfg_fof = 1; 13982 } else { 13983 phba->cfg_fof = 0; 13984 mempool_destroy(phba->device_data_mem_pool); 13985 phba->device_data_mem_pool = NULL; 13986 } 13987 13988 return; 13989 } 13990 13991 /** 13992 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 13993 * @phba: pointer to lpfc hba data structure. 13994 * 13995 * This routine checks to see if RAS is supported by the adapter. Check the 13996 * function through which RAS support enablement is to be done. 
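 * RAS firmware logging is only advertised on Lancer G6 and G7 devices, and
 * is enabled only when cfg_ras_fwlog_func matches this PCI function and a
 * non-zero log buffer size has been configured.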
13997 **/ 13998 void 13999 lpfc_sli4_ras_init(struct lpfc_hba *phba) 14000 { 14001 switch (phba->pcidev->device) { 14002 case PCI_DEVICE_ID_LANCER_G6_FC: 14003 case PCI_DEVICE_ID_LANCER_G7_FC: 14004 phba->ras_fwlog.ras_hwsupport = true; 14005 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 14006 phba->cfg_ras_fwlog_buffsize) 14007 phba->ras_fwlog.ras_enabled = true; 14008 else 14009 phba->ras_fwlog.ras_enabled = false; 14010 break; 14011 default: 14012 phba->ras_fwlog.ras_hwsupport = false; 14013 } 14014 } 14015 14016 14017 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 14018 14019 static const struct pci_error_handlers lpfc_err_handler = { 14020 .error_detected = lpfc_io_error_detected, 14021 .slot_reset = lpfc_io_slot_reset, 14022 .resume = lpfc_io_resume, 14023 }; 14024 14025 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one, 14026 lpfc_pci_suspend_one, 14027 lpfc_pci_resume_one); 14028 14029 static struct pci_driver lpfc_driver = { 14030 .name = LPFC_DRIVER_NAME, 14031 .id_table = lpfc_id_table, 14032 .probe = lpfc_pci_probe_one, 14033 .remove = lpfc_pci_remove_one, 14034 .shutdown = lpfc_pci_remove_one, 14035 .driver.pm = &lpfc_pci_pm_ops_one, 14036 .err_handler = &lpfc_err_handler, 14037 }; 14038 14039 static const struct file_operations lpfc_mgmt_fop = { 14040 .owner = THIS_MODULE, 14041 }; 14042 14043 static struct miscdevice lpfc_mgmt_dev = { 14044 .minor = MISC_DYNAMIC_MINOR, 14045 .name = "lpfcmgmt", 14046 .fops = &lpfc_mgmt_fop, 14047 }; 14048 14049 /** 14050 * lpfc_init - lpfc module initialization routine 14051 * 14052 * This routine is to be invoked when the lpfc module is loaded into the 14053 * kernel. The special kernel macro module_init() is used to indicate the 14054 * role of this routine to the kernel as lpfc module entry point. 
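 *
 * Registration happens in dependency order: the lpfcmgmt misc device, the
 * FC transport templates, the CPU hotplug multi-state callbacks, and only
 * then the PCI driver itself; the error paths unwind in reverse.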
14055 * 14056 * Return codes 14057 * 0 - successful 14058 * -ENOMEM - FC attach transport failed 14059 * all others - failed 14060 */ 14061 static int __init 14062 lpfc_init(void) 14063 { 14064 int error = 0; 14065 14066 pr_info(LPFC_MODULE_DESC "\n"); 14067 pr_info(LPFC_COPYRIGHT "\n"); 14068 14069 error = misc_register(&lpfc_mgmt_dev); 14070 if (error) 14071 printk(KERN_ERR "Could not register lpfcmgmt device, " 14072 "misc_register returned with status %d", error); 14073 14074 error = -ENOMEM; 14075 lpfc_transport_functions.vport_create = lpfc_vport_create; 14076 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 14077 lpfc_transport_template = 14078 fc_attach_transport(&lpfc_transport_functions); 14079 if (lpfc_transport_template == NULL) 14080 goto unregister; 14081 lpfc_vport_transport_template = 14082 fc_attach_transport(&lpfc_vport_transport_functions); 14083 if (lpfc_vport_transport_template == NULL) { 14084 fc_release_transport(lpfc_transport_template); 14085 goto unregister; 14086 } 14087 lpfc_wqe_cmd_template(); 14088 lpfc_nvmet_cmd_template(); 14089 14090 /* Initialize in case vector mapping is needed */ 14091 lpfc_present_cpu = num_present_cpus(); 14092 14093 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 14094 "lpfc/sli4:online", 14095 lpfc_cpu_online, lpfc_cpu_offline); 14096 if (error < 0) 14097 goto cpuhp_failure; 14098 lpfc_cpuhp_state = error; 14099 14100 error = pci_register_driver(&lpfc_driver); 14101 if (error) 14102 goto unwind; 14103 14104 return error; 14105 14106 unwind: 14107 cpuhp_remove_multi_state(lpfc_cpuhp_state); 14108 cpuhp_failure: 14109 fc_release_transport(lpfc_transport_template); 14110 fc_release_transport(lpfc_vport_transport_template); 14111 unregister: 14112 misc_deregister(&lpfc_mgmt_dev); 14113 14114 return error; 14115 } 14116 14117 void lpfc_dmp_dbg(struct lpfc_hba *phba) 14118 { 14119 unsigned int start_idx; 14120 unsigned int dbg_cnt; 14121 unsigned int temp_idx; 14122 int i; 14123 int j = 0; 14124 unsigned long rem_nsec; 14125 14126 if (phba->cfg_log_verbose) 14127 return; 14128 14129 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) 14130 return; 14131 14132 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; 14133 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); 14134 temp_idx = start_idx; 14135 if (dbg_cnt >= DBG_LOG_SZ) { 14136 dbg_cnt = DBG_LOG_SZ; 14137 temp_idx -= 1; 14138 } else { 14139 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { 14140 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; 14141 } else { 14142 if (start_idx < dbg_cnt) 14143 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); 14144 else 14145 start_idx -= dbg_cnt; 14146 } 14147 } 14148 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", 14149 start_idx, temp_idx, dbg_cnt); 14150 14151 for (i = 0; i < dbg_cnt; i++) { 14152 if ((start_idx + i) < DBG_LOG_SZ) 14153 temp_idx = (start_idx + i) % DBG_LOG_SZ; 14154 else 14155 temp_idx = j++; 14156 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); 14157 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", 14158 temp_idx, 14159 (unsigned long)phba->dbg_log[temp_idx].t_ns, 14160 rem_nsec / 1000, 14161 phba->dbg_log[temp_idx].log); 14162 } 14163 atomic_set(&phba->dbg_log_cnt, 0); 14164 atomic_set(&phba->dbg_log_dmping, 0); 14165 } 14166 14167 __printf(2, 3) 14168 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...) 
14169 { 14170 unsigned int idx; 14171 va_list args; 14172 int dbg_dmping = atomic_read(&phba->dbg_log_dmping); 14173 struct va_format vaf; 14174 14175 14176 va_start(args, fmt); 14177 if (unlikely(dbg_dmping)) { 14178 vaf.fmt = fmt; 14179 vaf.va = &args; 14180 dev_info(&phba->pcidev->dev, "%pV", &vaf); 14181 va_end(args); 14182 return; 14183 } 14184 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % 14185 DBG_LOG_SZ; 14186 14187 atomic_inc(&phba->dbg_log_cnt); 14188 14189 vscnprintf(phba->dbg_log[idx].log, 14190 sizeof(phba->dbg_log[idx].log), fmt, args); 14191 va_end(args); 14192 14193 phba->dbg_log[idx].t_ns = local_clock(); 14194 } 14195 14196 /** 14197 * lpfc_exit - lpfc module removal routine 14198 * 14199 * This routine is invoked when the lpfc module is removed from the kernel. 14200 * The special kernel macro module_exit() is used to indicate the role of 14201 * this routine to the kernel as lpfc module exit point. 14202 */ 14203 static void __exit 14204 lpfc_exit(void) 14205 { 14206 misc_deregister(&lpfc_mgmt_dev); 14207 pci_unregister_driver(&lpfc_driver); 14208 cpuhp_remove_multi_state(lpfc_cpuhp_state); 14209 fc_release_transport(lpfc_transport_template); 14210 fc_release_transport(lpfc_vport_transport_template); 14211 idr_destroy(&lpfc_hba_index); 14212 } 14213 14214 module_init(lpfc_init); 14215 module_exit(lpfc_exit); 14216 MODULE_LICENSE("GPL"); 14217 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 14218 MODULE_AUTHOR("Broadcom"); 14219 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 14220
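
/*
 * Note on the debug trace ring above: lpfc_dbg_print() is the producer and
 * lpfc_dmp_dbg() the consumer (the latter is a no-op when cfg_log_verbose
 * is set, since messages are then emitted directly). A minimal sketch of
 * the intended call pattern -- illustrative only, with made-up arguments,
 * not part of the driver's API surface:
 *
 *	lpfc_dbg_print(phba, "xri %d state x%x\n", xri, state);
 *	...
 *	lpfc_dmp_dbg(phba);
 *
 * Entries land in phba->dbg_log[idx % DBG_LOG_SZ] with a local_clock()
 * timestamp; once dbg_log_cnt reaches DBG_LOG_SZ the ring wraps and the
 * dump begins at roughly the oldest surviving entry.
 */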