1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 #include <linux/crash_dump.h> 43 44 #include <scsi/scsi.h> 45 #include <scsi/scsi_device.h> 46 #include <scsi/scsi_host.h> 47 #include <scsi/scsi_transport_fc.h> 48 #include <scsi/scsi_tcq.h> 49 #include <scsi/fc/fc_fs.h> 50 51 #include <linux/nvme-fc-driver.h> 52 53 #include "lpfc_hw4.h" 54 #include "lpfc_hw.h" 55 #include "lpfc_sli.h" 56 #include "lpfc_sli4.h" 57 #include "lpfc_nl.h" 58 #include "lpfc_disc.h" 59 #include "lpfc.h" 60 #include "lpfc_scsi.h" 61 #include "lpfc_nvme.h" 62 #include "lpfc_nvmet.h" 63 #include "lpfc_logmsg.h" 64 #include "lpfc_crtn.h" 65 #include "lpfc_vport.h" 66 #include "lpfc_version.h" 67 #include "lpfc_ids.h" 68 69 /* Used when mapping IRQ vectors in a driver centric manner */ 70 static uint32_t lpfc_present_cpu; 71 72 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 73 static int lpfc_post_rcv_buf(struct lpfc_hba *); 74 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 75 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 76 static int lpfc_setup_endian_order(struct lpfc_hba *); 77 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 78 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 79 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 80 static void lpfc_init_sgl_list(struct lpfc_hba *); 81 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 82 static void lpfc_free_active_sgl(struct lpfc_hba *); 83 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 84 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 85 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 86 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 87 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 
88 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 89 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 90 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 91 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 92 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 93 94 static struct scsi_transport_template *lpfc_transport_template = NULL; 95 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 96 static DEFINE_IDR(lpfc_hba_index); 97 #define LPFC_NVMET_BUF_POST 254 98 99 /** 100 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 101 * @phba: pointer to lpfc hba data structure. 102 * 103 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 104 * mailbox command. It retrieves the revision information from the HBA and 105 * collects the Vital Product Data (VPD) about the HBA for preparing the 106 * configuration of the HBA. 107 * 108 * Return codes: 109 * 0 - success. 110 * -ERESTART - requests the SLI layer to reset the HBA and try again. 111 * Any other value - indicates an error. 112 **/ 113 int 114 lpfc_config_port_prep(struct lpfc_hba *phba) 115 { 116 lpfc_vpd_t *vp = &phba->vpd; 117 int i = 0, rc; 118 LPFC_MBOXQ_t *pmb; 119 MAILBOX_t *mb; 120 char *lpfc_vpd_data = NULL; 121 uint16_t offset = 0; 122 static char licensed[56] = 123 "key unlock for use with gnu public licensed code only\0"; 124 static int init_key = 1; 125 126 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 127 if (!pmb) { 128 phba->link_state = LPFC_HBA_ERROR; 129 return -ENOMEM; 130 } 131 132 mb = &pmb->u.mb; 133 phba->link_state = LPFC_INIT_MBX_CMDS; 134 135 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 136 if (init_key) { 137 uint32_t *ptext = (uint32_t *) licensed; 138 139 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 140 *ptext = cpu_to_be32(*ptext); 141 init_key = 0; 142 } 143 144 lpfc_read_nv(phba, pmb); 145 memset((char*)mb->un.varRDnvp.rsvd3, 0, 146 sizeof (mb->un.varRDnvp.rsvd3)); 147 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 148 sizeof (licensed)); 149 150 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 151 152 if (rc != MBX_SUCCESS) { 153 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 154 "0324 Config Port initialization " 155 "error, mbxCmd x%x READ_NVPARM, " 156 "mbxStatus x%x\n", 157 mb->mbxCommand, mb->mbxStatus); 158 mempool_free(pmb, phba->mbox_mem_pool); 159 return -ERESTART; 160 } 161 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 162 sizeof(phba->wwnn)); 163 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 164 sizeof(phba->wwpn)); 165 } 166 167 /* 168 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 169 * which was already set in lpfc_get_cfgparam() 170 */ 171 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 172 173 /* Setup and issue mailbox READ REV command */ 174 lpfc_read_rev(phba, pmb); 175 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 176 if (rc != MBX_SUCCESS) { 177 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 178 "0439 Adapter failed to init, mbxCmd x%x " 179 "READ_REV, mbxStatus x%x\n", 180 mb->mbxCommand, mb->mbxStatus); 181 mempool_free( pmb, phba->mbox_mem_pool); 182 return -ERESTART; 183 } 184 185 186 /* 187 * The value of rr must be 1 since the driver set the cv field to 1. 188 * This setting requires the FW to set all revision fields. 
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox command
 * returns successfully, the internal async event support flag is set to 1;
 * otherwise, it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command that retrieves
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *)&prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
341 **/ 342 void 343 lpfc_update_vport_wwn(struct lpfc_vport *vport) 344 { 345 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 346 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 347 348 /* If the soft name exists then update it using the service params */ 349 if (vport->phba->cfg_soft_wwnn) 350 u64_to_wwn(vport->phba->cfg_soft_wwnn, 351 vport->fc_sparam.nodeName.u.wwn); 352 if (vport->phba->cfg_soft_wwpn) 353 u64_to_wwn(vport->phba->cfg_soft_wwpn, 354 vport->fc_sparam.portName.u.wwn); 355 356 /* 357 * If the name is empty or there exists a soft name 358 * then copy the service params name, otherwise use the fc name 359 */ 360 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 361 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 362 sizeof(struct lpfc_name)); 363 else 364 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 365 sizeof(struct lpfc_name)); 366 367 /* 368 * If the port name has changed, then set the Param changes flag 369 * to unreg the login 370 */ 371 if (vport->fc_portname.u.wwn[0] != 0 && 372 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 373 sizeof(struct lpfc_name))) 374 vport->vport_flag |= FAWWPN_PARAM_CHG; 375 376 if (vport->fc_portname.u.wwn[0] == 0 || 377 vport->phba->cfg_soft_wwpn || 378 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 379 vport->vport_flag & FAWWPN_SET) { 380 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 381 sizeof(struct lpfc_name)); 382 vport->vport_flag &= ~FAWWPN_SET; 383 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 384 vport->vport_flag |= FAWWPN_SET; 385 } 386 else 387 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 388 sizeof(struct lpfc_name)); 389 } 390 391 /** 392 * lpfc_config_port_post - Perform lpfc initialization after config port 393 * @phba: pointer to lpfc hba data structure. 394 * 395 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 396 * command call. It performs all internal resource and state setups on the 397 * port: post IOCB buffers, enable appropriate host interrupt attentions, 398 * ELS ring timers, etc. 399 * 400 * Return codes 401 * 0 - success. 402 * Any other value - error. 403 **/ 404 int 405 lpfc_config_port_post(struct lpfc_hba *phba) 406 { 407 struct lpfc_vport *vport = phba->pport; 408 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 409 LPFC_MBOXQ_t *pmb; 410 MAILBOX_t *mb; 411 struct lpfc_dmabuf *mp; 412 struct lpfc_sli *psli = &phba->sli; 413 uint32_t status, timeout; 414 int i, j; 415 int rc; 416 417 spin_lock_irq(&phba->hbalock); 418 /* 419 * If the Config port completed correctly the HBA is not 420 * over heated any more. 421 */ 422 if (phba->over_temp_state == HBA_OVER_TEMP) 423 phba->over_temp_state = HBA_NORMAL_TEMP; 424 spin_unlock_irq(&phba->hbalock); 425 426 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 427 if (!pmb) { 428 phba->link_state = LPFC_HBA_ERROR; 429 return -ENOMEM; 430 } 431 mb = &pmb->u.mb; 432 433 /* Get login parameters for NID. 
*/ 434 rc = lpfc_read_sparam(phba, pmb, 0); 435 if (rc) { 436 mempool_free(pmb, phba->mbox_mem_pool); 437 return -ENOMEM; 438 } 439 440 pmb->vport = vport; 441 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 443 "0448 Adapter failed init, mbxCmd x%x " 444 "READ_SPARM mbxStatus x%x\n", 445 mb->mbxCommand, mb->mbxStatus); 446 phba->link_state = LPFC_HBA_ERROR; 447 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 448 mempool_free(pmb, phba->mbox_mem_pool); 449 lpfc_mbuf_free(phba, mp->virt, mp->phys); 450 kfree(mp); 451 return -EIO; 452 } 453 454 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 455 456 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 457 lpfc_mbuf_free(phba, mp->virt, mp->phys); 458 kfree(mp); 459 pmb->ctx_buf = NULL; 460 lpfc_update_vport_wwn(vport); 461 462 /* Update the fc_host data structures with new wwn. */ 463 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 464 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 465 fc_host_max_npiv_vports(shost) = phba->max_vpi; 466 467 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 468 /* This should be consolidated into parse_vpd ? - mr */ 469 if (phba->SerialNumber[0] == 0) { 470 uint8_t *outptr; 471 472 outptr = &vport->fc_nodename.u.s.IEEE[0]; 473 for (i = 0; i < 12; i++) { 474 status = *outptr++; 475 j = ((status & 0xf0) >> 4); 476 if (j <= 9) 477 phba->SerialNumber[i] = 478 (char)((uint8_t) 0x30 + (uint8_t) j); 479 else 480 phba->SerialNumber[i] = 481 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 482 i++; 483 j = (status & 0xf); 484 if (j <= 9) 485 phba->SerialNumber[i] = 486 (char)((uint8_t) 0x30 + (uint8_t) j); 487 else 488 phba->SerialNumber[i] = 489 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 490 } 491 } 492 493 lpfc_read_config(phba, pmb); 494 pmb->vport = vport; 495 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 497 "0453 Adapter failed to init, mbxCmd x%x " 498 "READ_CONFIG, mbxStatus x%x\n", 499 mb->mbxCommand, mb->mbxStatus); 500 phba->link_state = LPFC_HBA_ERROR; 501 mempool_free( pmb, phba->mbox_mem_pool); 502 return -EIO; 503 } 504 505 /* Check if the port is disabled */ 506 lpfc_sli_read_link_ste(phba); 507 508 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 509 i = (mb->un.varRdConfig.max_xri + 1); 510 if (phba->cfg_hba_queue_depth > i) { 511 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 512 "3359 HBA queue depth changed from %d to %d\n", 513 phba->cfg_hba_queue_depth, i); 514 phba->cfg_hba_queue_depth = i; 515 } 516 517 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 518 i = (mb->un.varRdConfig.max_xri >> 3); 519 if (phba->pport->cfg_lun_queue_depth > i) { 520 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 521 "3360 LUN queue depth changed from %d to %d\n", 522 phba->pport->cfg_lun_queue_depth, i); 523 phba->pport->cfg_lun_queue_depth = i; 524 } 525 526 phba->lmt = mb->un.varRdConfig.lmt; 527 528 /* Get the default values for Model Name and Description */ 529 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 530 531 phba->link_state = LPFC_LINK_DOWN; 532 533 /* Only process IOCBs on ELS ring till hba_state is READY */ 534 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 535 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 536 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 537 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 538 539 /* Post receive buffers for desired rings */ 540 if 
(phba->sli_rev != 3) 541 lpfc_post_rcv_buf(phba); 542 543 /* 544 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 545 */ 546 if (phba->intr_type == MSIX) { 547 rc = lpfc_config_msi(phba, pmb); 548 if (rc) { 549 mempool_free(pmb, phba->mbox_mem_pool); 550 return -EIO; 551 } 552 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 553 if (rc != MBX_SUCCESS) { 554 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 555 "0352 Config MSI mailbox command " 556 "failed, mbxCmd x%x, mbxStatus x%x\n", 557 pmb->u.mb.mbxCommand, 558 pmb->u.mb.mbxStatus); 559 mempool_free(pmb, phba->mbox_mem_pool); 560 return -EIO; 561 } 562 } 563 564 spin_lock_irq(&phba->hbalock); 565 /* Initialize ERATT handling flag */ 566 phba->hba_flag &= ~HBA_ERATT_HANDLED; 567 568 /* Enable appropriate host interrupts */ 569 if (lpfc_readl(phba->HCregaddr, &status)) { 570 spin_unlock_irq(&phba->hbalock); 571 return -EIO; 572 } 573 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 574 if (psli->num_rings > 0) 575 status |= HC_R0INT_ENA; 576 if (psli->num_rings > 1) 577 status |= HC_R1INT_ENA; 578 if (psli->num_rings > 2) 579 status |= HC_R2INT_ENA; 580 if (psli->num_rings > 3) 581 status |= HC_R3INT_ENA; 582 583 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 584 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 585 status &= ~(HC_R0INT_ENA); 586 587 writel(status, phba->HCregaddr); 588 readl(phba->HCregaddr); /* flush */ 589 spin_unlock_irq(&phba->hbalock); 590 591 /* Set up ring-0 (ELS) timer */ 592 timeout = phba->fc_ratov * 2; 593 mod_timer(&vport->els_tmofunc, 594 jiffies + msecs_to_jiffies(1000 * timeout)); 595 /* Set up heart beat (HB) timer */ 596 mod_timer(&phba->hb_tmofunc, 597 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 598 phba->hb_outstanding = 0; 599 phba->last_completion_time = jiffies; 600 /* Set up error attention (ERATT) polling timer */ 601 mod_timer(&phba->eratt_poll, 602 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 603 604 if (phba->hba_flag & LINK_DISABLED) { 605 lpfc_printf_log(phba, 606 KERN_ERR, LOG_INIT, 607 "2598 Adapter Link is disabled.\n"); 608 lpfc_down_link(phba, pmb); 609 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 610 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 611 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 612 lpfc_printf_log(phba, 613 KERN_ERR, LOG_INIT, 614 "2599 Adapter failed to issue DOWN_LINK" 615 " mbox command rc 0x%x\n", rc); 616 617 mempool_free(pmb, phba->mbox_mem_pool); 618 return -EIO; 619 } 620 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 621 mempool_free(pmb, phba->mbox_mem_pool); 622 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 623 if (rc) 624 return rc; 625 } 626 /* MBOX buffer will be freed in mbox compl */ 627 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 628 if (!pmb) { 629 phba->link_state = LPFC_HBA_ERROR; 630 return -ENOMEM; 631 } 632 633 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 634 pmb->mbox_cmpl = lpfc_config_async_cmpl; 635 pmb->vport = phba->pport; 636 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 637 638 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 639 lpfc_printf_log(phba, 640 KERN_ERR, 641 LOG_INIT, 642 "0456 Adapter failed to issue " 643 "ASYNCEVT_ENABLE mbox status x%x\n", 644 rc); 645 mempool_free(pmb, phba->mbox_mem_pool); 646 } 647 648 /* Get Option rom version */ 649 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 650 if (!pmb) { 651 phba->link_state = LPFC_HBA_ERROR; 652 return -ENOMEM; 653 } 654 655 lpfc_dump_wakeup_param(phba, pmb); 656 pmb->mbox_cmpl = 
lpfc_dump_wakeup_param_cmpl; 657 pmb->vport = phba->pport; 658 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 659 660 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 662 "to get Option ROM version status x%x\n", rc); 663 mempool_free(pmb, phba->mbox_mem_pool); 664 } 665 666 return 0; 667 } 668 669 /** 670 * lpfc_hba_init_link - Initialize the FC link 671 * @phba: pointer to lpfc hba data structure. 672 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 673 * 674 * This routine will issue the INIT_LINK mailbox command call. 675 * It is available to other drivers through the lpfc_hba data 676 * structure for use as a delayed link up mechanism with the 677 * module parameter lpfc_suppress_link_up. 678 * 679 * Return code 680 * 0 - success 681 * Any other value - error 682 **/ 683 static int 684 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 685 { 686 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 687 } 688 689 /** 690 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 691 * @phba: pointer to lpfc hba data structure. 692 * @fc_topology: desired fc topology. 693 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 694 * 695 * This routine will issue the INIT_LINK mailbox command call. 696 * It is available to other drivers through the lpfc_hba data 697 * structure for use as a delayed link up mechanism with the 698 * module parameter lpfc_suppress_link_up. 699 * 700 * Return code 701 * 0 - success 702 * Any other value - error 703 **/ 704 int 705 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 706 uint32_t flag) 707 { 708 struct lpfc_vport *vport = phba->pport; 709 LPFC_MBOXQ_t *pmb; 710 MAILBOX_t *mb; 711 int rc; 712 713 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 714 if (!pmb) { 715 phba->link_state = LPFC_HBA_ERROR; 716 return -ENOMEM; 717 } 718 mb = &pmb->u.mb; 719 pmb->vport = vport; 720 721 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 722 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 723 !(phba->lmt & LMT_1Gb)) || 724 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 725 !(phba->lmt & LMT_2Gb)) || 726 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 727 !(phba->lmt & LMT_4Gb)) || 728 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 729 !(phba->lmt & LMT_8Gb)) || 730 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 731 !(phba->lmt & LMT_10Gb)) || 732 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 733 !(phba->lmt & LMT_16Gb)) || 734 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 735 !(phba->lmt & LMT_32Gb)) || 736 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 737 !(phba->lmt & LMT_64Gb))) { 738 /* Reset link speed to auto */ 739 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 740 "1302 Invalid speed for this board:%d " 741 "Reset link speed to auto.\n", 742 phba->cfg_link_speed); 743 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 744 } 745 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 746 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 747 if (phba->sli_rev < LPFC_SLI_REV4) 748 lpfc_set_loopback_flag(phba); 749 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 750 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 752 "0498 Adapter failed to init, mbxCmd x%x " 753 "INIT_LINK, mbxStatus x%x\n", 754 mb->mbxCommand, mb->mbxStatus); 755 if (phba->sli_rev <= LPFC_SLI_REV3) { 756 /* Clear 
all interrupt enable conditions */ 757 writel(0, phba->HCregaddr); 758 readl(phba->HCregaddr); /* flush */ 759 /* Clear all pending interrupts */ 760 writel(0xffffffff, phba->HAregaddr); 761 readl(phba->HAregaddr); /* flush */ 762 } 763 phba->link_state = LPFC_HBA_ERROR; 764 if (rc != MBX_BUSY || flag == MBX_POLL) 765 mempool_free(pmb, phba->mbox_mem_pool); 766 return -EIO; 767 } 768 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 769 if (flag == MBX_POLL) 770 mempool_free(pmb, phba->mbox_mem_pool); 771 772 return 0; 773 } 774 775 /** 776 * lpfc_hba_down_link - this routine downs the FC link 777 * @phba: pointer to lpfc hba data structure. 778 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 779 * 780 * This routine will issue the DOWN_LINK mailbox command call. 781 * It is available to other drivers through the lpfc_hba data 782 * structure for use to stop the link. 783 * 784 * Return code 785 * 0 - success 786 * Any other value - error 787 **/ 788 static int 789 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 790 { 791 LPFC_MBOXQ_t *pmb; 792 int rc; 793 794 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 795 if (!pmb) { 796 phba->link_state = LPFC_HBA_ERROR; 797 return -ENOMEM; 798 } 799 800 lpfc_printf_log(phba, 801 KERN_ERR, LOG_INIT, 802 "0491 Adapter Link is disabled.\n"); 803 lpfc_down_link(phba, pmb); 804 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 805 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 806 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 807 lpfc_printf_log(phba, 808 KERN_ERR, LOG_INIT, 809 "2522 Adapter failed to issue DOWN_LINK" 810 " mbox command rc 0x%x\n", rc); 811 812 mempool_free(pmb, phba->mbox_mem_pool); 813 return -EIO; 814 } 815 if (flag == MBX_POLL) 816 mempool_free(pmb, phba->mbox_mem_pool); 817 818 return 0; 819 } 820 821 /** 822 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 823 * @phba: pointer to lpfc HBA data structure. 824 * 825 * This routine will do LPFC uninitialization before the HBA is reset when 826 * bringing down the SLI Layer. 827 * 828 * Return codes 829 * 0 - success. 830 * Any other value - error. 831 **/ 832 int 833 lpfc_hba_down_prep(struct lpfc_hba *phba) 834 { 835 struct lpfc_vport **vports; 836 int i; 837 838 if (phba->sli_rev <= LPFC_SLI_REV3) { 839 /* Disable interrupts */ 840 writel(0, phba->HCregaddr); 841 readl(phba->HCregaddr); /* flush */ 842 } 843 844 if (phba->pport->load_flag & FC_UNLOADING) 845 lpfc_cleanup_discovery_resources(phba->pport); 846 else { 847 vports = lpfc_create_vport_work_array(phba); 848 if (vports != NULL) 849 for (i = 0; i <= phba->max_vports && 850 vports[i] != NULL; i++) 851 lpfc_cleanup_discovery_resources(vports[i]); 852 lpfc_destroy_vport_work_array(phba, vports); 853 } 854 return 0; 855 } 856 857 /** 858 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 859 * rspiocb which got deferred 860 * 861 * @phba: pointer to lpfc HBA data structure. 862 * 863 * This routine will cleanup completed slow path events after HBA is reset 864 * when bringing down the SLI Layer. 865 * 866 * 867 * Return codes 868 * void. 
869 **/ 870 static void 871 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 872 { 873 struct lpfc_iocbq *rspiocbq; 874 struct hbq_dmabuf *dmabuf; 875 struct lpfc_cq_event *cq_event; 876 877 spin_lock_irq(&phba->hbalock); 878 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 879 spin_unlock_irq(&phba->hbalock); 880 881 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 882 /* Get the response iocb from the head of work queue */ 883 spin_lock_irq(&phba->hbalock); 884 list_remove_head(&phba->sli4_hba.sp_queue_event, 885 cq_event, struct lpfc_cq_event, list); 886 spin_unlock_irq(&phba->hbalock); 887 888 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 889 case CQE_CODE_COMPL_WQE: 890 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 891 cq_event); 892 lpfc_sli_release_iocbq(phba, rspiocbq); 893 break; 894 case CQE_CODE_RECEIVE: 895 case CQE_CODE_RECEIVE_V1: 896 dmabuf = container_of(cq_event, struct hbq_dmabuf, 897 cq_event); 898 lpfc_in_buf_free(phba, &dmabuf->dbuf); 899 } 900 } 901 } 902 903 /** 904 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 905 * @phba: pointer to lpfc HBA data structure. 906 * 907 * This routine will cleanup posted ELS buffers after the HBA is reset 908 * when bringing down the SLI Layer. 909 * 910 * 911 * Return codes 912 * void. 913 **/ 914 static void 915 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 916 { 917 struct lpfc_sli *psli = &phba->sli; 918 struct lpfc_sli_ring *pring; 919 struct lpfc_dmabuf *mp, *next_mp; 920 LIST_HEAD(buflist); 921 int count; 922 923 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 924 lpfc_sli_hbqbuf_free_all(phba); 925 else { 926 /* Cleanup preposted buffers on the ELS ring */ 927 pring = &psli->sli3_ring[LPFC_ELS_RING]; 928 spin_lock_irq(&phba->hbalock); 929 list_splice_init(&pring->postbufq, &buflist); 930 spin_unlock_irq(&phba->hbalock); 931 932 count = 0; 933 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 934 list_del(&mp->list); 935 count++; 936 lpfc_mbuf_free(phba, mp->virt, mp->phys); 937 kfree(mp); 938 } 939 940 spin_lock_irq(&phba->hbalock); 941 pring->postbufq_cnt -= count; 942 spin_unlock_irq(&phba->hbalock); 943 } 944 } 945 946 /** 947 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 948 * @phba: pointer to lpfc HBA data structure. 949 * 950 * This routine will cleanup the txcmplq after the HBA is reset when bringing 951 * down the SLI Layer. 952 * 953 * Return codes 954 * void 955 **/ 956 static void 957 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 958 { 959 struct lpfc_sli *psli = &phba->sli; 960 struct lpfc_queue *qp = NULL; 961 struct lpfc_sli_ring *pring; 962 LIST_HEAD(completions); 963 int i; 964 struct lpfc_iocbq *piocb, *next_iocb; 965 966 if (phba->sli_rev != LPFC_SLI_REV4) { 967 for (i = 0; i < psli->num_rings; i++) { 968 pring = &psli->sli3_ring[i]; 969 spin_lock_irq(&phba->hbalock); 970 /* At this point in time the HBA is either reset or DOA 971 * Nothing should be on txcmplq as it will 972 * NEVER complete. 
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_els_sgl_list and */
					/* scsi_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified.
This timeout 1172 * event will be used by the worker thread to invoke the actual timeout 1173 * handler routine, lpfc_rrq_handler. Any periodical operations will 1174 * be performed in the timeout handler and the RRQ timeout event bit shall 1175 * be cleared by the worker thread after it has taken the event bitmap out. 1176 **/ 1177 static void 1178 lpfc_rrq_timeout(struct timer_list *t) 1179 { 1180 struct lpfc_hba *phba; 1181 unsigned long iflag; 1182 1183 phba = from_timer(phba, t, rrq_tmr); 1184 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1185 if (!(phba->pport->load_flag & FC_UNLOADING)) 1186 phba->hba_flag |= HBA_RRQ_ACTIVE; 1187 else 1188 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1189 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1190 1191 if (!(phba->pport->load_flag & FC_UNLOADING)) 1192 lpfc_worker_wake_up(phba); 1193 } 1194 1195 /** 1196 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1197 * @phba: pointer to lpfc hba data structure. 1198 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1199 * 1200 * This is the callback function to the lpfc heart-beat mailbox command. 1201 * If configured, the lpfc driver issues the heart-beat mailbox command to 1202 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1203 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1204 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1205 * heart-beat outstanding state. Once the mailbox command comes back and 1206 * no error conditions detected, the heart-beat mailbox command timer is 1207 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1208 * state is cleared for the next heart-beat. If the timer expired with the 1209 * heart-beat outstanding state set, the driver will put the HBA offline. 
1210 **/ 1211 static void 1212 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1213 { 1214 unsigned long drvr_flag; 1215 1216 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1217 phba->hb_outstanding = 0; 1218 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1219 1220 /* Check and reset heart-beat timer is necessary */ 1221 mempool_free(pmboxq, phba->mbox_mem_pool); 1222 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1223 !(phba->link_state == LPFC_HBA_ERROR) && 1224 !(phba->pport->load_flag & FC_UNLOADING)) 1225 mod_timer(&phba->hb_tmofunc, 1226 jiffies + 1227 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1228 return; 1229 } 1230 1231 static void 1232 lpfc_hb_eq_delay_work(struct work_struct *work) 1233 { 1234 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1235 struct lpfc_hba, eq_delay_work); 1236 struct lpfc_eq_intr_info *eqi, *eqi_new; 1237 struct lpfc_queue *eq, *eq_next; 1238 unsigned char *eqcnt = NULL; 1239 uint32_t usdelay; 1240 int i; 1241 bool update = false; 1242 1243 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1244 return; 1245 1246 if (phba->link_state == LPFC_HBA_ERROR || 1247 phba->pport->fc_flag & FC_OFFLINE_MODE) 1248 goto requeue; 1249 1250 eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char), 1251 GFP_KERNEL); 1252 if (!eqcnt) 1253 goto requeue; 1254 1255 if (phba->cfg_irq_chann > 1) { 1256 /* Loop thru all IRQ vectors */ 1257 for (i = 0; i < phba->cfg_irq_chann; i++) { 1258 /* Get the EQ corresponding to the IRQ vector */ 1259 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1260 if (!eq) 1261 continue; 1262 if (eq->q_mode) { 1263 update = true; 1264 break; 1265 } 1266 if (eqcnt[eq->last_cpu] < 2) 1267 eqcnt[eq->last_cpu]++; 1268 } 1269 } else 1270 update = true; 1271 1272 for_each_present_cpu(i) { 1273 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1274 if (!update && eqcnt[i] < 2) { 1275 eqi->icnt = 0; 1276 continue; 1277 } 1278 1279 usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) * 1280 LPFC_EQ_DELAY_STEP; 1281 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1282 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1283 1284 eqi->icnt = 0; 1285 1286 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1287 if (eq->last_cpu != i) { 1288 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1289 eq->last_cpu); 1290 list_move_tail(&eq->cpu_list, &eqi_new->list); 1291 continue; 1292 } 1293 if (usdelay != eq->q_mode) 1294 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1295 usdelay); 1296 } 1297 } 1298 1299 kfree(eqcnt); 1300 1301 requeue: 1302 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1303 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1304 } 1305 1306 /** 1307 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1308 * @phba: pointer to lpfc hba data structure. 1309 * 1310 * For each heartbeat, this routine does some heuristic methods to adjust 1311 * XRI distribution. The goal is to fully utilize free XRIs. 1312 **/ 1313 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1314 { 1315 u32 i; 1316 u32 hwq_count; 1317 1318 hwq_count = phba->cfg_hdw_queue; 1319 for (i = 0; i < hwq_count; i++) { 1320 /* Adjust XRIs in private pool */ 1321 lpfc_adjust_pvt_pool_count(phba, i); 1322 1323 /* Adjust high watermark */ 1324 lpfc_adjust_high_watermark(phba, i); 1325 1326 #ifdef LPFC_MXP_STAT 1327 /* Snapshot pbl, pvt and busy count */ 1328 lpfc_snapshot_mxp(phba, i); 1329 #endif 1330 } 1331 } 1332 1333 /** 1334 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1335 * @phba: pointer to lpfc hba data structure. 
1336 * 1337 * This is the actual HBA-timer timeout handler to be invoked by the worker 1338 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1339 * handler performs any periodic operations needed for the device. If such 1340 * periodic event has already been attended to either in the interrupt handler 1341 * or by processing slow-ring or fast-ring events within the HBA-timer 1342 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1343 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1344 * is configured and there is no heart-beat mailbox command outstanding, a 1345 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1346 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1347 * to offline. 1348 **/ 1349 void 1350 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1351 { 1352 struct lpfc_vport **vports; 1353 LPFC_MBOXQ_t *pmboxq; 1354 struct lpfc_dmabuf *buf_ptr; 1355 int retval, i; 1356 struct lpfc_sli *psli = &phba->sli; 1357 LIST_HEAD(completions); 1358 1359 if (phba->cfg_xri_rebalancing) { 1360 /* Multi-XRI pools handler */ 1361 lpfc_hb_mxp_handler(phba); 1362 } 1363 1364 vports = lpfc_create_vport_work_array(phba); 1365 if (vports != NULL) 1366 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1367 lpfc_rcv_seq_check_edtov(vports[i]); 1368 lpfc_fdmi_num_disc_check(vports[i]); 1369 } 1370 lpfc_destroy_vport_work_array(phba, vports); 1371 1372 if ((phba->link_state == LPFC_HBA_ERROR) || 1373 (phba->pport->load_flag & FC_UNLOADING) || 1374 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1375 return; 1376 1377 spin_lock_irq(&phba->pport->work_port_lock); 1378 1379 if (time_after(phba->last_completion_time + 1380 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1381 jiffies)) { 1382 spin_unlock_irq(&phba->pport->work_port_lock); 1383 if (!phba->hb_outstanding) 1384 mod_timer(&phba->hb_tmofunc, 1385 jiffies + 1386 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1387 else 1388 mod_timer(&phba->hb_tmofunc, 1389 jiffies + 1390 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1391 return; 1392 } 1393 spin_unlock_irq(&phba->pport->work_port_lock); 1394 1395 if (phba->elsbuf_cnt && 1396 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1397 spin_lock_irq(&phba->hbalock); 1398 list_splice_init(&phba->elsbuf, &completions); 1399 phba->elsbuf_cnt = 0; 1400 phba->elsbuf_prev_cnt = 0; 1401 spin_unlock_irq(&phba->hbalock); 1402 1403 while (!list_empty(&completions)) { 1404 list_remove_head(&completions, buf_ptr, 1405 struct lpfc_dmabuf, list); 1406 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1407 kfree(buf_ptr); 1408 } 1409 } 1410 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1411 1412 /* If there is no heart beat outstanding, issue a heartbeat command */ 1413 if (phba->cfg_enable_hba_heartbeat) { 1414 if (!phba->hb_outstanding) { 1415 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1416 (list_empty(&psli->mboxq))) { 1417 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1418 GFP_KERNEL); 1419 if (!pmboxq) { 1420 mod_timer(&phba->hb_tmofunc, 1421 jiffies + 1422 msecs_to_jiffies(1000 * 1423 LPFC_HB_MBOX_INTERVAL)); 1424 return; 1425 } 1426 1427 lpfc_heart_beat(phba, pmboxq); 1428 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1429 pmboxq->vport = phba->pport; 1430 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1431 MBX_NOWAIT); 1432 1433 if (retval != MBX_BUSY && 1434 retval != MBX_SUCCESS) { 1435 mempool_free(pmboxq, 1436 phba->mbox_mem_pool); 1437 mod_timer(&phba->hb_tmofunc, 1438 jiffies + 1439 
msecs_to_jiffies(1000 * 1440 LPFC_HB_MBOX_INTERVAL)); 1441 return; 1442 } 1443 phba->skipped_hb = 0; 1444 phba->hb_outstanding = 1; 1445 } else if (time_before_eq(phba->last_completion_time, 1446 phba->skipped_hb)) { 1447 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1448 "2857 Last completion time not " 1449 " updated in %d ms\n", 1450 jiffies_to_msecs(jiffies 1451 - phba->last_completion_time)); 1452 } else 1453 phba->skipped_hb = jiffies; 1454 1455 mod_timer(&phba->hb_tmofunc, 1456 jiffies + 1457 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1458 return; 1459 } else { 1460 /* 1461 * If heart beat timeout called with hb_outstanding set 1462 * we need to give the hb mailbox cmd a chance to 1463 * complete or TMO. 1464 */ 1465 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1466 "0459 Adapter heartbeat still out" 1467 "standing:last compl time was %d ms.\n", 1468 jiffies_to_msecs(jiffies 1469 - phba->last_completion_time)); 1470 mod_timer(&phba->hb_tmofunc, 1471 jiffies + 1472 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1473 } 1474 } else { 1475 mod_timer(&phba->hb_tmofunc, 1476 jiffies + 1477 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1478 } 1479 } 1480 1481 /** 1482 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1483 * @phba: pointer to lpfc hba data structure. 1484 * 1485 * This routine is called to bring the HBA offline when HBA hardware error 1486 * other than Port Error 6 has been detected. 1487 **/ 1488 static void 1489 lpfc_offline_eratt(struct lpfc_hba *phba) 1490 { 1491 struct lpfc_sli *psli = &phba->sli; 1492 1493 spin_lock_irq(&phba->hbalock); 1494 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1495 spin_unlock_irq(&phba->hbalock); 1496 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1497 1498 lpfc_offline(phba); 1499 lpfc_reset_barrier(phba); 1500 spin_lock_irq(&phba->hbalock); 1501 lpfc_sli_brdreset(phba); 1502 spin_unlock_irq(&phba->hbalock); 1503 lpfc_hba_down_post(phba); 1504 lpfc_sli_brdready(phba, HS_MBRDY); 1505 lpfc_unblock_mgmt_io(phba); 1506 phba->link_state = LPFC_HBA_ERROR; 1507 return; 1508 } 1509 1510 /** 1511 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1512 * @phba: pointer to lpfc hba data structure. 1513 * 1514 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1515 * other than Port Error 6 has been detected. 1516 **/ 1517 void 1518 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1519 { 1520 spin_lock_irq(&phba->hbalock); 1521 phba->link_state = LPFC_HBA_ERROR; 1522 spin_unlock_irq(&phba->hbalock); 1523 1524 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1525 lpfc_sli_flush_io_rings(phba); 1526 lpfc_offline(phba); 1527 lpfc_hba_down_post(phba); 1528 lpfc_unblock_mgmt_io(phba); 1529 } 1530 1531 /** 1532 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1533 * @phba: pointer to lpfc hba data structure. 1534 * 1535 * This routine is invoked to handle the deferred HBA hardware error 1536 * conditions. This type of error is indicated by HBA by setting ER1 1537 * and another ER bit in the host status register. The driver will 1538 * wait until the ER1 bit clears before handling the error condition. 1539 **/ 1540 static void 1541 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1542 { 1543 uint32_t old_host_status = phba->work_hs; 1544 struct lpfc_sli *psli = &phba->sli; 1545 1546 /* If the pci channel is offline, ignore possible errors, 1547 * since we cannot communicate with the pci card anyway. 
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs,
			phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers erratt. That could cause the I/Os
	 * to be dropped by the firmware. Error out the iocbs (I/O) on the
	 * txcmplq and let the SCSI layer retry them after re-establishing
	 * link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *)&board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause the I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/O) on the txcmplq and let the SCSI
		 * layer retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
1734 */ 1735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1736 "0457 Adapter Hardware Error " 1737 "Data: x%x x%x x%x\n", 1738 phba->work_hs, 1739 phba->work_status[0], phba->work_status[1]); 1740 1741 event_data = FC_REG_DUMP_EVENT; 1742 shost = lpfc_shost_from_vport(vport); 1743 fc_host_post_vendor_event(shost, fc_get_event_number(), 1744 sizeof(event_data), (char *) &event_data, 1745 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1746 1747 lpfc_offline_eratt(phba); 1748 } 1749 return; 1750 } 1751 1752 /** 1753 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1754 * @phba: pointer to lpfc hba data structure. 1755 * @mbx_action: flag for mailbox shutdown action. 1756 * 1757 * This routine is invoked to perform an SLI4 port PCI function reset in 1758 * response to port status register polling attention. It waits for port 1759 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1760 * During this process, interrupt vectors are freed and later requested 1761 * for handling possible port resource change. 1762 **/ 1763 static int 1764 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1765 bool en_rn_msg) 1766 { 1767 int rc; 1768 uint32_t intr_mode; 1769 1770 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1771 LPFC_SLI_INTF_IF_TYPE_2) { 1772 /* 1773 * On error status condition, driver need to wait for port 1774 * ready before performing reset. 1775 */ 1776 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1777 if (rc) 1778 return rc; 1779 } 1780 1781 /* need reset: attempt for port recovery */ 1782 if (en_rn_msg) 1783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1784 "2887 Reset Needed: Attempting Port " 1785 "Recovery...\n"); 1786 lpfc_offline_prep(phba, mbx_action); 1787 lpfc_sli_flush_io_rings(phba); 1788 lpfc_offline(phba); 1789 /* release interrupt for possible resource change */ 1790 lpfc_sli4_disable_intr(phba); 1791 rc = lpfc_sli_brdrestart(phba); 1792 if (rc) { 1793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1794 "6309 Failed to restart board\n"); 1795 return rc; 1796 } 1797 /* request and enable interrupt */ 1798 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1799 if (intr_mode == LPFC_INTR_ERROR) { 1800 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1801 "3175 Failed to enable interrupt\n"); 1802 return -EIO; 1803 } 1804 phba->intr_mode = intr_mode; 1805 rc = lpfc_online(phba); 1806 if (rc == 0) 1807 lpfc_unblock_mgmt_io(phba); 1808 1809 return rc; 1810 } 1811 1812 /** 1813 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1814 * @phba: pointer to lpfc hba data structure. 1815 * 1816 * This routine is invoked to handle the SLI4 HBA hardware error attention 1817 * conditions. 1818 **/ 1819 static void 1820 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1821 { 1822 struct lpfc_vport *vport = phba->pport; 1823 uint32_t event_data; 1824 struct Scsi_Host *shost; 1825 uint32_t if_type; 1826 struct lpfc_register portstat_reg = {0}; 1827 uint32_t reg_err1, reg_err2; 1828 uint32_t uerrlo_reg, uemasklo_reg; 1829 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1830 bool en_rn_msg = true; 1831 struct temp_event temp_event_data; 1832 struct lpfc_register portsmphr_reg; 1833 int rc, i; 1834 1835 /* If the pci channel is offline, ignore possible errors, since 1836 * we cannot communicate with the pci card anyway. 
1837 */ 1838 if (pci_channel_offline(phba->pcidev)) { 1839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1840 "3166 pci channel is offline\n"); 1841 lpfc_sli4_offline_eratt(phba); 1842 return; 1843 } 1844 1845 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1846 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1847 switch (if_type) { 1848 case LPFC_SLI_INTF_IF_TYPE_0: 1849 pci_rd_rc1 = lpfc_readl( 1850 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1851 &uerrlo_reg); 1852 pci_rd_rc2 = lpfc_readl( 1853 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1854 &uemasklo_reg); 1855 /* consider PCI bus read error as pci_channel_offline */ 1856 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1857 return; 1858 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1859 lpfc_sli4_offline_eratt(phba); 1860 return; 1861 } 1862 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1863 "7623 Checking UE recoverable"); 1864 1865 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1866 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1867 &portsmphr_reg.word0)) 1868 continue; 1869 1870 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1871 &portsmphr_reg); 1872 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1873 LPFC_PORT_SEM_UE_RECOVERABLE) 1874 break; 1875 /*Sleep for 1Sec, before checking SEMAPHORE */ 1876 msleep(1000); 1877 } 1878 1879 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1880 "4827 smphr_port_status x%x : Waited %dSec", 1881 smphr_port_status, i); 1882 1883 /* Recoverable UE, reset the HBA device */ 1884 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1885 LPFC_PORT_SEM_UE_RECOVERABLE) { 1886 for (i = 0; i < 20; i++) { 1887 msleep(1000); 1888 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1889 &portsmphr_reg.word0) && 1890 (LPFC_POST_STAGE_PORT_READY == 1891 bf_get(lpfc_port_smphr_port_status, 1892 &portsmphr_reg))) { 1893 rc = lpfc_sli4_port_sta_fn_reset(phba, 1894 LPFC_MBX_NO_WAIT, en_rn_msg); 1895 if (rc == 0) 1896 return; 1897 lpfc_printf_log(phba, 1898 KERN_ERR, LOG_INIT, 1899 "4215 Failed to recover UE"); 1900 break; 1901 } 1902 } 1903 } 1904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1905 "7624 Firmware not ready: Failing UE recovery," 1906 " waited %dSec", i); 1907 phba->link_state = LPFC_HBA_ERROR; 1908 break; 1909 1910 case LPFC_SLI_INTF_IF_TYPE_2: 1911 case LPFC_SLI_INTF_IF_TYPE_6: 1912 pci_rd_rc1 = lpfc_readl( 1913 phba->sli4_hba.u.if_type2.STATUSregaddr, 1914 &portstat_reg.word0); 1915 /* consider PCI bus read error as pci_channel_offline */ 1916 if (pci_rd_rc1 == -EIO) { 1917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1918 "3151 PCI bus read access failure: x%x\n", 1919 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1920 lpfc_sli4_offline_eratt(phba); 1921 return; 1922 } 1923 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1924 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1925 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1926 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1927 "2889 Port Overtemperature event, " 1928 "taking port offline Data: x%x x%x\n", 1929 reg_err1, reg_err2); 1930 1931 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1932 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1933 temp_event_data.event_code = LPFC_CRIT_TEMP; 1934 temp_event_data.data = 0xFFFFFFFF; 1935 1936 shost = lpfc_shost_from_vport(phba->pport); 1937 fc_host_post_vendor_event(shost, fc_get_event_number(), 1938 sizeof(temp_event_data), 1939 (char *)&temp_event_data, 1940 SCSI_NL_VID_TYPE_PCI 1941 | PCI_VENDOR_ID_EMULEX); 1942 1943 spin_lock_irq(&phba->hbalock); 1944 
phba->over_temp_state = HBA_OVER_TEMP; 1945 spin_unlock_irq(&phba->hbalock); 1946 lpfc_sli4_offline_eratt(phba); 1947 return; 1948 } 1949 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1950 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 1951 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1952 "3143 Port Down: Firmware Update " 1953 "Detected\n"); 1954 en_rn_msg = false; 1955 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1956 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1957 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1958 "3144 Port Down: Debug Dump\n"); 1959 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1960 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1961 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1962 "3145 Port Down: Provisioning\n"); 1963 1964 /* If resets are disabled then leave the HBA alone and return */ 1965 if (!phba->cfg_enable_hba_reset) 1966 return; 1967 1968 /* Check port status register for function reset */ 1969 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1970 en_rn_msg); 1971 if (rc == 0) { 1972 /* don't report event on forced debug dump */ 1973 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1974 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1975 return; 1976 else 1977 break; 1978 } 1979 /* fall through for not able to recover */ 1980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1981 "3152 Unrecoverable error\n"); 1982 phba->link_state = LPFC_HBA_ERROR; 1983 break; 1984 case LPFC_SLI_INTF_IF_TYPE_1: 1985 default: 1986 break; 1987 } 1988 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1989 "3123 Report dump event to upper layer\n"); 1990 /* Send an internal error event to mgmt application */ 1991 lpfc_board_errevt_to_mgmt(phba); 1992 1993 event_data = FC_REG_DUMP_EVENT; 1994 shost = lpfc_shost_from_vport(vport); 1995 fc_host_post_vendor_event(shost, fc_get_event_number(), 1996 sizeof(event_data), (char *) &event_data, 1997 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1998 } 1999 2000 /** 2001 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2002 * @phba: pointer to lpfc HBA data structure. 2003 * 2004 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2005 * routine from the API jump table function pointer from the lpfc_hba struct. 2006 * 2007 * Return codes 2008 * 0 - success. 2009 * Any other value - error. 2010 **/ 2011 void 2012 lpfc_handle_eratt(struct lpfc_hba *phba) 2013 { 2014 (*phba->lpfc_handle_eratt)(phba); 2015 } 2016 2017 /** 2018 * lpfc_handle_latt - The HBA link event handler 2019 * @phba: pointer to lpfc hba data structure. 2020 * 2021 * This routine is invoked from the worker thread to handle a HBA host 2022 * attention link event. SLI3 only. 
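 * The routine issues a READ_TOPOLOGY mailbox command and defers the rest of
 * the link event processing to the lpfc_mbx_cmpl_read_topology completion
 * handler. On any failure it re-enables link attention interrupts and marks
 * the HBA in error state.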
2023 **/ 2024 void 2025 lpfc_handle_latt(struct lpfc_hba *phba) 2026 { 2027 struct lpfc_vport *vport = phba->pport; 2028 struct lpfc_sli *psli = &phba->sli; 2029 LPFC_MBOXQ_t *pmb; 2030 volatile uint32_t control; 2031 struct lpfc_dmabuf *mp; 2032 int rc = 0; 2033 2034 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2035 if (!pmb) { 2036 rc = 1; 2037 goto lpfc_handle_latt_err_exit; 2038 } 2039 2040 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2041 if (!mp) { 2042 rc = 2; 2043 goto lpfc_handle_latt_free_pmb; 2044 } 2045 2046 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2047 if (!mp->virt) { 2048 rc = 3; 2049 goto lpfc_handle_latt_free_mp; 2050 } 2051 2052 /* Cleanup any outstanding ELS commands */ 2053 lpfc_els_flush_all_cmd(phba); 2054 2055 psli->slistat.link_event++; 2056 lpfc_read_topology(phba, pmb, mp); 2057 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2058 pmb->vport = vport; 2059 /* Block ELS IOCBs until we have processed this mbox command */ 2060 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2061 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2062 if (rc == MBX_NOT_FINISHED) { 2063 rc = 4; 2064 goto lpfc_handle_latt_free_mbuf; 2065 } 2066 2067 /* Clear Link Attention in HA REG */ 2068 spin_lock_irq(&phba->hbalock); 2069 writel(HA_LATT, phba->HAregaddr); 2070 readl(phba->HAregaddr); /* flush */ 2071 spin_unlock_irq(&phba->hbalock); 2072 2073 return; 2074 2075 lpfc_handle_latt_free_mbuf: 2076 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2077 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2078 lpfc_handle_latt_free_mp: 2079 kfree(mp); 2080 lpfc_handle_latt_free_pmb: 2081 mempool_free(pmb, phba->mbox_mem_pool); 2082 lpfc_handle_latt_err_exit: 2083 /* Enable Link attention interrupts */ 2084 spin_lock_irq(&phba->hbalock); 2085 psli->sli_flag |= LPFC_PROCESS_LA; 2086 control = readl(phba->HCregaddr); 2087 control |= HC_LAINT_ENA; 2088 writel(control, phba->HCregaddr); 2089 readl(phba->HCregaddr); /* flush */ 2090 2091 /* Clear Link Attention in HA REG */ 2092 writel(HA_LATT, phba->HAregaddr); 2093 readl(phba->HAregaddr); /* flush */ 2094 spin_unlock_irq(&phba->hbalock); 2095 lpfc_linkdown(phba); 2096 phba->link_state = LPFC_HBA_ERROR; 2097 2098 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2099 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2100 2101 return; 2102 } 2103 2104 /** 2105 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2106 * @phba: pointer to lpfc hba data structure. 2107 * @vpd: pointer to the vital product data. 2108 * @len: length of the vital product data in bytes. 2109 * 2110 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2111 * an array of characters. In this routine, the ModelName, ProgramType, and 2112 * ModelDesc, etc. fields of the phba data structure will be populated. 
2113 * 2114 * Return codes 2115 * 0 - pointer to the VPD passed in is NULL 2116 * 1 - success 2117 **/ 2118 int 2119 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2120 { 2121 uint8_t lenlo, lenhi; 2122 int Length; 2123 int i, j; 2124 int finished = 0; 2125 int index = 0; 2126 2127 if (!vpd) 2128 return 0; 2129 2130 /* Vital Product */ 2131 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2132 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2133 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2134 (uint32_t) vpd[3]); 2135 while (!finished && (index < (len - 4))) { 2136 switch (vpd[index]) { 2137 case 0x82: 2138 case 0x91: 2139 index += 1; 2140 lenlo = vpd[index]; 2141 index += 1; 2142 lenhi = vpd[index]; 2143 index += 1; 2144 i = ((((unsigned short)lenhi) << 8) + lenlo); 2145 index += i; 2146 break; 2147 case 0x90: 2148 index += 1; 2149 lenlo = vpd[index]; 2150 index += 1; 2151 lenhi = vpd[index]; 2152 index += 1; 2153 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2154 if (Length > len - index) 2155 Length = len - index; 2156 while (Length > 0) { 2157 /* Look for Serial Number */ 2158 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2159 index += 2; 2160 i = vpd[index]; 2161 index += 1; 2162 j = 0; 2163 Length -= (3+i); 2164 while(i--) { 2165 phba->SerialNumber[j++] = vpd[index++]; 2166 if (j == 31) 2167 break; 2168 } 2169 phba->SerialNumber[j] = 0; 2170 continue; 2171 } 2172 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2173 phba->vpd_flag |= VPD_MODEL_DESC; 2174 index += 2; 2175 i = vpd[index]; 2176 index += 1; 2177 j = 0; 2178 Length -= (3+i); 2179 while(i--) { 2180 phba->ModelDesc[j++] = vpd[index++]; 2181 if (j == 255) 2182 break; 2183 } 2184 phba->ModelDesc[j] = 0; 2185 continue; 2186 } 2187 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2188 phba->vpd_flag |= VPD_MODEL_NAME; 2189 index += 2; 2190 i = vpd[index]; 2191 index += 1; 2192 j = 0; 2193 Length -= (3+i); 2194 while(i--) { 2195 phba->ModelName[j++] = vpd[index++]; 2196 if (j == 79) 2197 break; 2198 } 2199 phba->ModelName[j] = 0; 2200 continue; 2201 } 2202 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2203 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2204 index += 2; 2205 i = vpd[index]; 2206 index += 1; 2207 j = 0; 2208 Length -= (3+i); 2209 while(i--) { 2210 phba->ProgramType[j++] = vpd[index++]; 2211 if (j == 255) 2212 break; 2213 } 2214 phba->ProgramType[j] = 0; 2215 continue; 2216 } 2217 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2218 phba->vpd_flag |= VPD_PORT; 2219 index += 2; 2220 i = vpd[index]; 2221 index += 1; 2222 j = 0; 2223 Length -= (3+i); 2224 while(i--) { 2225 if ((phba->sli_rev == LPFC_SLI_REV4) && 2226 (phba->sli4_hba.pport_name_sta == 2227 LPFC_SLI4_PPNAME_GET)) { 2228 j++; 2229 index++; 2230 } else 2231 phba->Port[j++] = vpd[index++]; 2232 if (j == 19) 2233 break; 2234 } 2235 if ((phba->sli_rev != LPFC_SLI_REV4) || 2236 (phba->sli4_hba.pport_name_sta == 2237 LPFC_SLI4_PPNAME_NON)) 2238 phba->Port[j] = 0; 2239 continue; 2240 } 2241 else { 2242 index += 2; 2243 i = vpd[index]; 2244 index += 1; 2245 index += i; 2246 Length -= (3 + i); 2247 } 2248 } 2249 finished = 0; 2250 break; 2251 case 0x78: 2252 finished = 1; 2253 break; 2254 default: 2255 index ++; 2256 break; 2257 } 2258 } 2259 2260 return(1); 2261 } 2262 2263 /** 2264 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2265 * @phba: pointer to lpfc hba data structure. 2266 * @mdp: pointer to the data structure to hold the derived model name. 
2267 * @descp: pointer to the data structure to hold the derived description. 2268 * 2269 * This routine retrieves HBA's description based on its registered PCI device 2270 * ID. The @descp passed into this function points to an array of 256 chars. It 2271 * shall be returned with the model name, maximum speed, and the host bus type. 2272 * The @mdp passed into this function points to an array of 80 chars. When the 2273 * function returns, the @mdp will be filled with the model name. 2274 **/ 2275 static void 2276 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2277 { 2278 lpfc_vpd_t *vp; 2279 uint16_t dev_id = phba->pcidev->device; 2280 int max_speed; 2281 int GE = 0; 2282 int oneConnect = 0; /* default is not a oneConnect */ 2283 struct { 2284 char *name; 2285 char *bus; 2286 char *function; 2287 } m = {"<Unknown>", "", ""}; 2288 2289 if (mdp && mdp[0] != '\0' 2290 && descp && descp[0] != '\0') 2291 return; 2292 2293 if (phba->lmt & LMT_64Gb) 2294 max_speed = 64; 2295 else if (phba->lmt & LMT_32Gb) 2296 max_speed = 32; 2297 else if (phba->lmt & LMT_16Gb) 2298 max_speed = 16; 2299 else if (phba->lmt & LMT_10Gb) 2300 max_speed = 10; 2301 else if (phba->lmt & LMT_8Gb) 2302 max_speed = 8; 2303 else if (phba->lmt & LMT_4Gb) 2304 max_speed = 4; 2305 else if (phba->lmt & LMT_2Gb) 2306 max_speed = 2; 2307 else if (phba->lmt & LMT_1Gb) 2308 max_speed = 1; 2309 else 2310 max_speed = 0; 2311 2312 vp = &phba->vpd; 2313 2314 switch (dev_id) { 2315 case PCI_DEVICE_ID_FIREFLY: 2316 m = (typeof(m)){"LP6000", "PCI", 2317 "Obsolete, Unsupported Fibre Channel Adapter"}; 2318 break; 2319 case PCI_DEVICE_ID_SUPERFLY: 2320 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2321 m = (typeof(m)){"LP7000", "PCI", ""}; 2322 else 2323 m = (typeof(m)){"LP7000E", "PCI", ""}; 2324 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2325 break; 2326 case PCI_DEVICE_ID_DRAGONFLY: 2327 m = (typeof(m)){"LP8000", "PCI", 2328 "Obsolete, Unsupported Fibre Channel Adapter"}; 2329 break; 2330 case PCI_DEVICE_ID_CENTAUR: 2331 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2332 m = (typeof(m)){"LP9002", "PCI", ""}; 2333 else 2334 m = (typeof(m)){"LP9000", "PCI", ""}; 2335 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2336 break; 2337 case PCI_DEVICE_ID_RFLY: 2338 m = (typeof(m)){"LP952", "PCI", 2339 "Obsolete, Unsupported Fibre Channel Adapter"}; 2340 break; 2341 case PCI_DEVICE_ID_PEGASUS: 2342 m = (typeof(m)){"LP9802", "PCI-X", 2343 "Obsolete, Unsupported Fibre Channel Adapter"}; 2344 break; 2345 case PCI_DEVICE_ID_THOR: 2346 m = (typeof(m)){"LP10000", "PCI-X", 2347 "Obsolete, Unsupported Fibre Channel Adapter"}; 2348 break; 2349 case PCI_DEVICE_ID_VIPER: 2350 m = (typeof(m)){"LPX1000", "PCI-X", 2351 "Obsolete, Unsupported Fibre Channel Adapter"}; 2352 break; 2353 case PCI_DEVICE_ID_PFLY: 2354 m = (typeof(m)){"LP982", "PCI-X", 2355 "Obsolete, Unsupported Fibre Channel Adapter"}; 2356 break; 2357 case PCI_DEVICE_ID_TFLY: 2358 m = (typeof(m)){"LP1050", "PCI-X", 2359 "Obsolete, Unsupported Fibre Channel Adapter"}; 2360 break; 2361 case PCI_DEVICE_ID_HELIOS: 2362 m = (typeof(m)){"LP11000", "PCI-X2", 2363 "Obsolete, Unsupported Fibre Channel Adapter"}; 2364 break; 2365 case PCI_DEVICE_ID_HELIOS_SCSP: 2366 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2367 "Obsolete, Unsupported Fibre Channel Adapter"}; 2368 break; 2369 case PCI_DEVICE_ID_HELIOS_DCSP: 2370 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2371 "Obsolete, Unsupported Fibre Channel Adapter"}; 2372 break; 2373 case 
PCI_DEVICE_ID_NEPTUNE: 2374 m = (typeof(m)){"LPe1000", "PCIe", 2375 "Obsolete, Unsupported Fibre Channel Adapter"}; 2376 break; 2377 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2378 m = (typeof(m)){"LPe1000-SP", "PCIe", 2379 "Obsolete, Unsupported Fibre Channel Adapter"}; 2380 break; 2381 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2382 m = (typeof(m)){"LPe1002-SP", "PCIe", 2383 "Obsolete, Unsupported Fibre Channel Adapter"}; 2384 break; 2385 case PCI_DEVICE_ID_BMID: 2386 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2387 break; 2388 case PCI_DEVICE_ID_BSMB: 2389 m = (typeof(m)){"LP111", "PCI-X2", 2390 "Obsolete, Unsupported Fibre Channel Adapter"}; 2391 break; 2392 case PCI_DEVICE_ID_ZEPHYR: 2393 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2394 break; 2395 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2396 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2397 break; 2398 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2399 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2400 GE = 1; 2401 break; 2402 case PCI_DEVICE_ID_ZMID: 2403 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2404 break; 2405 case PCI_DEVICE_ID_ZSMB: 2406 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2407 break; 2408 case PCI_DEVICE_ID_LP101: 2409 m = (typeof(m)){"LP101", "PCI-X", 2410 "Obsolete, Unsupported Fibre Channel Adapter"}; 2411 break; 2412 case PCI_DEVICE_ID_LP10000S: 2413 m = (typeof(m)){"LP10000-S", "PCI", 2414 "Obsolete, Unsupported Fibre Channel Adapter"}; 2415 break; 2416 case PCI_DEVICE_ID_LP11000S: 2417 m = (typeof(m)){"LP11000-S", "PCI-X2", 2418 "Obsolete, Unsupported Fibre Channel Adapter"}; 2419 break; 2420 case PCI_DEVICE_ID_LPE11000S: 2421 m = (typeof(m)){"LPe11000-S", "PCIe", 2422 "Obsolete, Unsupported Fibre Channel Adapter"}; 2423 break; 2424 case PCI_DEVICE_ID_SAT: 2425 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2426 break; 2427 case PCI_DEVICE_ID_SAT_MID: 2428 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2429 break; 2430 case PCI_DEVICE_ID_SAT_SMB: 2431 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2432 break; 2433 case PCI_DEVICE_ID_SAT_DCSP: 2434 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2435 break; 2436 case PCI_DEVICE_ID_SAT_SCSP: 2437 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2438 break; 2439 case PCI_DEVICE_ID_SAT_S: 2440 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2441 break; 2442 case PCI_DEVICE_ID_HORNET: 2443 m = (typeof(m)){"LP21000", "PCIe", 2444 "Obsolete, Unsupported FCoE Adapter"}; 2445 GE = 1; 2446 break; 2447 case PCI_DEVICE_ID_PROTEUS_VF: 2448 m = (typeof(m)){"LPev12000", "PCIe IOV", 2449 "Obsolete, Unsupported Fibre Channel Adapter"}; 2450 break; 2451 case PCI_DEVICE_ID_PROTEUS_PF: 2452 m = (typeof(m)){"LPev12000", "PCIe IOV", 2453 "Obsolete, Unsupported Fibre Channel Adapter"}; 2454 break; 2455 case PCI_DEVICE_ID_PROTEUS_S: 2456 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2457 "Obsolete, Unsupported Fibre Channel Adapter"}; 2458 break; 2459 case PCI_DEVICE_ID_TIGERSHARK: 2460 oneConnect = 1; 2461 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2462 break; 2463 case PCI_DEVICE_ID_TOMCAT: 2464 oneConnect = 1; 2465 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2466 break; 2467 case PCI_DEVICE_ID_FALCON: 2468 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2469 "EmulexSecure Fibre"}; 2470 break; 2471 case PCI_DEVICE_ID_BALIUS: 2472 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2473 "Obsolete, Unsupported Fibre Channel Adapter"}; 2474 break; 2475 
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_G6_FC:
		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7_FC:
		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/*
	 * oneConnect hbas require special processing; they are all initiators
	 * and we put the port number on the end.
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
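 *   Buffers that could not be posted are remembered in pring->missbufcnt and
 *   are retried on the next call to this routine.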
2542 **/ 2543 int 2544 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2545 { 2546 IOCB_t *icmd; 2547 struct lpfc_iocbq *iocb; 2548 struct lpfc_dmabuf *mp1, *mp2; 2549 2550 cnt += pring->missbufcnt; 2551 2552 /* While there are buffers to post */ 2553 while (cnt > 0) { 2554 /* Allocate buffer for command iocb */ 2555 iocb = lpfc_sli_get_iocbq(phba); 2556 if (iocb == NULL) { 2557 pring->missbufcnt = cnt; 2558 return cnt; 2559 } 2560 icmd = &iocb->iocb; 2561 2562 /* 2 buffers can be posted per command */ 2563 /* Allocate buffer to post */ 2564 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2565 if (mp1) 2566 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2567 if (!mp1 || !mp1->virt) { 2568 kfree(mp1); 2569 lpfc_sli_release_iocbq(phba, iocb); 2570 pring->missbufcnt = cnt; 2571 return cnt; 2572 } 2573 2574 INIT_LIST_HEAD(&mp1->list); 2575 /* Allocate buffer to post */ 2576 if (cnt > 1) { 2577 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2578 if (mp2) 2579 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2580 &mp2->phys); 2581 if (!mp2 || !mp2->virt) { 2582 kfree(mp2); 2583 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2584 kfree(mp1); 2585 lpfc_sli_release_iocbq(phba, iocb); 2586 pring->missbufcnt = cnt; 2587 return cnt; 2588 } 2589 2590 INIT_LIST_HEAD(&mp2->list); 2591 } else { 2592 mp2 = NULL; 2593 } 2594 2595 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2596 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2597 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2598 icmd->ulpBdeCount = 1; 2599 cnt--; 2600 if (mp2) { 2601 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2602 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2603 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2604 cnt--; 2605 icmd->ulpBdeCount = 2; 2606 } 2607 2608 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2609 icmd->ulpLe = 1; 2610 2611 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2612 IOCB_ERROR) { 2613 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2614 kfree(mp1); 2615 cnt++; 2616 if (mp2) { 2617 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2618 kfree(mp2); 2619 cnt++; 2620 } 2621 lpfc_sli_release_iocbq(phba, iocb); 2622 pring->missbufcnt = cnt; 2623 return cnt; 2624 } 2625 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2626 if (mp2) 2627 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2628 } 2629 pring->missbufcnt = 0; 2630 return 0; 2631 } 2632 2633 /** 2634 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2635 * @phba: pointer to lpfc hba data structure. 2636 * 2637 * This routine posts initial receive IOCB buffers to the ELS ring. The 2638 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2639 * set to 64 IOCBs. SLI3 only. 2640 * 2641 * Return codes 2642 * 0 - success (currently always success) 2643 **/ 2644 static int 2645 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2646 { 2647 struct lpfc_sli *psli = &phba->sli; 2648 2649 /* Ring 0, ELS / CT buffers */ 2650 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2651 /* Ring 2 - FCP no buffers needed */ 2652 2653 return 0; 2654 } 2655 2656 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2657 2658 /** 2659 * lpfc_sha_init - Set up initial array of hash table entries 2660 * @HashResultPointer: pointer to an array as hash table. 2661 * 2662 * This routine sets up the initial values to the array of hash table entries 2663 * for the LC HBAs. 
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table, returned through the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
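 * The working hash array is seeded with the adapter WWNN, mixed with the
 * host challenge random data, and run through the SHA-1 style digest above;
 * the result is returned through @hbainit.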
2754 **/ 2755 void 2756 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2757 { 2758 int t; 2759 uint32_t *HashWorking; 2760 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2761 2762 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2763 if (!HashWorking) 2764 return; 2765 2766 HashWorking[0] = HashWorking[78] = *pwwnn++; 2767 HashWorking[1] = HashWorking[79] = *pwwnn; 2768 2769 for (t = 0; t < 7; t++) 2770 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2771 2772 lpfc_sha_init(hbainit); 2773 lpfc_sha_iterate(hbainit, HashWorking); 2774 kfree(HashWorking); 2775 } 2776 2777 /** 2778 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2779 * @vport: pointer to a virtual N_Port data structure. 2780 * 2781 * This routine performs the necessary cleanups before deleting the @vport. 2782 * It invokes the discovery state machine to perform necessary state 2783 * transitions and to release the ndlps associated with the @vport. Note, 2784 * the physical port is treated as @vport 0. 2785 **/ 2786 void 2787 lpfc_cleanup(struct lpfc_vport *vport) 2788 { 2789 struct lpfc_hba *phba = vport->phba; 2790 struct lpfc_nodelist *ndlp, *next_ndlp; 2791 int i = 0; 2792 2793 if (phba->link_state > LPFC_LINK_DOWN) 2794 lpfc_port_link_failure(vport); 2795 2796 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2797 if (!NLP_CHK_NODE_ACT(ndlp)) { 2798 ndlp = lpfc_enable_node(vport, ndlp, 2799 NLP_STE_UNUSED_NODE); 2800 if (!ndlp) 2801 continue; 2802 spin_lock_irq(&phba->ndlp_lock); 2803 NLP_SET_FREE_REQ(ndlp); 2804 spin_unlock_irq(&phba->ndlp_lock); 2805 /* Trigger the release of the ndlp memory */ 2806 lpfc_nlp_put(ndlp); 2807 continue; 2808 } 2809 spin_lock_irq(&phba->ndlp_lock); 2810 if (NLP_CHK_FREE_REQ(ndlp)) { 2811 /* The ndlp should not be in memory free mode already */ 2812 spin_unlock_irq(&phba->ndlp_lock); 2813 continue; 2814 } else 2815 /* Indicate request for freeing ndlp memory */ 2816 NLP_SET_FREE_REQ(ndlp); 2817 spin_unlock_irq(&phba->ndlp_lock); 2818 2819 if (vport->port_type != LPFC_PHYSICAL_PORT && 2820 ndlp->nlp_DID == Fabric_DID) { 2821 /* Just free up ndlp with Fabric_DID for vports */ 2822 lpfc_nlp_put(ndlp); 2823 continue; 2824 } 2825 2826 /* take care of nodes in unused state before the state 2827 * machine taking action. 2828 */ 2829 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2830 lpfc_nlp_put(ndlp); 2831 continue; 2832 } 2833 2834 if (ndlp->nlp_type & NLP_FABRIC) 2835 lpfc_disc_state_machine(vport, ndlp, NULL, 2836 NLP_EVT_DEVICE_RECOVERY); 2837 2838 lpfc_disc_state_machine(vport, ndlp, NULL, 2839 NLP_EVT_DEVICE_RM); 2840 } 2841 2842 /* At this point, ALL ndlp's should be gone 2843 * because of the previous NLP_EVT_DEVICE_RM. 2844 * Lets wait for this to happen, if needed. 2845 */ 2846 while (!list_empty(&vport->fc_nodes)) { 2847 if (i++ > 3000) { 2848 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2849 "0233 Nodelist not empty\n"); 2850 list_for_each_entry_safe(ndlp, next_ndlp, 2851 &vport->fc_nodes, nlp_listp) { 2852 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2853 LOG_NODE, 2854 "0282 did:x%x ndlp:x%px " 2855 "usgmap:x%x refcnt:%d\n", 2856 ndlp->nlp_DID, (void *)ndlp, 2857 ndlp->nlp_usg_map, 2858 kref_read(&ndlp->kref)); 2859 } 2860 break; 2861 } 2862 2863 /* Wait for any activity on ndlps to settle */ 2864 msleep(10); 2865 } 2866 lpfc_cleanup_vports_rrqs(vport, NULL); 2867 } 2868 2869 /** 2870 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2871 * @vport: pointer to a virtual N_Port data structure. 
2872 * 2873 * This routine stops all the timers associated with a @vport. This function 2874 * is invoked before disabling or deleting a @vport. Note that the physical 2875 * port is treated as @vport 0. 2876 **/ 2877 void 2878 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2879 { 2880 del_timer_sync(&vport->els_tmofunc); 2881 del_timer_sync(&vport->delayed_disc_tmo); 2882 lpfc_can_disctmo(vport); 2883 return; 2884 } 2885 2886 /** 2887 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2888 * @phba: pointer to lpfc hba data structure. 2889 * 2890 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2891 * caller of this routine should already hold the host lock. 2892 **/ 2893 void 2894 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2895 { 2896 /* Clear pending FCF rediscovery wait flag */ 2897 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2898 2899 /* Now, try to stop the timer */ 2900 del_timer(&phba->fcf.redisc_wait); 2901 } 2902 2903 /** 2904 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2905 * @phba: pointer to lpfc hba data structure. 2906 * 2907 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2908 * checks whether the FCF rediscovery wait timer is pending with the host 2909 * lock held before proceeding with disabling the timer and clearing the 2910 * wait timer pendig flag. 2911 **/ 2912 void 2913 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2914 { 2915 spin_lock_irq(&phba->hbalock); 2916 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2917 /* FCF rediscovery timer already fired or stopped */ 2918 spin_unlock_irq(&phba->hbalock); 2919 return; 2920 } 2921 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2922 /* Clear failover in progress flags */ 2923 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2924 spin_unlock_irq(&phba->hbalock); 2925 } 2926 2927 /** 2928 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2929 * @phba: pointer to lpfc hba data structure. 2930 * 2931 * This routine stops all the timers associated with a HBA. This function is 2932 * invoked before either putting a HBA offline or unloading the driver. 2933 **/ 2934 void 2935 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2936 { 2937 if (phba->pport) 2938 lpfc_stop_vport_timers(phba->pport); 2939 cancel_delayed_work_sync(&phba->eq_delay_work); 2940 del_timer_sync(&phba->sli.mbox_tmo); 2941 del_timer_sync(&phba->fabric_block_timer); 2942 del_timer_sync(&phba->eratt_poll); 2943 del_timer_sync(&phba->hb_tmofunc); 2944 if (phba->sli_rev == LPFC_SLI_REV4) { 2945 del_timer_sync(&phba->rrq_tmr); 2946 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2947 } 2948 phba->hb_outstanding = 0; 2949 2950 switch (phba->pci_dev_grp) { 2951 case LPFC_PCI_DEV_LP: 2952 /* Stop any LightPulse device specific driver timers */ 2953 del_timer_sync(&phba->fcp_poll_timer); 2954 break; 2955 case LPFC_PCI_DEV_OC: 2956 /* Stop any OneConnect device specific driver timers */ 2957 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2958 break; 2959 default: 2960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2961 "0297 Invalid device group (x%x)\n", 2962 phba->pci_dev_grp); 2963 break; 2964 } 2965 return; 2966 } 2967 2968 /** 2969 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2970 * @phba: pointer to lpfc hba data structure. 2971 * 2972 * This routine marks a HBA's management interface as blocked. 
Once the HBA's 2973 * management interface is marked as blocked, all the user space access to 2974 * the HBA, whether they are from sysfs interface or libdfc interface will 2975 * all be blocked. The HBA is set to block the management interface when the 2976 * driver prepares the HBA interface for online or offline. 2977 **/ 2978 static void 2979 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2980 { 2981 unsigned long iflag; 2982 uint8_t actcmd = MBX_HEARTBEAT; 2983 unsigned long timeout; 2984 2985 spin_lock_irqsave(&phba->hbalock, iflag); 2986 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2987 spin_unlock_irqrestore(&phba->hbalock, iflag); 2988 if (mbx_action == LPFC_MBX_NO_WAIT) 2989 return; 2990 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2991 spin_lock_irqsave(&phba->hbalock, iflag); 2992 if (phba->sli.mbox_active) { 2993 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2994 /* Determine how long we might wait for the active mailbox 2995 * command to be gracefully completed by firmware. 2996 */ 2997 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2998 phba->sli.mbox_active) * 1000) + jiffies; 2999 } 3000 spin_unlock_irqrestore(&phba->hbalock, iflag); 3001 3002 /* Wait for the outstnading mailbox command to complete */ 3003 while (phba->sli.mbox_active) { 3004 /* Check active mailbox complete status every 2ms */ 3005 msleep(2); 3006 if (time_after(jiffies, timeout)) { 3007 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3008 "2813 Mgmt IO is Blocked %x " 3009 "- mbox cmd %x still active\n", 3010 phba->sli.sli_flag, actcmd); 3011 break; 3012 } 3013 } 3014 } 3015 3016 /** 3017 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3018 * @phba: pointer to lpfc hba data structure. 3019 * 3020 * Allocate RPIs for all active remote nodes. This is needed whenever 3021 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3022 * is to fixup the temporary rpi assignments. 3023 **/ 3024 void 3025 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3026 { 3027 struct lpfc_nodelist *ndlp, *next_ndlp; 3028 struct lpfc_vport **vports; 3029 int i, rpi; 3030 unsigned long flags; 3031 3032 if (phba->sli_rev != LPFC_SLI_REV4) 3033 return; 3034 3035 vports = lpfc_create_vport_work_array(phba); 3036 if (vports == NULL) 3037 return; 3038 3039 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3040 if (vports[i]->load_flag & FC_UNLOADING) 3041 continue; 3042 3043 list_for_each_entry_safe(ndlp, next_ndlp, 3044 &vports[i]->fc_nodes, 3045 nlp_listp) { 3046 if (!NLP_CHK_NODE_ACT(ndlp)) 3047 continue; 3048 rpi = lpfc_sli4_alloc_rpi(phba); 3049 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3050 spin_lock_irqsave(&phba->ndlp_lock, flags); 3051 NLP_CLR_NODE_ACT(ndlp); 3052 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3053 continue; 3054 } 3055 ndlp->nlp_rpi = rpi; 3056 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3057 "0009 rpi:%x DID:%x " 3058 "flg:%x map:%x x%px\n", ndlp->nlp_rpi, 3059 ndlp->nlp_DID, ndlp->nlp_flag, 3060 ndlp->nlp_usg_map, ndlp); 3061 } 3062 } 3063 lpfc_destroy_vport_work_array(phba, vports); 3064 } 3065 3066 /** 3067 * lpfc_create_expedite_pool - create expedite pool 3068 * @phba: pointer to lpfc hba data structure. 3069 * 3070 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3071 * to expedite pool. Mark them as expedite. 
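 * At most XRI_BATCH buffers are moved into the expedite pool.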
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_init(&epd_pool->lock);
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
 * of HWQ 0 and clears the expedite mark.
 **/
static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &epd_pool->list, list) {
		list_move_tail(&lpfc_ncmd->list,
			       &qp->lpfc_io_buf_list_put);
		lpfc_ncmd->flags = false;
		qp->put_io_bufs++;
		epd_pool->count--;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public and private XRI pools for each HWQ and
 * then moves XRIs from lpfc_io_buf_list_put to the public pools. The high
 * and low watermarks are also initialized.
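 * Each private pool starts with a high watermark of half the per-HWQ XRI
 * limit and a low watermark of XRI_BATCH.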
3140 **/ 3141 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3142 { 3143 u32 i, j; 3144 u32 hwq_count; 3145 u32 count_per_hwq; 3146 struct lpfc_io_buf *lpfc_ncmd; 3147 struct lpfc_io_buf *lpfc_ncmd_next; 3148 unsigned long iflag; 3149 struct lpfc_sli4_hdw_queue *qp; 3150 struct lpfc_multixri_pool *multixri_pool; 3151 struct lpfc_pbl_pool *pbl_pool; 3152 struct lpfc_pvt_pool *pvt_pool; 3153 3154 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3155 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3156 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3157 phba->sli4_hba.io_xri_cnt); 3158 3159 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3160 lpfc_create_expedite_pool(phba); 3161 3162 hwq_count = phba->cfg_hdw_queue; 3163 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3164 3165 for (i = 0; i < hwq_count; i++) { 3166 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3167 3168 if (!multixri_pool) { 3169 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3170 "1238 Failed to allocate memory for " 3171 "multixri_pool\n"); 3172 3173 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3174 lpfc_destroy_expedite_pool(phba); 3175 3176 j = 0; 3177 while (j < i) { 3178 qp = &phba->sli4_hba.hdwq[j]; 3179 kfree(qp->p_multixri_pool); 3180 j++; 3181 } 3182 phba->cfg_xri_rebalancing = 0; 3183 return; 3184 } 3185 3186 qp = &phba->sli4_hba.hdwq[i]; 3187 qp->p_multixri_pool = multixri_pool; 3188 3189 multixri_pool->xri_limit = count_per_hwq; 3190 multixri_pool->rrb_next_hwqid = i; 3191 3192 /* Deal with public free xri pool */ 3193 pbl_pool = &multixri_pool->pbl_pool; 3194 spin_lock_init(&pbl_pool->lock); 3195 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3196 spin_lock(&pbl_pool->lock); 3197 INIT_LIST_HEAD(&pbl_pool->list); 3198 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3199 &qp->lpfc_io_buf_list_put, list) { 3200 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3201 qp->put_io_bufs--; 3202 pbl_pool->count++; 3203 } 3204 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3205 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3206 pbl_pool->count, i); 3207 spin_unlock(&pbl_pool->lock); 3208 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3209 3210 /* Deal with private free xri pool */ 3211 pvt_pool = &multixri_pool->pvt_pool; 3212 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3213 pvt_pool->low_watermark = XRI_BATCH; 3214 spin_lock_init(&pvt_pool->lock); 3215 spin_lock_irqsave(&pvt_pool->lock, iflag); 3216 INIT_LIST_HEAD(&pvt_pool->list); 3217 pvt_pool->count = 0; 3218 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3219 } 3220 } 3221 3222 /** 3223 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3224 * @phba: pointer to lpfc hba data structure. 3225 * 3226 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
3227 **/ 3228 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3229 { 3230 u32 i; 3231 u32 hwq_count; 3232 struct lpfc_io_buf *lpfc_ncmd; 3233 struct lpfc_io_buf *lpfc_ncmd_next; 3234 unsigned long iflag; 3235 struct lpfc_sli4_hdw_queue *qp; 3236 struct lpfc_multixri_pool *multixri_pool; 3237 struct lpfc_pbl_pool *pbl_pool; 3238 struct lpfc_pvt_pool *pvt_pool; 3239 3240 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3241 lpfc_destroy_expedite_pool(phba); 3242 3243 if (!(phba->pport->load_flag & FC_UNLOADING)) 3244 lpfc_sli_flush_io_rings(phba); 3245 3246 hwq_count = phba->cfg_hdw_queue; 3247 3248 for (i = 0; i < hwq_count; i++) { 3249 qp = &phba->sli4_hba.hdwq[i]; 3250 multixri_pool = qp->p_multixri_pool; 3251 if (!multixri_pool) 3252 continue; 3253 3254 qp->p_multixri_pool = NULL; 3255 3256 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3257 3258 /* Deal with public free xri pool */ 3259 pbl_pool = &multixri_pool->pbl_pool; 3260 spin_lock(&pbl_pool->lock); 3261 3262 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3263 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3264 pbl_pool->count, i); 3265 3266 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3267 &pbl_pool->list, list) { 3268 list_move_tail(&lpfc_ncmd->list, 3269 &qp->lpfc_io_buf_list_put); 3270 qp->put_io_bufs++; 3271 pbl_pool->count--; 3272 } 3273 3274 INIT_LIST_HEAD(&pbl_pool->list); 3275 pbl_pool->count = 0; 3276 3277 spin_unlock(&pbl_pool->lock); 3278 3279 /* Deal with private free xri pool */ 3280 pvt_pool = &multixri_pool->pvt_pool; 3281 spin_lock(&pvt_pool->lock); 3282 3283 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3284 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3285 pvt_pool->count, i); 3286 3287 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3288 &pvt_pool->list, list) { 3289 list_move_tail(&lpfc_ncmd->list, 3290 &qp->lpfc_io_buf_list_put); 3291 qp->put_io_bufs++; 3292 pvt_pool->count--; 3293 } 3294 3295 INIT_LIST_HEAD(&pvt_pool->list); 3296 pvt_pool->count = 0; 3297 3298 spin_unlock(&pvt_pool->lock); 3299 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3300 3301 kfree(multixri_pool); 3302 } 3303 } 3304 3305 /** 3306 * lpfc_online - Initialize and bring a HBA online 3307 * @phba: pointer to lpfc hba data structure. 3308 * 3309 * This routine initializes the HBA and brings a HBA online. During this 3310 * process, the management interface is blocked to prevent user space access 3311 * to the HBA interfering with the driver initialization. 3312 * 3313 * Return codes 3314 * 0 - successful 3315 * 1 - failed 3316 **/ 3317 int 3318 lpfc_online(struct lpfc_hba *phba) 3319 { 3320 struct lpfc_vport *vport; 3321 struct lpfc_vport **vports; 3322 int i, error = 0; 3323 bool vpis_cleared = false; 3324 3325 if (!phba) 3326 return 0; 3327 vport = phba->pport; 3328 3329 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3330 return 0; 3331 3332 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3333 "0458 Bring Adapter online\n"); 3334 3335 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3336 3337 if (phba->sli_rev == LPFC_SLI_REV4) { 3338 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3339 lpfc_unblock_mgmt_io(phba); 3340 return 1; 3341 } 3342 spin_lock_irq(&phba->hbalock); 3343 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3344 vpis_cleared = true; 3345 spin_unlock_irq(&phba->hbalock); 3346 3347 /* Reestablish the local initiator port. 3348 * The offline process destroyed the previous lport. 
3349 */ 3350 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3351 !phba->nvmet_support) { 3352 error = lpfc_nvme_create_localport(phba->pport); 3353 if (error) 3354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3355 "6132 NVME restore reg failed " 3356 "on nvmei error x%x\n", error); 3357 } 3358 } else { 3359 lpfc_sli_queue_init(phba); 3360 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3361 lpfc_unblock_mgmt_io(phba); 3362 return 1; 3363 } 3364 } 3365 3366 vports = lpfc_create_vport_work_array(phba); 3367 if (vports != NULL) { 3368 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3369 struct Scsi_Host *shost; 3370 shost = lpfc_shost_from_vport(vports[i]); 3371 spin_lock_irq(shost->host_lock); 3372 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3373 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3374 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3375 if (phba->sli_rev == LPFC_SLI_REV4) { 3376 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3377 if ((vpis_cleared) && 3378 (vports[i]->port_type != 3379 LPFC_PHYSICAL_PORT)) 3380 vports[i]->vpi = 0; 3381 } 3382 spin_unlock_irq(shost->host_lock); 3383 } 3384 } 3385 lpfc_destroy_vport_work_array(phba, vports); 3386 3387 if (phba->cfg_xri_rebalancing) 3388 lpfc_create_multixri_pools(phba); 3389 3390 lpfc_unblock_mgmt_io(phba); 3391 return 0; 3392 } 3393 3394 /** 3395 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3396 * @phba: pointer to lpfc hba data structure. 3397 * 3398 * This routine marks a HBA's management interface as not blocked. Once the 3399 * HBA's management interface is marked as not blocked, all the user space 3400 * access to the HBA, whether they are from sysfs interface or libdfc 3401 * interface will be allowed. The HBA is set to block the management interface 3402 * when the driver prepares the HBA interface for online or offline and then 3403 * set to unblock the management interface afterwards. 3404 **/ 3405 void 3406 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3407 { 3408 unsigned long iflag; 3409 3410 spin_lock_irqsave(&phba->hbalock, iflag); 3411 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3412 spin_unlock_irqrestore(&phba->hbalock, iflag); 3413 } 3414 3415 /** 3416 * lpfc_offline_prep - Prepare a HBA to be brought offline 3417 * @phba: pointer to lpfc hba data structure. 3418 * 3419 * This routine is invoked to prepare a HBA to be brought offline. It performs 3420 * unregistration login to all the nodes on all vports and flushes the mailbox 3421 * queue to make it ready to be brought offline. 
3422 **/ 3423 void 3424 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3425 { 3426 struct lpfc_vport *vport = phba->pport; 3427 struct lpfc_nodelist *ndlp, *next_ndlp; 3428 struct lpfc_vport **vports; 3429 struct Scsi_Host *shost; 3430 int i; 3431 3432 if (vport->fc_flag & FC_OFFLINE_MODE) 3433 return; 3434 3435 lpfc_block_mgmt_io(phba, mbx_action); 3436 3437 lpfc_linkdown(phba); 3438 3439 /* Issue an unreg_login to all nodes on all vports */ 3440 vports = lpfc_create_vport_work_array(phba); 3441 if (vports != NULL) { 3442 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3443 if (vports[i]->load_flag & FC_UNLOADING) 3444 continue; 3445 shost = lpfc_shost_from_vport(vports[i]); 3446 spin_lock_irq(shost->host_lock); 3447 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3448 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3449 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3450 spin_unlock_irq(shost->host_lock); 3451 3452 shost = lpfc_shost_from_vport(vports[i]); 3453 list_for_each_entry_safe(ndlp, next_ndlp, 3454 &vports[i]->fc_nodes, 3455 nlp_listp) { 3456 if (!NLP_CHK_NODE_ACT(ndlp)) 3457 continue; 3458 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3459 continue; 3460 if (ndlp->nlp_type & NLP_FABRIC) { 3461 lpfc_disc_state_machine(vports[i], ndlp, 3462 NULL, NLP_EVT_DEVICE_RECOVERY); 3463 lpfc_disc_state_machine(vports[i], ndlp, 3464 NULL, NLP_EVT_DEVICE_RM); 3465 } 3466 spin_lock_irq(shost->host_lock); 3467 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3468 spin_unlock_irq(shost->host_lock); 3469 /* 3470 * Whenever an SLI4 port goes offline, free the 3471 * RPI. Get a new RPI when the adapter port 3472 * comes back online. 3473 */ 3474 if (phba->sli_rev == LPFC_SLI_REV4) { 3475 lpfc_printf_vlog(ndlp->vport, 3476 KERN_INFO, LOG_NODE, 3477 "0011 lpfc_offline: " 3478 "ndlp:x%px did %x " 3479 "usgmap:x%x rpi:%x\n", 3480 ndlp, ndlp->nlp_DID, 3481 ndlp->nlp_usg_map, 3482 ndlp->nlp_rpi); 3483 3484 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3485 } 3486 lpfc_unreg_rpi(vports[i], ndlp); 3487 } 3488 } 3489 } 3490 lpfc_destroy_vport_work_array(phba, vports); 3491 3492 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3493 3494 if (phba->wq) 3495 flush_workqueue(phba->wq); 3496 } 3497 3498 /** 3499 * lpfc_offline - Bring a HBA offline 3500 * @phba: pointer to lpfc hba data structure. 3501 * 3502 * This routine actually brings a HBA offline. It stops all the timers 3503 * associated with the HBA, brings down the SLI layer, and eventually 3504 * marks the HBA as in offline state for the upper layer protocol. 3505 **/ 3506 void 3507 lpfc_offline(struct lpfc_hba *phba) 3508 { 3509 struct Scsi_Host *shost; 3510 struct lpfc_vport **vports; 3511 int i; 3512 3513 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3514 return; 3515 3516 /* stop port and all timers associated with this hba */ 3517 lpfc_stop_port(phba); 3518 3519 /* Tear down the local and target port registrations. The 3520 * nvme transports need to cleanup. 3521 */ 3522 lpfc_nvmet_destroy_targetport(phba); 3523 lpfc_nvme_destroy_localport(phba->pport); 3524 3525 vports = lpfc_create_vport_work_array(phba); 3526 if (vports != NULL) 3527 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3528 lpfc_stop_vport_timers(vports[i]); 3529 lpfc_destroy_vport_work_array(phba, vports); 3530 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3531 "0460 Bring Adapter offline\n"); 3532 /* Bring down the SLI Layer and cleanup. The HBA is offline 3533 now. 
*/ 3534 lpfc_sli_hba_down(phba); 3535 spin_lock_irq(&phba->hbalock); 3536 phba->work_ha = 0; 3537 spin_unlock_irq(&phba->hbalock); 3538 vports = lpfc_create_vport_work_array(phba); 3539 if (vports != NULL) 3540 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3541 shost = lpfc_shost_from_vport(vports[i]); 3542 spin_lock_irq(shost->host_lock); 3543 vports[i]->work_port_events = 0; 3544 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3545 spin_unlock_irq(shost->host_lock); 3546 } 3547 lpfc_destroy_vport_work_array(phba, vports); 3548 3549 if (phba->cfg_xri_rebalancing) 3550 lpfc_destroy_multixri_pools(phba); 3551 } 3552 3553 /** 3554 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3555 * @phba: pointer to lpfc hba data structure. 3556 * 3557 * This routine is to free all the SCSI buffers and IOCBs from the driver 3558 * list back to kernel. It is called from lpfc_pci_remove_one to free 3559 * the internal resources before the device is removed from the system. 3560 **/ 3561 static void 3562 lpfc_scsi_free(struct lpfc_hba *phba) 3563 { 3564 struct lpfc_io_buf *sb, *sb_next; 3565 3566 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3567 return; 3568 3569 spin_lock_irq(&phba->hbalock); 3570 3571 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3572 3573 spin_lock(&phba->scsi_buf_list_put_lock); 3574 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3575 list) { 3576 list_del(&sb->list); 3577 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3578 sb->dma_handle); 3579 kfree(sb); 3580 phba->total_scsi_bufs--; 3581 } 3582 spin_unlock(&phba->scsi_buf_list_put_lock); 3583 3584 spin_lock(&phba->scsi_buf_list_get_lock); 3585 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3586 list) { 3587 list_del(&sb->list); 3588 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3589 sb->dma_handle); 3590 kfree(sb); 3591 phba->total_scsi_bufs--; 3592 } 3593 spin_unlock(&phba->scsi_buf_list_get_lock); 3594 spin_unlock_irq(&phba->hbalock); 3595 } 3596 3597 /** 3598 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3599 * @phba: pointer to lpfc hba data structure. 3600 * 3601 * This routine is to free all the IO buffers and IOCBs from the driver 3602 * list back to kernel. It is called from lpfc_pci_remove_one to free 3603 * the internal resources before the device is removed from the system. 3604 **/ 3605 void 3606 lpfc_io_free(struct lpfc_hba *phba) 3607 { 3608 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3609 struct lpfc_sli4_hdw_queue *qp; 3610 int idx; 3611 3612 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3613 qp = &phba->sli4_hba.hdwq[idx]; 3614 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3615 spin_lock(&qp->io_buf_list_put_lock); 3616 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3617 &qp->lpfc_io_buf_list_put, 3618 list) { 3619 list_del(&lpfc_ncmd->list); 3620 qp->put_io_bufs--; 3621 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3622 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3623 if (phba->cfg_xpsgl && !phba->nvmet_support) 3624 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3625 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3626 kfree(lpfc_ncmd); 3627 qp->total_io_bufs--; 3628 } 3629 spin_unlock(&qp->io_buf_list_put_lock); 3630 3631 spin_lock(&qp->io_buf_list_get_lock); 3632 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3633 &qp->lpfc_io_buf_list_get, 3634 list) { 3635 list_del(&lpfc_ncmd->list); 3636 qp->get_io_bufs--; 3637 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3638 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3639 if (phba->cfg_xpsgl && !phba->nvmet_support) 3640 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3641 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3642 kfree(lpfc_ncmd); 3643 qp->total_io_bufs--; 3644 } 3645 spin_unlock(&qp->io_buf_list_get_lock); 3646 } 3647 } 3648 3649 /** 3650 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3651 * @phba: pointer to lpfc hba data structure. 3652 * 3653 * This routine first calculates the sizes of the current els and allocated 3654 * scsi sgl lists, and then goes through all sgls to updates the physical 3655 * XRIs assigned due to port function reset. During port initialization, the 3656 * current els and allocated scsi sgl lists are 0s. 3657 * 3658 * Return codes 3659 * 0 - successful (for now, it always returns 0) 3660 **/ 3661 int 3662 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3663 { 3664 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3665 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3666 LIST_HEAD(els_sgl_list); 3667 int rc; 3668 3669 /* 3670 * update on pci function's els xri-sgl list 3671 */ 3672 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3673 3674 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3675 /* els xri-sgl expanded */ 3676 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3677 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3678 "3157 ELS xri-sgl count increased from " 3679 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3680 els_xri_cnt); 3681 /* allocate the additional els sgls */ 3682 for (i = 0; i < xri_cnt; i++) { 3683 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3684 GFP_KERNEL); 3685 if (sglq_entry == NULL) { 3686 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3687 "2562 Failure to allocate an " 3688 "ELS sgl entry:%d\n", i); 3689 rc = -ENOMEM; 3690 goto out_free_mem; 3691 } 3692 sglq_entry->buff_type = GEN_BUFF_TYPE; 3693 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3694 &sglq_entry->phys); 3695 if (sglq_entry->virt == NULL) { 3696 kfree(sglq_entry); 3697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3698 "2563 Failure to allocate an " 3699 "ELS mbuf:%d\n", i); 3700 rc = -ENOMEM; 3701 goto out_free_mem; 3702 } 3703 sglq_entry->sgl = sglq_entry->virt; 3704 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3705 sglq_entry->state = SGL_FREED; 3706 list_add_tail(&sglq_entry->list, &els_sgl_list); 3707 } 3708 spin_lock_irq(&phba->hbalock); 3709 spin_lock(&phba->sli4_hba.sgl_list_lock); 3710 list_splice_init(&els_sgl_list, 3711 &phba->sli4_hba.lpfc_els_sgl_list); 3712 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3713 spin_unlock_irq(&phba->hbalock); 3714 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3715 /* els xri-sgl shrinked */ 3716 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 
3717 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3718 "3158 ELS xri-sgl count decreased from "
3719 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3720 els_xri_cnt);
3721 spin_lock_irq(&phba->hbalock);
3722 spin_lock(&phba->sli4_hba.sgl_list_lock);
3723 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3724 &els_sgl_list);
3725 /* release extra els sgls from list */
3726 for (i = 0; i < xri_cnt; i++) {
3727 list_remove_head(&els_sgl_list,
3728 sglq_entry, struct lpfc_sglq, list);
3729 if (sglq_entry) {
3730 __lpfc_mbuf_free(phba, sglq_entry->virt,
3731 sglq_entry->phys);
3732 kfree(sglq_entry);
3733 }
3734 }
3735 list_splice_init(&els_sgl_list,
3736 &phba->sli4_hba.lpfc_els_sgl_list);
3737 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3738 spin_unlock_irq(&phba->hbalock);
3739 } else
3740 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3741 "3163 ELS xri-sgl count unchanged: %d\n",
3742 els_xri_cnt);
3743 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3744
3745 /* update xris to els sgls on the list */
3746 sglq_entry = NULL;
3747 sglq_entry_next = NULL;
3748 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3749 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3750 lxri = lpfc_sli4_next_xritag(phba);
3751 if (lxri == NO_XRI) {
3752 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3753 "2400 Failed to allocate xri for "
3754 "ELS sgl\n");
3755 rc = -ENOMEM;
3756 goto out_free_mem;
3757 }
3758 sglq_entry->sli4_lxritag = lxri;
3759 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3760 }
3761 return 0;
3762
3763 out_free_mem:
3764 lpfc_free_els_sgl_list(phba);
3765 return rc;
3766 }
3767
3768 /**
3769 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3770 * @phba: pointer to lpfc hba data structure.
3771 *
3772 * This routine first calculates the sizes of the current els and allocated
3773 * nvmet sgl lists, and then goes through all sgls to update the physical
3774 * XRIs assigned due to port function reset. During port initialization, the
3775 * current els and allocated nvmet sgl lists are 0s.
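 * For NVMET, every XRI left over after the ELS allocation is dedicated to
 * I/O, so the target count is simply max_cfg_param.max_xri - els_xri_cnt.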
3776 * 3777 * Return codes 3778 * 0 - successful (for now, it always returns 0) 3779 **/ 3780 int 3781 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 3782 { 3783 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3784 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3785 uint16_t nvmet_xri_cnt; 3786 LIST_HEAD(nvmet_sgl_list); 3787 int rc; 3788 3789 /* 3790 * update on pci function's nvmet xri-sgl list 3791 */ 3792 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3793 3794 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 3795 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3796 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3797 /* els xri-sgl expanded */ 3798 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 3799 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3800 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 3801 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 3802 /* allocate the additional nvmet sgls */ 3803 for (i = 0; i < xri_cnt; i++) { 3804 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3805 GFP_KERNEL); 3806 if (sglq_entry == NULL) { 3807 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3808 "6303 Failure to allocate an " 3809 "NVMET sgl entry:%d\n", i); 3810 rc = -ENOMEM; 3811 goto out_free_mem; 3812 } 3813 sglq_entry->buff_type = NVMET_BUFF_TYPE; 3814 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 3815 &sglq_entry->phys); 3816 if (sglq_entry->virt == NULL) { 3817 kfree(sglq_entry); 3818 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3819 "6304 Failure to allocate an " 3820 "NVMET buf:%d\n", i); 3821 rc = -ENOMEM; 3822 goto out_free_mem; 3823 } 3824 sglq_entry->sgl = sglq_entry->virt; 3825 memset(sglq_entry->sgl, 0, 3826 phba->cfg_sg_dma_buf_size); 3827 sglq_entry->state = SGL_FREED; 3828 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3829 } 3830 spin_lock_irq(&phba->hbalock); 3831 spin_lock(&phba->sli4_hba.sgl_list_lock); 3832 list_splice_init(&nvmet_sgl_list, 3833 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3834 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3835 spin_unlock_irq(&phba->hbalock); 3836 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3837 /* nvmet xri-sgl shrunk */ 3838 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3839 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3840 "6305 NVMET xri-sgl count decreased from " 3841 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3842 nvmet_xri_cnt); 3843 spin_lock_irq(&phba->hbalock); 3844 spin_lock(&phba->sli4_hba.sgl_list_lock); 3845 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3846 &nvmet_sgl_list); 3847 /* release extra nvmet sgls from list */ 3848 for (i = 0; i < xri_cnt; i++) { 3849 list_remove_head(&nvmet_sgl_list, 3850 sglq_entry, struct lpfc_sglq, list); 3851 if (sglq_entry) { 3852 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3853 sglq_entry->phys); 3854 kfree(sglq_entry); 3855 } 3856 } 3857 list_splice_init(&nvmet_sgl_list, 3858 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3859 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3860 spin_unlock_irq(&phba->hbalock); 3861 } else 3862 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3863 "6306 NVMET xri-sgl count unchanged: %d\n", 3864 nvmet_xri_cnt); 3865 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3866 3867 /* update xris to nvmet sgls on the list */ 3868 sglq_entry = NULL; 3869 sglq_entry_next = NULL; 3870 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3871 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3872 lxri = lpfc_sli4_next_xritag(phba); 3873 if (lxri == NO_XRI) { 3874 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3875 "6307 Failed 
to allocate xri for " 3876 "NVMET sgl\n"); 3877 rc = -ENOMEM; 3878 goto out_free_mem; 3879 } 3880 sglq_entry->sli4_lxritag = lxri; 3881 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3882 } 3883 return 0; 3884 3885 out_free_mem: 3886 lpfc_free_nvmet_sgl_list(phba); 3887 return rc; 3888 } 3889 3890 int 3891 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 3892 { 3893 LIST_HEAD(blist); 3894 struct lpfc_sli4_hdw_queue *qp; 3895 struct lpfc_io_buf *lpfc_cmd; 3896 struct lpfc_io_buf *iobufp, *prev_iobufp; 3897 int idx, cnt, xri, inserted; 3898 3899 cnt = 0; 3900 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3901 qp = &phba->sli4_hba.hdwq[idx]; 3902 spin_lock_irq(&qp->io_buf_list_get_lock); 3903 spin_lock(&qp->io_buf_list_put_lock); 3904 3905 /* Take everything off the get and put lists */ 3906 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 3907 list_splice(&qp->lpfc_io_buf_list_put, &blist); 3908 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 3909 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 3910 cnt += qp->get_io_bufs + qp->put_io_bufs; 3911 qp->get_io_bufs = 0; 3912 qp->put_io_bufs = 0; 3913 qp->total_io_bufs = 0; 3914 spin_unlock(&qp->io_buf_list_put_lock); 3915 spin_unlock_irq(&qp->io_buf_list_get_lock); 3916 } 3917 3918 /* 3919 * Take IO buffers off blist and put on cbuf sorted by XRI. 3920 * This is because POST_SGL takes a sequential range of XRIs 3921 * to post to the firmware. 3922 */ 3923 for (idx = 0; idx < cnt; idx++) { 3924 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 3925 if (!lpfc_cmd) 3926 return cnt; 3927 if (idx == 0) { 3928 list_add_tail(&lpfc_cmd->list, cbuf); 3929 continue; 3930 } 3931 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 3932 inserted = 0; 3933 prev_iobufp = NULL; 3934 list_for_each_entry(iobufp, cbuf, list) { 3935 if (xri < iobufp->cur_iocbq.sli4_xritag) { 3936 if (prev_iobufp) 3937 list_add(&lpfc_cmd->list, 3938 &prev_iobufp->list); 3939 else 3940 list_add(&lpfc_cmd->list, cbuf); 3941 inserted = 1; 3942 break; 3943 } 3944 prev_iobufp = iobufp; 3945 } 3946 if (!inserted) 3947 list_add_tail(&lpfc_cmd->list, cbuf); 3948 } 3949 return cnt; 3950 } 3951 3952 int 3953 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 3954 { 3955 struct lpfc_sli4_hdw_queue *qp; 3956 struct lpfc_io_buf *lpfc_cmd; 3957 int idx, cnt; 3958 3959 qp = phba->sli4_hba.hdwq; 3960 cnt = 0; 3961 while (!list_empty(cbuf)) { 3962 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3963 list_remove_head(cbuf, lpfc_cmd, 3964 struct lpfc_io_buf, list); 3965 if (!lpfc_cmd) 3966 return cnt; 3967 cnt++; 3968 qp = &phba->sli4_hba.hdwq[idx]; 3969 lpfc_cmd->hdwq_no = idx; 3970 lpfc_cmd->hdwq = qp; 3971 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 3972 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 3973 spin_lock(&qp->io_buf_list_put_lock); 3974 list_add_tail(&lpfc_cmd->list, 3975 &qp->lpfc_io_buf_list_put); 3976 qp->put_io_bufs++; 3977 qp->total_io_bufs++; 3978 spin_unlock(&qp->io_buf_list_put_lock); 3979 } 3980 } 3981 return cnt; 3982 } 3983 3984 /** 3985 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 3986 * @phba: pointer to lpfc hba data structure. 3987 * 3988 * This routine first calculates the sizes of the current els and allocated 3989 * scsi sgl lists, and then goes through all sgls to updates the physical 3990 * XRIs assigned due to port function reset. During port initialization, the 3991 * current els and allocated scsi sgl lists are 0s. 
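 * If a port reset lowers max_xri so that more IO buffers are allocated than
 * io_xri_max now allows, the surplus buffers are freed first and the
 * remaining ones are re-assigned fresh XRI tags.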
3992 *
3993 * Return codes
3994 * 0 - successful (for now, it always returns 0)
3995 **/
3996 int
3997 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
3998 {
3999 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4000 uint16_t i, lxri, els_xri_cnt;
4001 uint16_t io_xri_cnt, io_xri_max;
4002 LIST_HEAD(io_sgl_list);
4003 int rc, cnt;
4004
4005 /*
4006 * update on pci function's allocated nvme xri-sgl list
4007 */
4008
4009 /* maximum number of xris available for nvme buffers */
4010 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4011 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4012 phba->sli4_hba.io_xri_max = io_xri_max;
4013
4014 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4015 "6074 Current allocated XRI sgl count:%d, "
4016 "maximum XRI count:%d\n",
4017 phba->sli4_hba.io_xri_cnt,
4018 phba->sli4_hba.io_xri_max);
4019
4020 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4021
4022 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4023 /* max nvme xri shrunk below the allocated nvme buffers */
4024 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4025 phba->sli4_hba.io_xri_max;
4026 /* release the extra allocated nvme buffers */
4027 for (i = 0; i < io_xri_cnt; i++) {
4028 list_remove_head(&io_sgl_list, lpfc_ncmd,
4029 struct lpfc_io_buf, list);
4030 if (lpfc_ncmd) {
4031 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4032 lpfc_ncmd->data,
4033 lpfc_ncmd->dma_handle);
4034 kfree(lpfc_ncmd);
4035 }
4036 }
4037 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4038 }
4039
4040 /* update xris associated to remaining allocated nvme buffers */
4041 lpfc_ncmd = NULL;
4042 lpfc_ncmd_next = NULL;
4043 phba->sli4_hba.io_xri_cnt = cnt;
4044 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4045 &io_sgl_list, list) {
4046 lxri = lpfc_sli4_next_xritag(phba);
4047 if (lxri == NO_XRI) {
4048 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4049 "6075 Failed to allocate xri for "
4050 "nvme buffer\n");
4051 rc = -ENOMEM;
4052 goto out_free_mem;
4053 }
4054 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4055 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4056 }
4057 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4058 return 0;
4059
4060 out_free_mem:
4061 lpfc_io_free(phba);
4062 return rc;
4063 }
4064
4065 /**
4066 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4067 * @phba: pointer to lpfc hba data structure.
4068 * @num_to_alloc: The requested number of buffers to allocate.
4069 *
4070 * This routine allocates nvme buffers for a device with the SLI-4 interface
4071 * spec. The nvme buffer contains all the necessary information needed to
4072 * initiate an I/O. After allocating up to @num_to_alloc IO buffers and
4073 * putting them on a list, it posts them to the port by using SGL block post.
4074 *
4075 * Return codes:
4076 * int - number of IO buffers that were allocated and posted.
4077 * 0 = failure, less than num_to_alloc is a partial failure.
4078 **/
4079 int
4080 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4081 {
4082 struct lpfc_io_buf *lpfc_ncmd;
4083 struct lpfc_iocbq *pwqeq;
4084 uint16_t iotag, lxri = 0;
4085 int bcnt, num_posted;
4086 LIST_HEAD(prep_nblist);
4087 LIST_HEAD(post_nblist);
4088 LIST_HEAD(nvme_nblist);
4089
4090 phba->sli4_hba.io_xri_cnt = 0;
4091 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4092 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4093 if (!lpfc_ncmd)
4094 break;
4095 /*
4096 * Get memory from the pci pool to map the virt space to
4097 * pci bus space for an I/O.
The DMA buffer includes the 4098 * number of SGE's necessary to support the sg_tablesize. 4099 */ 4100 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4101 GFP_KERNEL, 4102 &lpfc_ncmd->dma_handle); 4103 if (!lpfc_ncmd->data) { 4104 kfree(lpfc_ncmd); 4105 break; 4106 } 4107 4108 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4109 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4110 } else { 4111 /* 4112 * 4K Page alignment is CRITICAL to BlockGuard, double 4113 * check to be sure. 4114 */ 4115 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4116 (((unsigned long)(lpfc_ncmd->data) & 4117 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4118 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4119 "3369 Memory alignment err: " 4120 "addr=%lx\n", 4121 (unsigned long)lpfc_ncmd->data); 4122 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4123 lpfc_ncmd->data, 4124 lpfc_ncmd->dma_handle); 4125 kfree(lpfc_ncmd); 4126 break; 4127 } 4128 } 4129 4130 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4131 4132 lxri = lpfc_sli4_next_xritag(phba); 4133 if (lxri == NO_XRI) { 4134 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4135 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4136 kfree(lpfc_ncmd); 4137 break; 4138 } 4139 pwqeq = &lpfc_ncmd->cur_iocbq; 4140 4141 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4142 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4143 if (iotag == 0) { 4144 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4145 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4146 kfree(lpfc_ncmd); 4147 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 4148 "6121 Failed to allocate IOTAG for" 4149 " XRI:0x%x\n", lxri); 4150 lpfc_sli4_free_xri(phba, lxri); 4151 break; 4152 } 4153 pwqeq->sli4_lxritag = lxri; 4154 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4155 pwqeq->context1 = lpfc_ncmd; 4156 4157 /* Initialize local short-hand pointers. 
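 * dma_sgl and dma_phys_sgl simply alias the DMA buffer and handle mapped
 * above.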
*/ 4158 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4159 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4160 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4161 spin_lock_init(&lpfc_ncmd->buf_lock); 4162 4163 /* add the nvme buffer to a post list */ 4164 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4165 phba->sli4_hba.io_xri_cnt++; 4166 } 4167 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4168 "6114 Allocate %d out of %d requested new NVME " 4169 "buffers\n", bcnt, num_to_alloc); 4170 4171 /* post the list of nvme buffer sgls to port if available */ 4172 if (!list_empty(&post_nblist)) 4173 num_posted = lpfc_sli4_post_io_sgl_list( 4174 phba, &post_nblist, bcnt); 4175 else 4176 num_posted = 0; 4177 4178 return num_posted; 4179 } 4180 4181 static uint64_t 4182 lpfc_get_wwpn(struct lpfc_hba *phba) 4183 { 4184 uint64_t wwn; 4185 int rc; 4186 LPFC_MBOXQ_t *mboxq; 4187 MAILBOX_t *mb; 4188 4189 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4190 GFP_KERNEL); 4191 if (!mboxq) 4192 return (uint64_t)-1; 4193 4194 /* First get WWN of HBA instance */ 4195 lpfc_read_nv(phba, mboxq); 4196 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4197 if (rc != MBX_SUCCESS) { 4198 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4199 "6019 Mailbox failed , mbxCmd x%x " 4200 "READ_NV, mbxStatus x%x\n", 4201 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4202 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4203 mempool_free(mboxq, phba->mbox_mem_pool); 4204 return (uint64_t) -1; 4205 } 4206 mb = &mboxq->u.mb; 4207 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4208 /* wwn is WWPN of HBA instance */ 4209 mempool_free(mboxq, phba->mbox_mem_pool); 4210 if (phba->sli_rev == LPFC_SLI_REV4) 4211 return be64_to_cpu(wwn); 4212 else 4213 return rol64(wwn, 32); 4214 } 4215 4216 /** 4217 * lpfc_create_port - Create an FC port 4218 * @phba: pointer to lpfc hba data structure. 4219 * @instance: a unique integer ID to this FC port. 4220 * @dev: pointer to the device data structure. 4221 * 4222 * This routine creates a FC port for the upper layer protocol. The FC port 4223 * can be created on top of either a physical port or a virtual port provided 4224 * by the HBA. This routine also allocates a SCSI host data structure (shost) 4225 * and associates the FC port created before adding the shost into the SCSI 4226 * layer. 4227 * 4228 * Return codes 4229 * @vport - pointer to the virtual N_Port data structure. 4230 * NULL - port create failed. 
4231 **/ 4232 struct lpfc_vport * 4233 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4234 { 4235 struct lpfc_vport *vport; 4236 struct Scsi_Host *shost = NULL; 4237 int error = 0; 4238 int i; 4239 uint64_t wwn; 4240 bool use_no_reset_hba = false; 4241 int rc; 4242 4243 if (lpfc_no_hba_reset_cnt) { 4244 if (phba->sli_rev < LPFC_SLI_REV4 && 4245 dev == &phba->pcidev->dev) { 4246 /* Reset the port first */ 4247 lpfc_sli_brdrestart(phba); 4248 rc = lpfc_sli_chipset_init(phba); 4249 if (rc) 4250 return NULL; 4251 } 4252 wwn = lpfc_get_wwpn(phba); 4253 } 4254 4255 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4256 if (wwn == lpfc_no_hba_reset[i]) { 4257 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4258 "6020 Setting use_no_reset port=%llx\n", 4259 wwn); 4260 use_no_reset_hba = true; 4261 break; 4262 } 4263 } 4264 4265 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4266 if (dev != &phba->pcidev->dev) { 4267 shost = scsi_host_alloc(&lpfc_vport_template, 4268 sizeof(struct lpfc_vport)); 4269 } else { 4270 if (!use_no_reset_hba) 4271 shost = scsi_host_alloc(&lpfc_template, 4272 sizeof(struct lpfc_vport)); 4273 else 4274 shost = scsi_host_alloc(&lpfc_template_no_hr, 4275 sizeof(struct lpfc_vport)); 4276 } 4277 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 4278 shost = scsi_host_alloc(&lpfc_template_nvme, 4279 sizeof(struct lpfc_vport)); 4280 } 4281 if (!shost) 4282 goto out; 4283 4284 vport = (struct lpfc_vport *) shost->hostdata; 4285 vport->phba = phba; 4286 vport->load_flag |= FC_LOADING; 4287 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4288 vport->fc_rscn_flush = 0; 4289 lpfc_get_vport_cfgparam(vport); 4290 4291 /* Adjust value in vport */ 4292 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4293 4294 shost->unique_id = instance; 4295 shost->max_id = LPFC_MAX_TARGET; 4296 shost->max_lun = vport->cfg_max_luns; 4297 shost->this_id = -1; 4298 shost->max_cmd_len = 16; 4299 4300 if (phba->sli_rev == LPFC_SLI_REV4) { 4301 if (!phba->cfg_fcp_mq_threshold || 4302 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4303 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4304 4305 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4306 phba->cfg_fcp_mq_threshold); 4307 4308 shost->dma_boundary = 4309 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4310 4311 if (phba->cfg_xpsgl && !phba->nvmet_support) 4312 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4313 else 4314 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4315 } else 4316 /* SLI-3 has a limited number of hardware queues (3), 4317 * thus there is only one for FCP processing. 4318 */ 4319 shost->nr_hw_queues = 1; 4320 4321 /* 4322 * Set initial can_queue value since 0 is no longer supported and 4323 * scsi_add_host will fail. This will be adjusted later based on the 4324 * max xri value determined in hba setup. 4325 */ 4326 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4327 if (dev != &phba->pcidev->dev) { 4328 shost->transportt = lpfc_vport_transport_template; 4329 vport->port_type = LPFC_NPIV_PORT; 4330 } else { 4331 shost->transportt = lpfc_transport_template; 4332 vport->port_type = LPFC_PHYSICAL_PORT; 4333 } 4334 4335 /* Initialize all internally managed lists. 
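 * This covers the node and receive-buffer lists as well as the discovery,
 * ELS and delayed-discovery timers set up below.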
*/ 4336 INIT_LIST_HEAD(&vport->fc_nodes); 4337 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4338 spin_lock_init(&vport->work_port_lock); 4339 4340 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4341 4342 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4343 4344 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4345 4346 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4347 lpfc_setup_bg(phba, shost); 4348 4349 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4350 if (error) 4351 goto out_put_shost; 4352 4353 spin_lock_irq(&phba->port_list_lock); 4354 list_add_tail(&vport->listentry, &phba->port_list); 4355 spin_unlock_irq(&phba->port_list_lock); 4356 return vport; 4357 4358 out_put_shost: 4359 scsi_host_put(shost); 4360 out: 4361 return NULL; 4362 } 4363 4364 /** 4365 * destroy_port - destroy an FC port 4366 * @vport: pointer to an lpfc virtual N_Port data structure. 4367 * 4368 * This routine destroys a FC port from the upper layer protocol. All the 4369 * resources associated with the port are released. 4370 **/ 4371 void 4372 destroy_port(struct lpfc_vport *vport) 4373 { 4374 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4375 struct lpfc_hba *phba = vport->phba; 4376 4377 lpfc_debugfs_terminate(vport); 4378 fc_remove_host(shost); 4379 scsi_remove_host(shost); 4380 4381 spin_lock_irq(&phba->port_list_lock); 4382 list_del_init(&vport->listentry); 4383 spin_unlock_irq(&phba->port_list_lock); 4384 4385 lpfc_cleanup(vport); 4386 return; 4387 } 4388 4389 /** 4390 * lpfc_get_instance - Get a unique integer ID 4391 * 4392 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4393 * uses the kernel idr facility to perform the task. 4394 * 4395 * Return codes: 4396 * instance - a unique integer ID allocated as the new instance. 4397 * -1 - lpfc get instance failed. 4398 **/ 4399 int 4400 lpfc_get_instance(void) 4401 { 4402 int ret; 4403 4404 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4405 return ret < 0 ? -1 : ret; 4406 } 4407 4408 /** 4409 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4410 * @shost: pointer to SCSI host data structure. 4411 * @time: elapsed time of the scan in jiffies. 4412 * 4413 * This routine is called by the SCSI layer with a SCSI host to determine 4414 * whether the scan host is finished. 4415 * 4416 * Note: there is no scan_start function as adapter initialization will have 4417 * asynchronously kicked off the link initialization. 4418 * 4419 * Return codes 4420 * 0 - SCSI host scan is not over yet. 4421 * 1 - SCSI host scan is over. 4422 **/ 4423 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4424 { 4425 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4426 struct lpfc_hba *phba = vport->phba; 4427 int stat = 0; 4428 4429 spin_lock_irq(shost->host_lock); 4430 4431 if (vport->load_flag & FC_UNLOADING) { 4432 stat = 1; 4433 goto finished; 4434 } 4435 if (time >= msecs_to_jiffies(30 * 1000)) { 4436 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4437 "0461 Scanning longer than 30 " 4438 "seconds. Continuing initialization\n"); 4439 stat = 1; 4440 goto finished; 4441 } 4442 if (time >= msecs_to_jiffies(15 * 1000) && 4443 phba->link_state <= LPFC_LINK_DOWN) { 4444 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4445 "0465 Link down longer than 15 " 4446 "seconds. 
Continuing initialization\n"); 4447 stat = 1; 4448 goto finished; 4449 } 4450 4451 if (vport->port_state != LPFC_VPORT_READY) 4452 goto finished; 4453 if (vport->num_disc_nodes || vport->fc_prli_sent) 4454 goto finished; 4455 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4456 goto finished; 4457 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4458 goto finished; 4459 4460 stat = 1; 4461 4462 finished: 4463 spin_unlock_irq(shost->host_lock); 4464 return stat; 4465 } 4466 4467 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4468 { 4469 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4470 struct lpfc_hba *phba = vport->phba; 4471 4472 fc_host_supported_speeds(shost) = 0; 4473 if (phba->lmt & LMT_128Gb) 4474 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4475 if (phba->lmt & LMT_64Gb) 4476 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4477 if (phba->lmt & LMT_32Gb) 4478 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4479 if (phba->lmt & LMT_16Gb) 4480 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4481 if (phba->lmt & LMT_10Gb) 4482 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4483 if (phba->lmt & LMT_8Gb) 4484 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4485 if (phba->lmt & LMT_4Gb) 4486 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4487 if (phba->lmt & LMT_2Gb) 4488 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4489 if (phba->lmt & LMT_1Gb) 4490 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4491 } 4492 4493 /** 4494 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4495 * @shost: pointer to SCSI host data structure. 4496 * 4497 * This routine initializes a given SCSI host attributes on a FC port. The 4498 * SCSI host can be either on top of a physical port or a virtual port. 4499 **/ 4500 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4501 { 4502 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4503 struct lpfc_hba *phba = vport->phba; 4504 /* 4505 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 4506 */ 4507 4508 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4509 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4510 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4511 4512 memset(fc_host_supported_fc4s(shost), 0, 4513 sizeof(fc_host_supported_fc4s(shost))); 4514 fc_host_supported_fc4s(shost)[2] = 1; 4515 fc_host_supported_fc4s(shost)[7] = 1; 4516 4517 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4518 sizeof fc_host_symbolic_name(shost)); 4519 4520 lpfc_host_supported_speeds_set(shost); 4521 4522 fc_host_maxframe_size(shost) = 4523 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4524 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4525 4526 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4527 4528 /* This value is also unchanging */ 4529 memset(fc_host_active_fc4s(shost), 0, 4530 sizeof(fc_host_active_fc4s(shost))); 4531 fc_host_active_fc4s(shost)[2] = 1; 4532 fc_host_active_fc4s(shost)[7] = 1; 4533 4534 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4535 spin_lock_irq(shost->host_lock); 4536 vport->load_flag &= ~FC_LOADING; 4537 spin_unlock_irq(shost->host_lock); 4538 } 4539 4540 /** 4541 * lpfc_stop_port_s3 - Stop SLI3 device port 4542 * @phba: pointer to lpfc hba data structure. 
4543 *
4544 * This routine is invoked to stop an SLI3 device port. It stops the device
4545 * from generating interrupts and stops the device driver's timers for the
4546 * device.
4547 **/
4548 static void
4549 lpfc_stop_port_s3(struct lpfc_hba *phba)
4550 {
4551 /* Clear all interrupt enable conditions */
4552 writel(0, phba->HCregaddr);
4553 readl(phba->HCregaddr); /* flush */
4554 /* Clear all pending interrupts */
4555 writel(0xffffffff, phba->HAregaddr);
4556 readl(phba->HAregaddr); /* flush */
4557
4558 /* Reset some HBA SLI setup states */
4559 lpfc_stop_hba_timers(phba);
4560 phba->pport->work_port_events = 0;
4561 }
4562
4563 /**
4564 * lpfc_stop_port_s4 - Stop SLI4 device port
4565 * @phba: pointer to lpfc hba data structure.
4566 *
4567 * This routine is invoked to stop an SLI4 device port. It stops the device
4568 * from generating interrupts and stops the device driver's timers for the
4569 * device.
4570 **/
4571 static void
4572 lpfc_stop_port_s4(struct lpfc_hba *phba)
4573 {
4574 /* Reset some HBA SLI4 setup states */
4575 lpfc_stop_hba_timers(phba);
4576 if (phba->pport)
4577 phba->pport->work_port_events = 0;
4578 phba->sli4_hba.intr_enable = 0;
4579 }
4580
4581 /**
4582 * lpfc_stop_port - Wrapper function for stopping hba port
4583 * @phba: Pointer to HBA context object.
4584 *
4585 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
4586 * the API jump table function pointer from the lpfc_hba struct.
4587 **/
4588 void
4589 lpfc_stop_port(struct lpfc_hba *phba)
4590 {
4591 phba->lpfc_stop_port(phba);
4592
4593 if (phba->wq)
4594 flush_workqueue(phba->wq);
4595 }
4596
4597 /**
4598 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4599 * @phba: Pointer to hba for which this call is being executed.
4600 *
4601 * This routine starts the timer waiting for the FCF rediscovery to complete.
4602 **/
4603 void
4604 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4605 {
4606 unsigned long fcf_redisc_wait_tmo =
4607 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4608 /* Start fcf rediscovery wait period timer */
4609 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4610 spin_lock_irq(&phba->hbalock);
4611 /* Allow action to new fcf asynchronous event */
4612 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4613 /* Mark the FCF rediscovery pending state */
4614 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4615 spin_unlock_irq(&phba->hbalock);
4616 }
4617
4618 /**
4619 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4620 * @t: pointer to the fcf.redisc_wait timer embedded in the lpfc_hba structure.
4621 *
4622 * This routine is invoked when the wait for FCF table rediscovery has
4623 * timed out. If new FCF record(s) have been discovered during the
4624 * wait period, a new FCF event shall be added to the FCOE async event
4625 * list, and the worker thread shall be woken up for processing from the
4626 * worker thread context.
4627 **/ 4628 static void 4629 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4630 { 4631 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4632 4633 /* Don't send FCF rediscovery event if timer cancelled */ 4634 spin_lock_irq(&phba->hbalock); 4635 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4636 spin_unlock_irq(&phba->hbalock); 4637 return; 4638 } 4639 /* Clear FCF rediscovery timer pending flag */ 4640 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4641 /* FCF rediscovery event to worker thread */ 4642 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4643 spin_unlock_irq(&phba->hbalock); 4644 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4645 "2776 FCF rediscover quiescent timer expired\n"); 4646 /* wake up worker thread */ 4647 lpfc_worker_wake_up(phba); 4648 } 4649 4650 /** 4651 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4652 * @phba: pointer to lpfc hba data structure. 4653 * @acqe_link: pointer to the async link completion queue entry. 4654 * 4655 * This routine is to parse the SLI4 link-attention link fault code. 4656 **/ 4657 static void 4658 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4659 struct lpfc_acqe_link *acqe_link) 4660 { 4661 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4662 case LPFC_ASYNC_LINK_FAULT_NONE: 4663 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4664 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4665 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 4666 break; 4667 default: 4668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4669 "0398 Unknown link fault code: x%x\n", 4670 bf_get(lpfc_acqe_link_fault, acqe_link)); 4671 break; 4672 } 4673 } 4674 4675 /** 4676 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4677 * @phba: pointer to lpfc hba data structure. 4678 * @acqe_link: pointer to the async link completion queue entry. 4679 * 4680 * This routine is to parse the SLI4 link attention type and translate it 4681 * into the base driver's link attention type coding. 4682 * 4683 * Return: Link attention type in terms of base driver's coding. 4684 **/ 4685 static uint8_t 4686 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4687 struct lpfc_acqe_link *acqe_link) 4688 { 4689 uint8_t att_type; 4690 4691 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4692 case LPFC_ASYNC_LINK_STATUS_DOWN: 4693 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4694 att_type = LPFC_ATT_LINK_DOWN; 4695 break; 4696 case LPFC_ASYNC_LINK_STATUS_UP: 4697 /* Ignore physical link up events - wait for logical link up */ 4698 att_type = LPFC_ATT_RESERVED; 4699 break; 4700 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4701 att_type = LPFC_ATT_LINK_UP; 4702 break; 4703 default: 4704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4705 "0399 Invalid link attention type: x%x\n", 4706 bf_get(lpfc_acqe_link_status, acqe_link)); 4707 att_type = LPFC_ATT_RESERVED; 4708 break; 4709 } 4710 return att_type; 4711 } 4712 4713 /** 4714 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4715 * @phba: pointer to lpfc hba data structure. 4716 * 4717 * This routine is to get an SLI3 FC port's link speed in Mbps. 4718 * 4719 * Return: link speed in terms of Mbps. 
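 * For example, a port reporting LPFC_LINK_SPEED_8GHZ is returned as 8000.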
4720 **/ 4721 uint32_t 4722 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4723 { 4724 uint32_t link_speed; 4725 4726 if (!lpfc_is_link_up(phba)) 4727 return 0; 4728 4729 if (phba->sli_rev <= LPFC_SLI_REV3) { 4730 switch (phba->fc_linkspeed) { 4731 case LPFC_LINK_SPEED_1GHZ: 4732 link_speed = 1000; 4733 break; 4734 case LPFC_LINK_SPEED_2GHZ: 4735 link_speed = 2000; 4736 break; 4737 case LPFC_LINK_SPEED_4GHZ: 4738 link_speed = 4000; 4739 break; 4740 case LPFC_LINK_SPEED_8GHZ: 4741 link_speed = 8000; 4742 break; 4743 case LPFC_LINK_SPEED_10GHZ: 4744 link_speed = 10000; 4745 break; 4746 case LPFC_LINK_SPEED_16GHZ: 4747 link_speed = 16000; 4748 break; 4749 default: 4750 link_speed = 0; 4751 } 4752 } else { 4753 if (phba->sli4_hba.link_state.logical_speed) 4754 link_speed = 4755 phba->sli4_hba.link_state.logical_speed; 4756 else 4757 link_speed = phba->sli4_hba.link_state.speed; 4758 } 4759 return link_speed; 4760 } 4761 4762 /** 4763 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 4764 * @phba: pointer to lpfc hba data structure. 4765 * @evt_code: asynchronous event code. 4766 * @speed_code: asynchronous event link speed code. 4767 * 4768 * This routine is to parse the giving SLI4 async event link speed code into 4769 * value of Mbps for the link speed. 4770 * 4771 * Return: link speed in terms of Mbps. 4772 **/ 4773 static uint32_t 4774 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4775 uint8_t speed_code) 4776 { 4777 uint32_t port_speed; 4778 4779 switch (evt_code) { 4780 case LPFC_TRAILER_CODE_LINK: 4781 switch (speed_code) { 4782 case LPFC_ASYNC_LINK_SPEED_ZERO: 4783 port_speed = 0; 4784 break; 4785 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4786 port_speed = 10; 4787 break; 4788 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4789 port_speed = 100; 4790 break; 4791 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4792 port_speed = 1000; 4793 break; 4794 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4795 port_speed = 10000; 4796 break; 4797 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4798 port_speed = 20000; 4799 break; 4800 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4801 port_speed = 25000; 4802 break; 4803 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4804 port_speed = 40000; 4805 break; 4806 default: 4807 port_speed = 0; 4808 } 4809 break; 4810 case LPFC_TRAILER_CODE_FC: 4811 switch (speed_code) { 4812 case LPFC_FC_LA_SPEED_UNKNOWN: 4813 port_speed = 0; 4814 break; 4815 case LPFC_FC_LA_SPEED_1G: 4816 port_speed = 1000; 4817 break; 4818 case LPFC_FC_LA_SPEED_2G: 4819 port_speed = 2000; 4820 break; 4821 case LPFC_FC_LA_SPEED_4G: 4822 port_speed = 4000; 4823 break; 4824 case LPFC_FC_LA_SPEED_8G: 4825 port_speed = 8000; 4826 break; 4827 case LPFC_FC_LA_SPEED_10G: 4828 port_speed = 10000; 4829 break; 4830 case LPFC_FC_LA_SPEED_16G: 4831 port_speed = 16000; 4832 break; 4833 case LPFC_FC_LA_SPEED_32G: 4834 port_speed = 32000; 4835 break; 4836 case LPFC_FC_LA_SPEED_64G: 4837 port_speed = 64000; 4838 break; 4839 case LPFC_FC_LA_SPEED_128G: 4840 port_speed = 128000; 4841 break; 4842 default: 4843 port_speed = 0; 4844 } 4845 break; 4846 default: 4847 port_speed = 0; 4848 } 4849 return port_speed; 4850 } 4851 4852 /** 4853 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4854 * @phba: pointer to lpfc hba data structure. 4855 * @acqe_link: pointer to the async link completion queue entry. 4856 * 4857 * This routine is to handle the SLI4 asynchronous FCoE link event. 
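 * The speed, duplex and fault information is captured from the ACQE; in FC
 * mode a READ_TOPOLOGY mailbox command is then issued, while in FCoE mode
 * the READ_TOPOLOGY completion is faked locally from the ACQE contents.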
4858 **/ 4859 static void 4860 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 4861 struct lpfc_acqe_link *acqe_link) 4862 { 4863 struct lpfc_dmabuf *mp; 4864 LPFC_MBOXQ_t *pmb; 4865 MAILBOX_t *mb; 4866 struct lpfc_mbx_read_top *la; 4867 uint8_t att_type; 4868 int rc; 4869 4870 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 4871 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 4872 return; 4873 phba->fcoe_eventtag = acqe_link->event_tag; 4874 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4875 if (!pmb) { 4876 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4877 "0395 The mboxq allocation failed\n"); 4878 return; 4879 } 4880 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4881 if (!mp) { 4882 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4883 "0396 The lpfc_dmabuf allocation failed\n"); 4884 goto out_free_pmb; 4885 } 4886 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4887 if (!mp->virt) { 4888 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4889 "0397 The mbuf allocation failed\n"); 4890 goto out_free_dmabuf; 4891 } 4892 4893 /* Cleanup any outstanding ELS commands */ 4894 lpfc_els_flush_all_cmd(phba); 4895 4896 /* Block ELS IOCBs until we have done process link event */ 4897 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4898 4899 /* Update link event statistics */ 4900 phba->sli.slistat.link_event++; 4901 4902 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4903 lpfc_read_topology(phba, pmb, mp); 4904 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4905 pmb->vport = phba->pport; 4906 4907 /* Keep the link status for extra SLI4 state machine reference */ 4908 phba->sli4_hba.link_state.speed = 4909 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 4910 bf_get(lpfc_acqe_link_speed, acqe_link)); 4911 phba->sli4_hba.link_state.duplex = 4912 bf_get(lpfc_acqe_link_duplex, acqe_link); 4913 phba->sli4_hba.link_state.status = 4914 bf_get(lpfc_acqe_link_status, acqe_link); 4915 phba->sli4_hba.link_state.type = 4916 bf_get(lpfc_acqe_link_type, acqe_link); 4917 phba->sli4_hba.link_state.number = 4918 bf_get(lpfc_acqe_link_number, acqe_link); 4919 phba->sli4_hba.link_state.fault = 4920 bf_get(lpfc_acqe_link_fault, acqe_link); 4921 phba->sli4_hba.link_state.logical_speed = 4922 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 4923 4924 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4925 "2900 Async FC/FCoE Link event - Speed:%dGBit " 4926 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 4927 "Logical speed:%dMbps Fault:%d\n", 4928 phba->sli4_hba.link_state.speed, 4929 phba->sli4_hba.link_state.topology, 4930 phba->sli4_hba.link_state.status, 4931 phba->sli4_hba.link_state.type, 4932 phba->sli4_hba.link_state.number, 4933 phba->sli4_hba.link_state.logical_speed, 4934 phba->sli4_hba.link_state.fault); 4935 /* 4936 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 4937 * topology info. Note: Optional for non FC-AL ports. 4938 */ 4939 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4940 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4941 if (rc == MBX_NOT_FINISHED) 4942 goto out_free_dmabuf; 4943 return; 4944 } 4945 /* 4946 * For FCoE Mode: fill in all the topology information we need and call 4947 * the READ_TOPOLOGY completion routine to continue without actually 4948 * sending the READ_TOPOLOGY mailbox command to the port. 
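 * The faked mailbox is marked MBX_SUCCESS and reports a point-to-point
 * topology so the normal completion handler can run unchanged.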
4949 */
4950 /* Initialize completion status */
4951 mb = &pmb->u.mb;
4952 mb->mbxStatus = MBX_SUCCESS;
4953
4954 /* Parse port fault information field */
4955 lpfc_sli4_parse_latt_fault(phba, acqe_link);
4956
4957 /* Parse and translate link attention fields */
4958 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4959 la->eventTag = acqe_link->event_tag;
4960 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4961 bf_set(lpfc_mbx_read_top_link_spd, la,
4962 (bf_get(lpfc_acqe_link_speed, acqe_link)));
4963
4964 /* Fake the following irrelevant fields */
4965 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4966 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4967 bf_set(lpfc_mbx_read_top_il, la, 0);
4968 bf_set(lpfc_mbx_read_top_pb, la, 0);
4969 bf_set(lpfc_mbx_read_top_fa, la, 0);
4970 bf_set(lpfc_mbx_read_top_mm, la, 0);
4971
4972 /* Invoke the lpfc_handle_latt mailbox command callback function */
4973 lpfc_mbx_cmpl_read_topology(phba, pmb);
4974
4975 return;
4976
4977 out_free_dmabuf:
4978 kfree(mp);
4979 out_free_pmb:
4980 mempool_free(pmb, phba->mbox_mem_pool);
4981 }
4982
4983 /**
4984 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
4985 * topology.
4986 * @phba: pointer to lpfc hba data structure.
4988 * @speed_code: asynchronous event link speed code.
4989 *
4990 * This routine is to parse the given SLI4 async event link speed code into
4991 * the value of Read topology link speed.
4992 *
4993 * Return: link speed in terms of Read topology.
4994 **/
4995 static uint8_t
4996 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
4997 {
4998 uint8_t port_speed;
4999
5000 switch (speed_code) {
5001 case LPFC_FC_LA_SPEED_1G:
5002 port_speed = LPFC_LINK_SPEED_1GHZ;
5003 break;
5004 case LPFC_FC_LA_SPEED_2G:
5005 port_speed = LPFC_LINK_SPEED_2GHZ;
5006 break;
5007 case LPFC_FC_LA_SPEED_4G:
5008 port_speed = LPFC_LINK_SPEED_4GHZ;
5009 break;
5010 case LPFC_FC_LA_SPEED_8G:
5011 port_speed = LPFC_LINK_SPEED_8GHZ;
5012 break;
5013 case LPFC_FC_LA_SPEED_16G:
5014 port_speed = LPFC_LINK_SPEED_16GHZ;
5015 break;
5016 case LPFC_FC_LA_SPEED_32G:
5017 port_speed = LPFC_LINK_SPEED_32GHZ;
5018 break;
5019 case LPFC_FC_LA_SPEED_64G:
5020 port_speed = LPFC_LINK_SPEED_64GHZ;
5021 break;
5022 case LPFC_FC_LA_SPEED_128G:
5023 port_speed = LPFC_LINK_SPEED_128GHZ;
5024 break;
5025 case LPFC_FC_LA_SPEED_256G:
5026 port_speed = LPFC_LINK_SPEED_256GHZ;
5027 break;
5028 default:
5029 port_speed = 0;
5030 break;
5031 }
5032
5033 return port_speed;
5034 }
5035
5036 #define trunk_link_status(__idx)\
5037 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5038 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5039 "Link up" : "Link down") : "NA"
5040 /* Did port __idx report an error? */
5041 #define trunk_port_fault(__idx)\
5042 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5043 (port_fault & (1 << __idx) ?
"YES" : "NO") : "NA" 5044 5045 static void 5046 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 5047 struct lpfc_acqe_fc_la *acqe_fc) 5048 { 5049 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 5050 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 5051 5052 phba->sli4_hba.link_state.speed = 5053 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5054 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5055 5056 phba->sli4_hba.link_state.logical_speed = 5057 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5058 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 5059 phba->fc_linkspeed = 5060 lpfc_async_link_speed_to_read_top( 5061 phba, 5062 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5063 5064 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 5065 phba->trunk_link.link0.state = 5066 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 5067 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5068 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 5069 } 5070 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 5071 phba->trunk_link.link1.state = 5072 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 5073 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5074 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 5075 } 5076 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 5077 phba->trunk_link.link2.state = 5078 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 5079 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5080 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 5081 } 5082 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 5083 phba->trunk_link.link3.state = 5084 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 5085 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5086 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 5087 } 5088 5089 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5090 "2910 Async FC Trunking Event - Speed:%d\n" 5091 "\tLogical speed:%d " 5092 "port0: %s port1: %s port2: %s port3: %s\n", 5093 phba->sli4_hba.link_state.speed, 5094 phba->sli4_hba.link_state.logical_speed, 5095 trunk_link_status(0), trunk_link_status(1), 5096 trunk_link_status(2), trunk_link_status(3)); 5097 5098 if (port_fault) 5099 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5100 "3202 trunk error:0x%x (%s) seen on port0:%s " 5101 /* 5102 * SLI-4: We have only 0xA error codes 5103 * defined as of now. print an appropriate 5104 * message in case driver needs to be updated. 5105 */ 5106 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 5107 "UNDEFINED. update driver." : trunk_errmsg[err], 5108 trunk_port_fault(0), trunk_port_fault(1), 5109 trunk_port_fault(2), trunk_port_fault(3)); 5110 } 5111 5112 5113 /** 5114 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 5115 * @phba: pointer to lpfc hba data structure. 5116 * @acqe_fc: pointer to the async fc completion queue entry. 5117 * 5118 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 5119 * that the event was received and then issue a read_topology mailbox command so 5120 * that the rest of the driver will treat it the same as SLI3. 
5121 **/ 5122 static void 5123 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 5124 { 5125 struct lpfc_dmabuf *mp; 5126 LPFC_MBOXQ_t *pmb; 5127 MAILBOX_t *mb; 5128 struct lpfc_mbx_read_top *la; 5129 int rc; 5130 5131 if (bf_get(lpfc_trailer_type, acqe_fc) != 5132 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 5133 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5134 "2895 Non FC link Event detected.(%d)\n", 5135 bf_get(lpfc_trailer_type, acqe_fc)); 5136 return; 5137 } 5138 5139 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5140 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 5141 lpfc_update_trunk_link_status(phba, acqe_fc); 5142 return; 5143 } 5144 5145 /* Keep the link status for extra SLI4 state machine reference */ 5146 phba->sli4_hba.link_state.speed = 5147 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5148 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5149 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 5150 phba->sli4_hba.link_state.topology = 5151 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 5152 phba->sli4_hba.link_state.status = 5153 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 5154 phba->sli4_hba.link_state.type = 5155 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 5156 phba->sli4_hba.link_state.number = 5157 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 5158 phba->sli4_hba.link_state.fault = 5159 bf_get(lpfc_acqe_link_fault, acqe_fc); 5160 5161 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5162 LPFC_FC_LA_TYPE_LINK_DOWN) 5163 phba->sli4_hba.link_state.logical_speed = 0; 5164 else if (!phba->sli4_hba.conf_trunk) 5165 phba->sli4_hba.link_state.logical_speed = 5166 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5167 5168 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5169 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 5170 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 5171 "%dMbps Fault:%d\n", 5172 phba->sli4_hba.link_state.speed, 5173 phba->sli4_hba.link_state.topology, 5174 phba->sli4_hba.link_state.status, 5175 phba->sli4_hba.link_state.type, 5176 phba->sli4_hba.link_state.number, 5177 phba->sli4_hba.link_state.logical_speed, 5178 phba->sli4_hba.link_state.fault); 5179 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5180 if (!pmb) { 5181 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5182 "2897 The mboxq allocation failed\n"); 5183 return; 5184 } 5185 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5186 if (!mp) { 5187 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5188 "2898 The lpfc_dmabuf allocation failed\n"); 5189 goto out_free_pmb; 5190 } 5191 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5192 if (!mp->virt) { 5193 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5194 "2899 The mbuf allocation failed\n"); 5195 goto out_free_dmabuf; 5196 } 5197 5198 /* Cleanup any outstanding ELS commands */ 5199 lpfc_els_flush_all_cmd(phba); 5200 5201 /* Block ELS IOCBs until we have done process link event */ 5202 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5203 5204 /* Update link event statistics */ 5205 phba->sli.slistat.link_event++; 5206 5207 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5208 lpfc_read_topology(phba, pmb, mp); 5209 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5210 pmb->vport = phba->pport; 5211 5212 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 5213 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 5214 5215 switch (phba->sli4_hba.link_state.status) { 5216 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 5217 phba->link_flag |= LS_MDS_LINK_DOWN; 5218 break; 5219 case 
LPFC_FC_LA_TYPE_MDS_LOOPBACK: 5220 phba->link_flag |= LS_MDS_LOOPBACK; 5221 break; 5222 default: 5223 break; 5224 } 5225 5226 /* Initialize completion status */ 5227 mb = &pmb->u.mb; 5228 mb->mbxStatus = MBX_SUCCESS; 5229 5230 /* Parse port fault information field */ 5231 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 5232 5233 /* Parse and translate link attention fields */ 5234 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 5235 la->eventTag = acqe_fc->event_tag; 5236 5237 if (phba->sli4_hba.link_state.status == 5238 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 5239 bf_set(lpfc_mbx_read_top_att_type, la, 5240 LPFC_FC_LA_TYPE_UNEXP_WWPN); 5241 } else { 5242 bf_set(lpfc_mbx_read_top_att_type, la, 5243 LPFC_FC_LA_TYPE_LINK_DOWN); 5244 } 5245 /* Invoke the mailbox command callback function */ 5246 lpfc_mbx_cmpl_read_topology(phba, pmb); 5247 5248 return; 5249 } 5250 5251 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5252 if (rc == MBX_NOT_FINISHED) 5253 goto out_free_dmabuf; 5254 return; 5255 5256 out_free_dmabuf: 5257 kfree(mp); 5258 out_free_pmb: 5259 mempool_free(pmb, phba->mbox_mem_pool); 5260 } 5261 5262 /** 5263 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 5264 * @phba: pointer to lpfc hba data structure. 5265 * @acqe_fc: pointer to the async SLI completion queue entry. 5266 * 5267 * This routine is to handle the SLI4 asynchronous SLI events. 5268 **/ 5269 static void 5270 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 5271 { 5272 char port_name; 5273 char message[128]; 5274 uint8_t status; 5275 uint8_t evt_type; 5276 uint8_t operational = 0; 5277 struct temp_event temp_event_data; 5278 struct lpfc_acqe_misconfigured_event *misconfigured; 5279 struct Scsi_Host *shost; 5280 struct lpfc_vport **vports; 5281 int rc, i; 5282 5283 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 5284 5285 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5286 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 5287 "x%08x SLI Event Type:%d\n", 5288 acqe_sli->event_data1, acqe_sli->event_data2, 5289 evt_type); 5290 5291 port_name = phba->Port[0]; 5292 if (port_name == 0x00) 5293 port_name = '?'; /* get port name is empty */ 5294 5295 switch (evt_type) { 5296 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 5297 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5298 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 5299 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5300 5301 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5302 "3190 Over Temperature:%d Celsius- Port Name %c\n", 5303 acqe_sli->event_data1, port_name); 5304 5305 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 5306 shost = lpfc_shost_from_vport(phba->pport); 5307 fc_host_post_vendor_event(shost, fc_get_event_number(), 5308 sizeof(temp_event_data), 5309 (char *)&temp_event_data, 5310 SCSI_NL_VID_TYPE_PCI 5311 | PCI_VENDOR_ID_EMULEX); 5312 break; 5313 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 5314 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5315 temp_event_data.event_code = LPFC_NORMAL_TEMP; 5316 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5317 5318 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5319 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 5320 acqe_sli->event_data1, port_name); 5321 5322 shost = lpfc_shost_from_vport(phba->pport); 5323 fc_host_post_vendor_event(shost, fc_get_event_number(), 5324 sizeof(temp_event_data), 5325 (char *)&temp_event_data, 5326 SCSI_NL_VID_TYPE_PCI 5327 | PCI_VENDOR_ID_EMULEX); 5328 break; 5329 case 
LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 5330 misconfigured = (struct lpfc_acqe_misconfigured_event *) 5331 &acqe_sli->event_data1; 5332 5333 /* fetch the status for this port */ 5334 switch (phba->sli4_hba.lnk_info.lnk_no) { 5335 case LPFC_LINK_NUMBER_0: 5336 status = bf_get(lpfc_sli_misconfigured_port0_state, 5337 &misconfigured->theEvent); 5338 operational = bf_get(lpfc_sli_misconfigured_port0_op, 5339 &misconfigured->theEvent); 5340 break; 5341 case LPFC_LINK_NUMBER_1: 5342 status = bf_get(lpfc_sli_misconfigured_port1_state, 5343 &misconfigured->theEvent); 5344 operational = bf_get(lpfc_sli_misconfigured_port1_op, 5345 &misconfigured->theEvent); 5346 break; 5347 case LPFC_LINK_NUMBER_2: 5348 status = bf_get(lpfc_sli_misconfigured_port2_state, 5349 &misconfigured->theEvent); 5350 operational = bf_get(lpfc_sli_misconfigured_port2_op, 5351 &misconfigured->theEvent); 5352 break; 5353 case LPFC_LINK_NUMBER_3: 5354 status = bf_get(lpfc_sli_misconfigured_port3_state, 5355 &misconfigured->theEvent); 5356 operational = bf_get(lpfc_sli_misconfigured_port3_op, 5357 &misconfigured->theEvent); 5358 break; 5359 default: 5360 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5361 "3296 " 5362 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 5363 "event: Invalid link %d", 5364 phba->sli4_hba.lnk_info.lnk_no); 5365 return; 5366 } 5367 5368 /* Skip if optic state unchanged */ 5369 if (phba->sli4_hba.lnk_info.optic_state == status) 5370 return; 5371 5372 switch (status) { 5373 case LPFC_SLI_EVENT_STATUS_VALID: 5374 sprintf(message, "Physical Link is functional"); 5375 break; 5376 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 5377 sprintf(message, "Optics faulted/incorrectly " 5378 "installed/not installed - Reseat optics, " 5379 "if issue not resolved, replace."); 5380 break; 5381 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 5382 sprintf(message, 5383 "Optics of two types installed - Remove one " 5384 "optic or install matching pair of optics."); 5385 break; 5386 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 5387 sprintf(message, "Incompatible optics - Replace with " 5388 "compatible optics for card to function."); 5389 break; 5390 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 5391 sprintf(message, "Unqualified optics - Replace with " 5392 "Avago optics for Warranty and Technical " 5393 "Support - Link is%s operational", 5394 (operational) ? " not" : ""); 5395 break; 5396 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 5397 sprintf(message, "Uncertified optics - Replace with " 5398 "Avago-certified optics to enable link " 5399 "operation - Link is%s operational", 5400 (operational) ? 
" not" : ""); 5401 break; 5402 default: 5403 /* firmware is reporting a status we don't know about */ 5404 sprintf(message, "Unknown event status x%02x", status); 5405 break; 5406 } 5407 5408 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 5409 rc = lpfc_sli4_read_config(phba); 5410 if (rc) { 5411 phba->lmt = 0; 5412 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5413 "3194 Unable to retrieve supported " 5414 "speeds, rc = 0x%x\n", rc); 5415 } 5416 vports = lpfc_create_vport_work_array(phba); 5417 if (vports != NULL) { 5418 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5419 i++) { 5420 shost = lpfc_shost_from_vport(vports[i]); 5421 lpfc_host_supported_speeds_set(shost); 5422 } 5423 } 5424 lpfc_destroy_vport_work_array(phba, vports); 5425 5426 phba->sli4_hba.lnk_info.optic_state = status; 5427 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5428 "3176 Port Name %c %s\n", port_name, message); 5429 break; 5430 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 5431 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5432 "3192 Remote DPort Test Initiated - " 5433 "Event Data1:x%08x Event Data2: x%08x\n", 5434 acqe_sli->event_data1, acqe_sli->event_data2); 5435 break; 5436 default: 5437 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5438 "3193 Async SLI event - Event Data1:x%08x Event Data2:" 5439 "x%08x SLI Event Type:%d\n", 5440 acqe_sli->event_data1, acqe_sli->event_data2, 5441 evt_type); 5442 break; 5443 } 5444 } 5445 5446 /** 5447 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 5448 * @vport: pointer to vport data structure. 5449 * 5450 * This routine is to perform Clear Virtual Link (CVL) on a vport in 5451 * response to a CVL event. 5452 * 5453 * Return the pointer to the ndlp with the vport if successful, otherwise 5454 * return NULL. 5455 **/ 5456 static struct lpfc_nodelist * 5457 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 5458 { 5459 struct lpfc_nodelist *ndlp; 5460 struct Scsi_Host *shost; 5461 struct lpfc_hba *phba; 5462 5463 if (!vport) 5464 return NULL; 5465 phba = vport->phba; 5466 if (!phba) 5467 return NULL; 5468 ndlp = lpfc_findnode_did(vport, Fabric_DID); 5469 if (!ndlp) { 5470 /* Cannot find existing Fabric ndlp, so allocate a new one */ 5471 ndlp = lpfc_nlp_init(vport, Fabric_DID); 5472 if (!ndlp) 5473 return 0; 5474 /* Set the node type */ 5475 ndlp->nlp_type |= NLP_FABRIC; 5476 /* Put ndlp onto node list */ 5477 lpfc_enqueue_node(vport, ndlp); 5478 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 5479 /* re-setup ndlp without removing from node list */ 5480 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 5481 if (!ndlp) 5482 return 0; 5483 } 5484 if ((phba->pport->port_state < LPFC_FLOGI) && 5485 (phba->pport->port_state != LPFC_VPORT_FAILED)) 5486 return NULL; 5487 /* If virtual link is not yet instantiated ignore CVL */ 5488 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 5489 && (vport->port_state != LPFC_VPORT_FAILED)) 5490 return NULL; 5491 shost = lpfc_shost_from_vport(vport); 5492 if (!shost) 5493 return NULL; 5494 lpfc_linkdown_port(vport); 5495 lpfc_cleanup_pending_mbox(vport); 5496 spin_lock_irq(shost->host_lock); 5497 vport->fc_flag |= FC_VPORT_CVL_RCVD; 5498 spin_unlock_irq(shost->host_lock); 5499 5500 return ndlp; 5501 } 5502 5503 /** 5504 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 5505 * @vport: pointer to lpfc hba data structure. 5506 * 5507 * This routine is to perform Clear Virtual Link (CVL) on all vports in 5508 * response to a FCF dead event. 
5509 **/ 5510 static void 5511 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 5512 { 5513 struct lpfc_vport **vports; 5514 int i; 5515 5516 vports = lpfc_create_vport_work_array(phba); 5517 if (vports) 5518 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 5519 lpfc_sli4_perform_vport_cvl(vports[i]); 5520 lpfc_destroy_vport_work_array(phba, vports); 5521 } 5522 5523 /** 5524 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 5525 * @phba: pointer to lpfc hba data structure. 5526 * @acqe_link: pointer to the async fcoe completion queue entry. 5527 * 5528 * This routine is to handle the SLI4 asynchronous fcoe event. 5529 **/ 5530 static void 5531 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 5532 struct lpfc_acqe_fip *acqe_fip) 5533 { 5534 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 5535 int rc; 5536 struct lpfc_vport *vport; 5537 struct lpfc_nodelist *ndlp; 5538 struct Scsi_Host *shost; 5539 int active_vlink_present; 5540 struct lpfc_vport **vports; 5541 int i; 5542 5543 phba->fc_eventTag = acqe_fip->event_tag; 5544 phba->fcoe_eventtag = acqe_fip->event_tag; 5545 switch (event_type) { 5546 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5547 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5548 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5549 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5550 LOG_DISCOVERY, 5551 "2546 New FCF event, evt_tag:x%x, " 5552 "index:x%x\n", 5553 acqe_fip->event_tag, 5554 acqe_fip->index); 5555 else 5556 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 5557 LOG_DISCOVERY, 5558 "2788 FCF param modified event, " 5559 "evt_tag:x%x, index:x%x\n", 5560 acqe_fip->event_tag, 5561 acqe_fip->index); 5562 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5563 /* 5564 * During period of FCF discovery, read the FCF 5565 * table record indexed by the event to update 5566 * FCF roundrobin failover eligible FCF bmask. 5567 */ 5568 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5569 LOG_DISCOVERY, 5570 "2779 Read FCF (x%x) for updating " 5571 "roundrobin FCF failover bmask\n", 5572 acqe_fip->index); 5573 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5574 } 5575 5576 /* If the FCF discovery is in progress, do nothing. */ 5577 spin_lock_irq(&phba->hbalock); 5578 if (phba->hba_flag & FCF_TS_INPROG) { 5579 spin_unlock_irq(&phba->hbalock); 5580 break; 5581 } 5582 /* If fast FCF failover rescan event is pending, do nothing */ 5583 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 5584 spin_unlock_irq(&phba->hbalock); 5585 break; 5586 } 5587 5588 /* If the FCF has been in discovered state, do nothing. 
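	 * FCF_SCAN_DONE indicates a prior FCF table scan has already
	 * completed, so no new scan is started for this event.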
*/ 5589 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5590 spin_unlock_irq(&phba->hbalock); 5591 break; 5592 } 5593 spin_unlock_irq(&phba->hbalock); 5594 5595 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5596 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5597 "2770 Start FCF table scan per async FCF " 5598 "event, evt_tag:x%x, index:x%x\n", 5599 acqe_fip->event_tag, acqe_fip->index); 5600 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5601 LPFC_FCOE_FCF_GET_FIRST); 5602 if (rc) 5603 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5604 "2547 Issue FCF scan read FCF mailbox " 5605 "command failed (x%x)\n", rc); 5606 break; 5607 5608 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5609 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5610 "2548 FCF Table full count 0x%x tag 0x%x\n", 5611 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5612 acqe_fip->event_tag); 5613 break; 5614 5615 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5616 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5617 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5618 "2549 FCF (x%x) disconnected from network, " 5619 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 5620 /* 5621 * If we are in the middle of FCF failover process, clear 5622 * the corresponding FCF bit in the roundrobin bitmap. 5623 */ 5624 spin_lock_irq(&phba->hbalock); 5625 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 5626 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 5627 spin_unlock_irq(&phba->hbalock); 5628 /* Update FLOGI FCF failover eligible FCF bmask */ 5629 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 5630 break; 5631 } 5632 spin_unlock_irq(&phba->hbalock); 5633 5634 /* If the event is not for currently used fcf do nothing */ 5635 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 5636 break; 5637 5638 /* 5639 * Otherwise, request the port to rediscover the entire FCF 5640 * table for a fast recovery from case that the current FCF 5641 * is no longer valid as we are not in the middle of FCF 5642 * failover process already. 5643 */ 5644 spin_lock_irq(&phba->hbalock); 5645 /* Mark the fast failover process in progress */ 5646 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 5647 spin_unlock_irq(&phba->hbalock); 5648 5649 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5650 "2771 Start FCF fast failover process due to " 5651 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 5652 "\n", acqe_fip->event_tag, acqe_fip->index); 5653 rc = lpfc_sli4_redisc_fcf_table(phba); 5654 if (rc) { 5655 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5656 LOG_DISCOVERY, 5657 "2772 Issue FCF rediscover mailbox " 5658 "command failed, fail through to FCF " 5659 "dead event\n"); 5660 spin_lock_irq(&phba->hbalock); 5661 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 5662 spin_unlock_irq(&phba->hbalock); 5663 /* 5664 * Last resort will fail over by treating this 5665 * as a link down to FCF registration. 5666 */ 5667 lpfc_sli4_fcf_dead_failthrough(phba); 5668 } else { 5669 /* Reset FCF roundrobin bmask for new discovery */ 5670 lpfc_sli4_clear_fcf_rr_bmask(phba); 5671 /* 5672 * Handling fast FCF failover to a DEAD FCF event is 5673 * considered equalivant to receiving CVL to all vports. 
5674 */ 5675 lpfc_sli4_perform_all_vport_cvl(phba); 5676 } 5677 break; 5678 case LPFC_FIP_EVENT_TYPE_CVL: 5679 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5680 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5681 "2718 Clear Virtual Link Received for VPI 0x%x" 5682 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5683 5684 vport = lpfc_find_vport_by_vpid(phba, 5685 acqe_fip->index); 5686 ndlp = lpfc_sli4_perform_vport_cvl(vport); 5687 if (!ndlp) 5688 break; 5689 active_vlink_present = 0; 5690 5691 vports = lpfc_create_vport_work_array(phba); 5692 if (vports) { 5693 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5694 i++) { 5695 if ((!(vports[i]->fc_flag & 5696 FC_VPORT_CVL_RCVD)) && 5697 (vports[i]->port_state > LPFC_FDISC)) { 5698 active_vlink_present = 1; 5699 break; 5700 } 5701 } 5702 lpfc_destroy_vport_work_array(phba, vports); 5703 } 5704 5705 /* 5706 * Don't re-instantiate if vport is marked for deletion. 5707 * If we are here first then vport_delete is going to wait 5708 * for discovery to complete. 5709 */ 5710 if (!(vport->load_flag & FC_UNLOADING) && 5711 active_vlink_present) { 5712 /* 5713 * If there are other active VLinks present, 5714 * re-instantiate the Vlink using FDISC. 5715 */ 5716 mod_timer(&ndlp->nlp_delayfunc, 5717 jiffies + msecs_to_jiffies(1000)); 5718 shost = lpfc_shost_from_vport(vport); 5719 spin_lock_irq(shost->host_lock); 5720 ndlp->nlp_flag |= NLP_DELAY_TMO; 5721 spin_unlock_irq(shost->host_lock); 5722 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 5723 vport->port_state = LPFC_FDISC; 5724 } else { 5725 /* 5726 * Otherwise, we request port to rediscover 5727 * the entire FCF table for a fast recovery 5728 * from possible case that the current FCF 5729 * is no longer valid if we are not already 5730 * in the FCF failover process. 5731 */ 5732 spin_lock_irq(&phba->hbalock); 5733 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5734 spin_unlock_irq(&phba->hbalock); 5735 break; 5736 } 5737 /* Mark the fast failover process in progress */ 5738 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 5739 spin_unlock_irq(&phba->hbalock); 5740 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5741 LOG_DISCOVERY, 5742 "2773 Start FCF failover per CVL, " 5743 "evt_tag:x%x\n", acqe_fip->event_tag); 5744 rc = lpfc_sli4_redisc_fcf_table(phba); 5745 if (rc) { 5746 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5747 LOG_DISCOVERY, 5748 "2774 Issue FCF rediscover " 5749 "mailbox command failed, " 5750 "through to CVL event\n"); 5751 spin_lock_irq(&phba->hbalock); 5752 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 5753 spin_unlock_irq(&phba->hbalock); 5754 /* 5755 * Last resort will be re-try on the 5756 * the current registered FCF entry. 5757 */ 5758 lpfc_retry_pport_discovery(phba); 5759 } else 5760 /* 5761 * Reset FCF roundrobin bmask for new 5762 * discovery. 5763 */ 5764 lpfc_sli4_clear_fcf_rr_bmask(phba); 5765 } 5766 break; 5767 default: 5768 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5769 "0288 Unknown FCoE event type 0x%x event tag " 5770 "0x%x\n", event_type, acqe_fip->event_tag); 5771 break; 5772 } 5773 } 5774 5775 /** 5776 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 5777 * @phba: pointer to lpfc hba data structure. 5778 * @acqe_link: pointer to the async dcbx completion queue entry. 5779 * 5780 * This routine is to handle the SLI4 asynchronous dcbx event. 
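 * DCBX events are currently only noted: the event tag is saved in
 * phba->fc_eventTag and a message is logged stating that the event is not
 * yet handled.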
5781 **/ 5782 static void 5783 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 5784 struct lpfc_acqe_dcbx *acqe_dcbx) 5785 { 5786 phba->fc_eventTag = acqe_dcbx->event_tag; 5787 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5788 "0290 The SLI4 DCBX asynchronous event is not " 5789 "handled yet\n"); 5790 } 5791 5792 /** 5793 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 5794 * @phba: pointer to lpfc hba data structure. 5795 * @acqe_link: pointer to the async grp5 completion queue entry. 5796 * 5797 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 5798 * is an asynchronous notified of a logical link speed change. The Port 5799 * reports the logical link speed in units of 10Mbps. 5800 **/ 5801 static void 5802 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 5803 struct lpfc_acqe_grp5 *acqe_grp5) 5804 { 5805 uint16_t prev_ll_spd; 5806 5807 phba->fc_eventTag = acqe_grp5->event_tag; 5808 phba->fcoe_eventtag = acqe_grp5->event_tag; 5809 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 5810 phba->sli4_hba.link_state.logical_speed = 5811 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 5812 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5813 "2789 GRP5 Async Event: Updating logical link speed " 5814 "from %dMbps to %dMbps\n", prev_ll_spd, 5815 phba->sli4_hba.link_state.logical_speed); 5816 } 5817 5818 /** 5819 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 5820 * @phba: pointer to lpfc hba data structure. 5821 * 5822 * This routine is invoked by the worker thread to process all the pending 5823 * SLI4 asynchronous events. 5824 **/ 5825 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 5826 { 5827 struct lpfc_cq_event *cq_event; 5828 5829 /* First, declare the async event has been handled */ 5830 spin_lock_irq(&phba->hbalock); 5831 phba->hba_flag &= ~ASYNC_EVENT; 5832 spin_unlock_irq(&phba->hbalock); 5833 /* Now, handle all the async events */ 5834 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 5835 /* Get the first event from the head of the event queue */ 5836 spin_lock_irq(&phba->hbalock); 5837 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 5838 cq_event, struct lpfc_cq_event, list); 5839 spin_unlock_irq(&phba->hbalock); 5840 /* Process the asynchronous event */ 5841 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 5842 case LPFC_TRAILER_CODE_LINK: 5843 lpfc_sli4_async_link_evt(phba, 5844 &cq_event->cqe.acqe_link); 5845 break; 5846 case LPFC_TRAILER_CODE_FCOE: 5847 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 5848 break; 5849 case LPFC_TRAILER_CODE_DCBX: 5850 lpfc_sli4_async_dcbx_evt(phba, 5851 &cq_event->cqe.acqe_dcbx); 5852 break; 5853 case LPFC_TRAILER_CODE_GRP5: 5854 lpfc_sli4_async_grp5_evt(phba, 5855 &cq_event->cqe.acqe_grp5); 5856 break; 5857 case LPFC_TRAILER_CODE_FC: 5858 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 5859 break; 5860 case LPFC_TRAILER_CODE_SLI: 5861 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 5862 break; 5863 default: 5864 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5865 "1804 Invalid asynchrous event code: " 5866 "x%x\n", bf_get(lpfc_trailer_code, 5867 &cq_event->cqe.mcqe_cmpl)); 5868 break; 5869 } 5870 /* Free the completion event processed to the free pool */ 5871 lpfc_sli4_cq_event_release(phba, cq_event); 5872 } 5873 } 5874 5875 /** 5876 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 5877 * @phba: pointer to lpfc hba data structure. 
5878 * 5879 * This routine is invoked by the worker thread to process FCF table 5880 * rediscovery pending completion event. 5881 **/ 5882 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5883 { 5884 int rc; 5885 5886 spin_lock_irq(&phba->hbalock); 5887 /* Clear FCF rediscovery timeout event */ 5888 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 5889 /* Clear driver fast failover FCF record flag */ 5890 phba->fcf.failover_rec.flag = 0; 5891 /* Set state for FCF fast failover */ 5892 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 5893 spin_unlock_irq(&phba->hbalock); 5894 5895 /* Scan FCF table from the first entry to re-discover SAN */ 5896 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5897 "2777 Start post-quiescent FCF table scan\n"); 5898 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5899 if (rc) 5900 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5901 "2747 Issue FCF scan read FCF mailbox " 5902 "command failed 0x%x\n", rc); 5903 } 5904 5905 /** 5906 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 5907 * @phba: pointer to lpfc hba data structure. 5908 * @dev_grp: The HBA PCI-Device group number. 5909 * 5910 * This routine is invoked to set up the per HBA PCI-Device group function 5911 * API jump table entries. 5912 * 5913 * Return: 0 if success, otherwise -ENODEV 5914 **/ 5915 int 5916 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5917 { 5918 int rc; 5919 5920 /* Set up lpfc PCI-device group */ 5921 phba->pci_dev_grp = dev_grp; 5922 5923 /* The LPFC_PCI_DEV_OC uses SLI4 */ 5924 if (dev_grp == LPFC_PCI_DEV_OC) 5925 phba->sli_rev = LPFC_SLI_REV4; 5926 5927 /* Set up device INIT API function jump table */ 5928 rc = lpfc_init_api_table_setup(phba, dev_grp); 5929 if (rc) 5930 return -ENODEV; 5931 /* Set up SCSI API function jump table */ 5932 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 5933 if (rc) 5934 return -ENODEV; 5935 /* Set up SLI API function jump table */ 5936 rc = lpfc_sli_api_table_setup(phba, dev_grp); 5937 if (rc) 5938 return -ENODEV; 5939 /* Set up MBOX API function jump table */ 5940 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 5941 if (rc) 5942 return -ENODEV; 5943 5944 return 0; 5945 } 5946 5947 /** 5948 * lpfc_log_intr_mode - Log the active interrupt mode 5949 * @phba: pointer to lpfc hba data structure. 5950 * @intr_mode: active interrupt mode adopted. 5951 * 5952 * This routine it invoked to log the currently used active interrupt mode 5953 * to the device. 5954 **/ 5955 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 5956 { 5957 switch (intr_mode) { 5958 case 0: 5959 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5960 "0470 Enable INTx interrupt mode.\n"); 5961 break; 5962 case 1: 5963 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5964 "0481 Enabled MSI interrupt mode.\n"); 5965 break; 5966 case 2: 5967 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5968 "0480 Enabled MSI-X interrupt mode.\n"); 5969 break; 5970 default: 5971 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5972 "0482 Illegal interrupt mode.\n"); 5973 break; 5974 } 5975 return; 5976 } 5977 5978 /** 5979 * lpfc_enable_pci_dev - Enable a generic PCI device. 5980 * @phba: pointer to lpfc hba data structure. 5981 * 5982 * This routine is invoked to enable the PCI device that is common to all 5983 * PCI devices. 
5984 * 5985 * Return codes 5986 * 0 - successful 5987 * other values - error 5988 **/ 5989 static int 5990 lpfc_enable_pci_dev(struct lpfc_hba *phba) 5991 { 5992 struct pci_dev *pdev; 5993 5994 /* Obtain PCI device reference */ 5995 if (!phba->pcidev) 5996 goto out_error; 5997 else 5998 pdev = phba->pcidev; 5999 /* Enable PCI device */ 6000 if (pci_enable_device_mem(pdev)) 6001 goto out_error; 6002 /* Request PCI resource for the device */ 6003 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 6004 goto out_disable_device; 6005 /* Set up device as PCI master and save state for EEH */ 6006 pci_set_master(pdev); 6007 pci_try_set_mwi(pdev); 6008 pci_save_state(pdev); 6009 6010 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 6011 if (pci_is_pcie(pdev)) 6012 pdev->needs_freset = 1; 6013 6014 return 0; 6015 6016 out_disable_device: 6017 pci_disable_device(pdev); 6018 out_error: 6019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6020 "1401 Failed to enable pci device\n"); 6021 return -ENODEV; 6022 } 6023 6024 /** 6025 * lpfc_disable_pci_dev - Disable a generic PCI device. 6026 * @phba: pointer to lpfc hba data structure. 6027 * 6028 * This routine is invoked to disable the PCI device that is common to all 6029 * PCI devices. 6030 **/ 6031 static void 6032 lpfc_disable_pci_dev(struct lpfc_hba *phba) 6033 { 6034 struct pci_dev *pdev; 6035 6036 /* Obtain PCI device reference */ 6037 if (!phba->pcidev) 6038 return; 6039 else 6040 pdev = phba->pcidev; 6041 /* Release PCI resource and disable PCI device */ 6042 pci_release_mem_regions(pdev); 6043 pci_disable_device(pdev); 6044 6045 return; 6046 } 6047 6048 /** 6049 * lpfc_reset_hba - Reset a hba 6050 * @phba: pointer to lpfc hba data structure. 6051 * 6052 * This routine is invoked to reset a hba device. It brings the HBA 6053 * offline, performs a board restart, and then brings the board back 6054 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 6055 * on outstanding mailbox commands. 6056 **/ 6057 void 6058 lpfc_reset_hba(struct lpfc_hba *phba) 6059 { 6060 /* If resets are disabled then set error state and return. */ 6061 if (!phba->cfg_enable_hba_reset) { 6062 phba->link_state = LPFC_HBA_ERROR; 6063 return; 6064 } 6065 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 6066 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6067 else 6068 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 6069 lpfc_offline(phba); 6070 lpfc_sli_brdrestart(phba); 6071 lpfc_online(phba); 6072 lpfc_unblock_mgmt_io(phba); 6073 } 6074 6075 /** 6076 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 6077 * @phba: pointer to lpfc hba data structure. 6078 * 6079 * This function enables the PCI SR-IOV virtual functions to a physical 6080 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 6081 * enable the number of virtual functions to the physical function. As 6082 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 6083 * API call does not considered as an error condition for most of the device. 
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables PCI SR-IOV virtual functions for a physical
 * function. It invokes the PCI SR-IOV API with @nr_vfn to enable that many
 * virtual functions on the physical function. As not all devices support
 * SR-IOV, the return code from the pci_enable_sriov() API call is not
 * considered an error condition for most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it is attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ?
"NVMET" : " ")); 6181 6182 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 6183 spin_lock_init(&phba->scsi_buf_list_get_lock); 6184 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 6185 spin_lock_init(&phba->scsi_buf_list_put_lock); 6186 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 6187 6188 /* Initialize the fabric iocb list */ 6189 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6190 6191 /* Initialize list to save ELS buffers */ 6192 INIT_LIST_HEAD(&phba->elsbuf); 6193 6194 /* Initialize FCF connection rec list */ 6195 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 6196 6197 /* Initialize OAS configuration list */ 6198 spin_lock_init(&phba->devicelock); 6199 INIT_LIST_HEAD(&phba->luns); 6200 6201 /* MBOX heartbeat timer */ 6202 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 6203 /* Fabric block timer */ 6204 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 6205 /* EA polling mode timer */ 6206 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 6207 /* Heartbeat timer */ 6208 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 6209 6210 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 6211 6212 return 0; 6213 } 6214 6215 /** 6216 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 6217 * @phba: pointer to lpfc hba data structure. 6218 * 6219 * This routine is invoked to set up the driver internal resources specific to 6220 * support the SLI-3 HBA device it attached to. 6221 * 6222 * Return codes 6223 * 0 - successful 6224 * other values - error 6225 **/ 6226 static int 6227 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 6228 { 6229 int rc, entry_sz; 6230 6231 /* 6232 * Initialize timers used by driver 6233 */ 6234 6235 /* FCP polling mode timer */ 6236 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 6237 6238 /* Host attention work mask setup */ 6239 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6240 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6241 6242 /* Get all the module params for configuring this host */ 6243 lpfc_get_cfgparam(phba); 6244 /* Set up phase-1 common device driver resources */ 6245 6246 rc = lpfc_setup_driver_resource_phase1(phba); 6247 if (rc) 6248 return -ENODEV; 6249 6250 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 6251 phba->menlo_flag |= HBA_MENLO_SUPPORT; 6252 /* check for menlo minimum sg count */ 6253 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 6254 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 6255 } 6256 6257 if (!phba->sli.sli3_ring) 6258 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 6259 sizeof(struct lpfc_sli_ring), 6260 GFP_KERNEL); 6261 if (!phba->sli.sli3_ring) 6262 return -ENOMEM; 6263 6264 /* 6265 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 6266 * used to create the sg_dma_buf_pool must be dynamically calculated. 6267 */ 6268 6269 /* Initialize the host templates the configured values. */ 6270 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6271 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 6272 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6273 6274 if (phba->sli_rev == LPFC_SLI_REV4) 6275 entry_sz = sizeof(struct sli4_sge); 6276 else 6277 entry_sz = sizeof(struct ulp_bde64); 6278 6279 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 6280 if (phba->cfg_enable_bg) { 6281 /* 6282 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 6283 * the FCP rsp, and a BDE for each. 
Sice we have no control 6284 * over how many protection data segments the SCSI Layer 6285 * will hand us (ie: there could be one for every block 6286 * in the IO), we just allocate enough BDEs to accomidate 6287 * our max amount and we need to limit lpfc_sg_seg_cnt to 6288 * minimize the risk of running out. 6289 */ 6290 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6291 sizeof(struct fcp_rsp) + 6292 (LPFC_MAX_SG_SEG_CNT * entry_sz); 6293 6294 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 6295 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 6296 6297 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 6298 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 6299 } else { 6300 /* 6301 * The scsi_buf for a regular I/O will hold the FCP cmnd, 6302 * the FCP rsp, a BDE for each, and a BDE for up to 6303 * cfg_sg_seg_cnt data segments. 6304 */ 6305 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6306 sizeof(struct fcp_rsp) + 6307 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 6308 6309 /* Total BDEs in BPL for scsi_sg_list */ 6310 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 6311 } 6312 6313 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6314 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 6315 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6316 phba->cfg_total_seg_cnt); 6317 6318 phba->max_vpi = LPFC_MAX_VPI; 6319 /* This will be set to correct value after config_port mbox */ 6320 phba->max_vports = 0; 6321 6322 /* 6323 * Initialize the SLI Layer to run with lpfc HBAs. 6324 */ 6325 lpfc_sli_setup(phba); 6326 lpfc_sli_queue_init(phba); 6327 6328 /* Allocate device driver memory */ 6329 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 6330 return -ENOMEM; 6331 6332 phba->lpfc_sg_dma_buf_pool = 6333 dma_pool_create("lpfc_sg_dma_buf_pool", 6334 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, 6335 BPL_ALIGN_SZ, 0); 6336 6337 if (!phba->lpfc_sg_dma_buf_pool) 6338 goto fail_free_mem; 6339 6340 phba->lpfc_cmd_rsp_buf_pool = 6341 dma_pool_create("lpfc_cmd_rsp_buf_pool", 6342 &phba->pcidev->dev, 6343 sizeof(struct fcp_cmnd) + 6344 sizeof(struct fcp_rsp), 6345 BPL_ALIGN_SZ, 0); 6346 6347 if (!phba->lpfc_cmd_rsp_buf_pool) 6348 goto fail_free_dma_buf_pool; 6349 6350 /* 6351 * Enable sr-iov virtual functions if supported and configured 6352 * through the module parameter. 6353 */ 6354 if (phba->cfg_sriov_nr_virtfn > 0) { 6355 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6356 phba->cfg_sriov_nr_virtfn); 6357 if (rc) { 6358 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6359 "2808 Requested number of SR-IOV " 6360 "virtual functions (%d) is not " 6361 "supported\n", 6362 phba->cfg_sriov_nr_virtfn); 6363 phba->cfg_sriov_nr_virtfn = 0; 6364 } 6365 } 6366 6367 return 0; 6368 6369 fail_free_dma_buf_pool: 6370 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 6371 phba->lpfc_sg_dma_buf_pool = NULL; 6372 fail_free_mem: 6373 lpfc_mem_free(phba); 6374 return -ENOMEM; 6375 } 6376 6377 /** 6378 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 6379 * @phba: pointer to lpfc hba data structure. 6380 * 6381 * This routine is invoked to unset the driver internal resources set up 6382 * specific for supporting the SLI-3 HBA device it attached to. 
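 * All driver memory allocated during SLI-3 resource setup is released here
 * through lpfc_mem_free_all().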
6383 **/ 6384 static void 6385 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 6386 { 6387 /* Free device driver memory allocated */ 6388 lpfc_mem_free_all(phba); 6389 6390 return; 6391 } 6392 6393 /** 6394 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 6395 * @phba: pointer to lpfc hba data structure. 6396 * 6397 * This routine is invoked to set up the driver internal resources specific to 6398 * support the SLI-4 HBA device it attached to. 6399 * 6400 * Return codes 6401 * 0 - successful 6402 * other values - error 6403 **/ 6404 static int 6405 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 6406 { 6407 LPFC_MBOXQ_t *mboxq; 6408 MAILBOX_t *mb; 6409 int rc, i, max_buf_size; 6410 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 6411 struct lpfc_mqe *mqe; 6412 int longs; 6413 int extra; 6414 uint64_t wwn; 6415 u32 if_type; 6416 u32 if_fam; 6417 6418 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 6419 phba->sli4_hba.num_possible_cpu = num_possible_cpus(); 6420 phba->sli4_hba.curr_disp_cpu = 0; 6421 6422 /* Get all the module params for configuring this host */ 6423 lpfc_get_cfgparam(phba); 6424 6425 /* Set up phase-1 common device driver resources */ 6426 rc = lpfc_setup_driver_resource_phase1(phba); 6427 if (rc) 6428 return -ENODEV; 6429 6430 /* Before proceed, wait for POST done and device ready */ 6431 rc = lpfc_sli4_post_status_check(phba); 6432 if (rc) 6433 return -ENODEV; 6434 6435 /* Allocate all driver workqueues here */ 6436 6437 /* The lpfc_wq workqueue for deferred irq use */ 6438 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6439 6440 /* 6441 * Initialize timers used by driver 6442 */ 6443 6444 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 6445 6446 /* FCF rediscover timer */ 6447 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 6448 6449 /* 6450 * Control structure for handling external multi-buffer mailbox 6451 * command pass-through. 6452 */ 6453 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 6454 sizeof(struct lpfc_mbox_ext_buf_ctx)); 6455 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 6456 6457 phba->max_vpi = LPFC_MAX_VPI; 6458 6459 /* This will be set to correct value after the read_config mbox */ 6460 phba->max_vports = 0; 6461 6462 /* Program the default value of vlan_id and fc_map */ 6463 phba->valid_vlan = 0; 6464 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 6465 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 6466 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 6467 6468 /* 6469 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 6470 * we will associate a new ring, for each EQ/CQ/WQ tuple. 6471 * The WQ create will allocate the ring. 6472 */ 6473 6474 /* Initialize buffer queue management fields */ 6475 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6476 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 6477 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 6478 6479 /* 6480 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
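	 * The list heads and locks below are set up before any mailbox
	 * traffic so that early failure and cleanup paths find them
	 * correctly initialized.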
6481 */ 6482 /* Initialize the Abort buffer list used by driver */ 6483 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 6484 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 6485 6486 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6487 /* Initialize the Abort nvme buffer list used by driver */ 6488 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 6489 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6490 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 6491 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 6492 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 6493 } 6494 6495 /* This abort list used by worker thread */ 6496 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 6497 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 6498 6499 /* 6500 * Initialize driver internal slow-path work queues 6501 */ 6502 6503 /* Driver internel slow-path CQ Event pool */ 6504 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 6505 /* Response IOCB work queue list */ 6506 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 6507 /* Asynchronous event CQ Event work queue list */ 6508 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 6509 /* Fast-path XRI aborted CQ Event work queue list */ 6510 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 6511 /* Slow-path XRI aborted CQ Event work queue list */ 6512 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 6513 /* Receive queue CQ Event work queue list */ 6514 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 6515 6516 /* Initialize extent block lists. */ 6517 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 6518 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 6519 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6520 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6521 6522 /* Initialize mboxq lists. If the early init routines fail 6523 * these lists need to be correctly initialized. 6524 */ 6525 INIT_LIST_HEAD(&phba->sli.mboxq); 6526 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6527 6528 /* initialize optic_state to 0xFF */ 6529 phba->sli4_hba.lnk_info.optic_state = 0xff; 6530 6531 /* Allocate device driver memory */ 6532 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6533 if (rc) 6534 return -ENOMEM; 6535 6536 /* IF Type 2 ports get initialized now. */ 6537 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 6538 LPFC_SLI_INTF_IF_TYPE_2) { 6539 rc = lpfc_pci_function_reset(phba); 6540 if (unlikely(rc)) { 6541 rc = -ENODEV; 6542 goto out_free_mem; 6543 } 6544 phba->temp_sensor_support = 1; 6545 } 6546 6547 /* Create the bootstrap mailbox command */ 6548 rc = lpfc_create_bootstrap_mbox(phba); 6549 if (unlikely(rc)) 6550 goto out_free_mem; 6551 6552 /* Set up the host's endian order with the device. */ 6553 rc = lpfc_setup_endian_order(phba); 6554 if (unlikely(rc)) 6555 goto out_free_bsmbx; 6556 6557 /* Set up the hba's configuration parameters. */ 6558 rc = lpfc_sli4_read_config(phba); 6559 if (unlikely(rc)) 6560 goto out_free_bsmbx; 6561 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6562 if (unlikely(rc)) 6563 goto out_free_bsmbx; 6564 6565 /* IF Type 0 ports get initialized now. 
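	 * IF Type 2 (and later) ports were already reset above, before the
	 * bootstrap mailbox was created and READ_CONFIG was issued.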
*/ 6566 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6567 LPFC_SLI_INTF_IF_TYPE_0) { 6568 rc = lpfc_pci_function_reset(phba); 6569 if (unlikely(rc)) 6570 goto out_free_bsmbx; 6571 } 6572 6573 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6574 GFP_KERNEL); 6575 if (!mboxq) { 6576 rc = -ENOMEM; 6577 goto out_free_bsmbx; 6578 } 6579 6580 /* Check for NVMET being configured */ 6581 phba->nvmet_support = 0; 6582 if (lpfc_enable_nvmet_cnt) { 6583 6584 /* First get WWN of HBA instance */ 6585 lpfc_read_nv(phba, mboxq); 6586 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6587 if (rc != MBX_SUCCESS) { 6588 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6589 "6016 Mailbox failed , mbxCmd x%x " 6590 "READ_NV, mbxStatus x%x\n", 6591 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6592 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6593 mempool_free(mboxq, phba->mbox_mem_pool); 6594 rc = -EIO; 6595 goto out_free_bsmbx; 6596 } 6597 mb = &mboxq->u.mb; 6598 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6599 sizeof(uint64_t)); 6600 wwn = cpu_to_be64(wwn); 6601 phba->sli4_hba.wwnn.u.name = wwn; 6602 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6603 sizeof(uint64_t)); 6604 /* wwn is WWPN of HBA instance */ 6605 wwn = cpu_to_be64(wwn); 6606 phba->sli4_hba.wwpn.u.name = wwn; 6607 6608 /* Check to see if it matches any module parameter */ 6609 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6610 if (wwn == lpfc_enable_nvmet[i]) { 6611 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6612 if (lpfc_nvmet_mem_alloc(phba)) 6613 break; 6614 6615 phba->nvmet_support = 1; /* a match */ 6616 6617 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6618 "6017 NVME Target %016llx\n", 6619 wwn); 6620 #else 6621 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6622 "6021 Can't enable NVME Target." 6623 " NVME_TARGET_FC infrastructure" 6624 " is not in kernel\n"); 6625 #endif 6626 /* Not supported for NVMET */ 6627 phba->cfg_xri_rebalancing = 0; 6628 break; 6629 } 6630 } 6631 } 6632 6633 lpfc_nvme_mod_param_dep(phba); 6634 6635 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 6636 lpfc_supported_pages(mboxq); 6637 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6638 if (!rc) { 6639 mqe = &mboxq->u.mqe; 6640 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 6641 LPFC_MAX_SUPPORTED_PAGES); 6642 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 6643 switch (pn_page[i]) { 6644 case LPFC_SLI4_PARAMETERS: 6645 phba->sli4_hba.pc_sli4_params.supported = 1; 6646 break; 6647 default: 6648 break; 6649 } 6650 } 6651 /* Read the port's SLI4 Parameters capabilities if supported. */ 6652 if (phba->sli4_hba.pc_sli4_params.supported) 6653 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6654 if (rc) { 6655 mempool_free(mboxq, phba->mbox_mem_pool); 6656 rc = -EIO; 6657 goto out_free_bsmbx; 6658 } 6659 } 6660 6661 /* 6662 * Get sli4 parameters that override parameters from Port capabilities. 6663 * If this call fails, it isn't critical unless the SLI4 parameters come 6664 * back in conflict. 
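	 * Only BE2 (if_type 0) adapters are allowed to continue when this
	 * mailbox fails, and then only if extents and RPI headers are not
	 * both in use.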
6665 */ 6666 rc = lpfc_get_sli4_parameters(phba, mboxq); 6667 if (rc) { 6668 if_type = bf_get(lpfc_sli_intf_if_type, 6669 &phba->sli4_hba.sli_intf); 6670 if_fam = bf_get(lpfc_sli_intf_sli_family, 6671 &phba->sli4_hba.sli_intf); 6672 if (phba->sli4_hba.extents_in_use && 6673 phba->sli4_hba.rpi_hdrs_in_use) { 6674 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6675 "2999 Unsupported SLI4 Parameters " 6676 "Extents and RPI headers enabled.\n"); 6677 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6678 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6679 mempool_free(mboxq, phba->mbox_mem_pool); 6680 rc = -EIO; 6681 goto out_free_bsmbx; 6682 } 6683 } 6684 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6685 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 6686 mempool_free(mboxq, phba->mbox_mem_pool); 6687 rc = -EIO; 6688 goto out_free_bsmbx; 6689 } 6690 } 6691 6692 /* 6693 * 1 for cmd, 1 for rsp, NVME adds an extra one 6694 * for boundary conditions in its max_sgl_segment template. 6695 */ 6696 extra = 2; 6697 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 6698 extra++; 6699 6700 /* 6701 * It doesn't matter what family our adapter is in, we are 6702 * limited to 2 Pages, 512 SGEs, for our SGL. 6703 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 6704 */ 6705 max_buf_size = (2 * SLI4_PAGE_SIZE); 6706 6707 /* 6708 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 6709 * used to create the sg_dma_buf_pool must be calculated. 6710 */ 6711 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 6712 /* Both cfg_enable_bg and cfg_external_dif code paths */ 6713 6714 /* 6715 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 6716 * the FCP rsp, and a SGE. Sice we have no control 6717 * over how many protection segments the SCSI Layer 6718 * will hand us (ie: there could be one for every block 6719 * in the IO), just allocate enough SGEs to accomidate 6720 * our max amount and we need to limit lpfc_sg_seg_cnt 6721 * to minimize the risk of running out. 6722 */ 6723 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6724 sizeof(struct fcp_rsp) + max_buf_size; 6725 6726 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 6727 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 6728 6729 /* 6730 * If supporting DIF, reduce the seg count for scsi to 6731 * allow room for the DIF sges. 6732 */ 6733 if (phba->cfg_enable_bg && 6734 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 6735 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 6736 else 6737 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6738 6739 } else { 6740 /* 6741 * The scsi_buf for a regular I/O holds the FCP cmnd, 6742 * the FCP rsp, a SGE for each, and a SGE for up to 6743 * cfg_sg_seg_cnt data segments. 6744 */ 6745 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6746 sizeof(struct fcp_rsp) + 6747 ((phba->cfg_sg_seg_cnt + extra) * 6748 sizeof(struct sli4_sge)); 6749 6750 /* Total SGEs for scsi_sg_list */ 6751 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 6752 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6753 6754 /* 6755 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 6756 * need to post 1 page for the SGL. 
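	 * Each sli4_sge entry is 16 bytes, so 256 entries fit exactly in a
	 * single 4KB SGL page.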
6757 */ 6758 } 6759 6760 if (phba->cfg_xpsgl && !phba->nvmet_support) 6761 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 6762 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 6763 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 6764 else 6765 phba->cfg_sg_dma_buf_size = 6766 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 6767 6768 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 6769 sizeof(struct sli4_sge); 6770 6771 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 6772 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6773 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 6774 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 6775 "6300 Reducing NVME sg segment " 6776 "cnt to %d\n", 6777 LPFC_MAX_NVME_SEG_CNT); 6778 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 6779 } else 6780 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 6781 } 6782 6783 /* Initialize the host templates with the updated values. */ 6784 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6785 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6786 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt; 6787 6788 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6789 "9087 sg_seg_cnt:%d dmabuf_size:%d " 6790 "total:%d scsi:%d nvme:%d\n", 6791 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6792 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 6793 phba->cfg_nvme_seg_cnt); 6794 6795 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 6796 i = phba->cfg_sg_dma_buf_size; 6797 else 6798 i = SLI4_PAGE_SIZE; 6799 6800 phba->lpfc_sg_dma_buf_pool = 6801 dma_pool_create("lpfc_sg_dma_buf_pool", 6802 &phba->pcidev->dev, 6803 phba->cfg_sg_dma_buf_size, 6804 i, 0); 6805 if (!phba->lpfc_sg_dma_buf_pool) 6806 goto out_free_bsmbx; 6807 6808 phba->lpfc_cmd_rsp_buf_pool = 6809 dma_pool_create("lpfc_cmd_rsp_buf_pool", 6810 &phba->pcidev->dev, 6811 sizeof(struct fcp_cmnd) + 6812 sizeof(struct fcp_rsp), 6813 i, 0); 6814 if (!phba->lpfc_cmd_rsp_buf_pool) 6815 goto out_free_sg_dma_buf; 6816 6817 mempool_free(mboxq, phba->mbox_mem_pool); 6818 6819 /* Verify OAS is supported */ 6820 lpfc_sli4_oas_verify(phba); 6821 6822 /* Verify RAS support on adapter */ 6823 lpfc_sli4_ras_init(phba); 6824 6825 /* Verify all the SLI4 queues */ 6826 rc = lpfc_sli4_queue_verify(phba); 6827 if (rc) 6828 goto out_free_cmd_rsp_buf; 6829 6830 /* Create driver internal CQE event pool */ 6831 rc = lpfc_sli4_cq_event_pool_create(phba); 6832 if (rc) 6833 goto out_free_cmd_rsp_buf; 6834 6835 /* Initialize sgl lists per host */ 6836 lpfc_init_sgl_list(phba); 6837 6838 /* Allocate and initialize active sgl array */ 6839 rc = lpfc_init_active_sgl_array(phba); 6840 if (rc) { 6841 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6842 "1430 Failed to initialize sgl list.\n"); 6843 goto out_destroy_cq_event_pool; 6844 } 6845 rc = lpfc_sli4_init_rpi_hdrs(phba); 6846 if (rc) { 6847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6848 "1432 Failed to initialize rpi headers.\n"); 6849 goto out_free_active_sgl; 6850 } 6851 6852 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6853 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6854 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 6855 GFP_KERNEL); 6856 if (!phba->fcf.fcf_rr_bmask) { 6857 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6858 "2759 Failed allocate memory for FCF round " 6859 "robin failover bmask\n"); 6860 rc = -ENOMEM; 6861 goto out_remove_rpi_hdrs; 6862 } 6863 6864 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 6865 
sizeof(struct lpfc_hba_eq_hdl), 6866 GFP_KERNEL); 6867 if (!phba->sli4_hba.hba_eq_hdl) { 6868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6869 "2572 Failed allocate memory for " 6870 "fast-path per-EQ handle array\n"); 6871 rc = -ENOMEM; 6872 goto out_free_fcf_rr_bmask; 6873 } 6874 6875 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 6876 sizeof(struct lpfc_vector_map_info), 6877 GFP_KERNEL); 6878 if (!phba->sli4_hba.cpu_map) { 6879 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6880 "3327 Failed allocate memory for msi-x " 6881 "interrupt vector mapping\n"); 6882 rc = -ENOMEM; 6883 goto out_free_hba_eq_hdl; 6884 } 6885 6886 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 6887 if (!phba->sli4_hba.eq_info) { 6888 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6889 "3321 Failed allocation for per_cpu stats\n"); 6890 rc = -ENOMEM; 6891 goto out_free_hba_cpu_map; 6892 } 6893 /* 6894 * Enable sr-iov virtual functions if supported and configured 6895 * through the module parameter. 6896 */ 6897 if (phba->cfg_sriov_nr_virtfn > 0) { 6898 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6899 phba->cfg_sriov_nr_virtfn); 6900 if (rc) { 6901 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6902 "3020 Requested number of SR-IOV " 6903 "virtual functions (%d) is not " 6904 "supported\n", 6905 phba->cfg_sriov_nr_virtfn); 6906 phba->cfg_sriov_nr_virtfn = 0; 6907 } 6908 } 6909 6910 return 0; 6911 6912 out_free_hba_cpu_map: 6913 kfree(phba->sli4_hba.cpu_map); 6914 out_free_hba_eq_hdl: 6915 kfree(phba->sli4_hba.hba_eq_hdl); 6916 out_free_fcf_rr_bmask: 6917 kfree(phba->fcf.fcf_rr_bmask); 6918 out_remove_rpi_hdrs: 6919 lpfc_sli4_remove_rpi_hdrs(phba); 6920 out_free_active_sgl: 6921 lpfc_free_active_sgl(phba); 6922 out_destroy_cq_event_pool: 6923 lpfc_sli4_cq_event_pool_destroy(phba); 6924 out_free_cmd_rsp_buf: 6925 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 6926 phba->lpfc_cmd_rsp_buf_pool = NULL; 6927 out_free_sg_dma_buf: 6928 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 6929 phba->lpfc_sg_dma_buf_pool = NULL; 6930 out_free_bsmbx: 6931 lpfc_destroy_bootstrap_mbox(phba); 6932 out_free_mem: 6933 lpfc_mem_free(phba); 6934 return rc; 6935 } 6936 6937 /** 6938 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 6939 * @phba: pointer to lpfc hba data structure. 6940 * 6941 * This routine is invoked to unset the driver internal resources set up 6942 * specific for supporting the SLI-4 HBA device it attached to. 6943 **/ 6944 static void 6945 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 6946 { 6947 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 6948 6949 free_percpu(phba->sli4_hba.eq_info); 6950 6951 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 6952 kfree(phba->sli4_hba.cpu_map); 6953 phba->sli4_hba.num_possible_cpu = 0; 6954 phba->sli4_hba.num_present_cpu = 0; 6955 phba->sli4_hba.curr_disp_cpu = 0; 6956 6957 /* Free memory allocated for fast-path work queue handles */ 6958 kfree(phba->sli4_hba.hba_eq_hdl); 6959 6960 /* Free the allocated rpi headers. */ 6961 lpfc_sli4_remove_rpi_hdrs(phba); 6962 lpfc_sli4_remove_rpis(phba); 6963 6964 /* Free eligible FCF index bmask */ 6965 kfree(phba->fcf.fcf_rr_bmask); 6966 6967 /* Free the ELS sgl list */ 6968 lpfc_free_active_sgl(phba); 6969 lpfc_free_els_sgl_list(phba); 6970 lpfc_free_nvmet_sgl_list(phba); 6971 6972 /* Free the completion queue EQ event pool */ 6973 lpfc_sli4_cq_event_release_all(phba); 6974 lpfc_sli4_cq_event_pool_destroy(phba); 6975 6976 /* Release resource identifiers. 
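	 * (RPI, VPI, VFI and XRI resources that were allocated from the port)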
*/ 6977 lpfc_sli4_dealloc_resource_identifiers(phba); 6978 6979 /* Free the bsmbx region. */ 6980 lpfc_destroy_bootstrap_mbox(phba); 6981 6982 /* Free the SLI Layer memory with SLI4 HBAs */ 6983 lpfc_mem_free_all(phba); 6984 6985 /* Free the current connect table */ 6986 list_for_each_entry_safe(conn_entry, next_conn_entry, 6987 &phba->fcf_conn_rec_list, list) { 6988 list_del_init(&conn_entry->list); 6989 kfree(conn_entry); 6990 } 6991 6992 return; 6993 } 6994 6995 /** 6996 * lpfc_init_api_table_setup - Set up init api function jump table 6997 * @phba: The hba struct for which this call is being executed. 6998 * @dev_grp: The HBA PCI-Device group number. 6999 * 7000 * This routine sets up the device INIT interface API function jump table 7001 * in @phba struct. 7002 * 7003 * Returns: 0 - success, -ENODEV - failure. 7004 **/ 7005 int 7006 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7007 { 7008 phba->lpfc_hba_init_link = lpfc_hba_init_link; 7009 phba->lpfc_hba_down_link = lpfc_hba_down_link; 7010 phba->lpfc_selective_reset = lpfc_selective_reset; 7011 switch (dev_grp) { 7012 case LPFC_PCI_DEV_LP: 7013 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 7014 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 7015 phba->lpfc_stop_port = lpfc_stop_port_s3; 7016 break; 7017 case LPFC_PCI_DEV_OC: 7018 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 7019 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 7020 phba->lpfc_stop_port = lpfc_stop_port_s4; 7021 break; 7022 default: 7023 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7024 "1431 Invalid HBA PCI-device group: 0x%x\n", 7025 dev_grp); 7026 return -ENODEV; 7027 break; 7028 } 7029 return 0; 7030 } 7031 7032 /** 7033 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 7034 * @phba: pointer to lpfc hba data structure. 7035 * 7036 * This routine is invoked to set up the driver internal resources after the 7037 * device specific resource setup to support the HBA device it attached to. 7038 * 7039 * Return codes 7040 * 0 - successful 7041 * other values - error 7042 **/ 7043 static int 7044 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 7045 { 7046 int error; 7047 7048 /* Startup the kernel thread for this host adapter. */ 7049 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7050 "lpfc_worker_%d", phba->brd_no); 7051 if (IS_ERR(phba->worker_thread)) { 7052 error = PTR_ERR(phba->worker_thread); 7053 return error; 7054 } 7055 7056 return 0; 7057 } 7058 7059 /** 7060 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 7061 * @phba: pointer to lpfc hba data structure. 7062 * 7063 * This routine is invoked to unset the driver internal resources set up after 7064 * the device specific resource setup for supporting the HBA device it 7065 * attached to. 7066 **/ 7067 static void 7068 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 7069 { 7070 if (phba->wq) { 7071 flush_workqueue(phba->wq); 7072 destroy_workqueue(phba->wq); 7073 phba->wq = NULL; 7074 } 7075 7076 /* Stop kernel worker thread */ 7077 if (phba->worker_thread) 7078 kthread_stop(phba->worker_thread); 7079 } 7080 7081 /** 7082 * lpfc_free_iocb_list - Free iocb list. 7083 * @phba: pointer to lpfc hba data structure. 7084 * 7085 * This routine is invoked to free the driver's IOCB list and memory. 
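 * Each iocbq on phba->lpfc_iocb_list is removed and freed under hbalock,
 * and total_iocbq_bufs is decremented for every entry released.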
7086 **/ 7087 void 7088 lpfc_free_iocb_list(struct lpfc_hba *phba) 7089 { 7090 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 7091 7092 spin_lock_irq(&phba->hbalock); 7093 list_for_each_entry_safe(iocbq_entry, iocbq_next, 7094 &phba->lpfc_iocb_list, list) { 7095 list_del(&iocbq_entry->list); 7096 kfree(iocbq_entry); 7097 phba->total_iocbq_bufs--; 7098 } 7099 spin_unlock_irq(&phba->hbalock); 7100 7101 return; 7102 } 7103 7104 /** 7105 * lpfc_init_iocb_list - Allocate and initialize iocb list. 7106 * @phba: pointer to lpfc hba data structure. 7107 * 7108 * This routine is invoked to allocate and initizlize the driver's IOCB 7109 * list and set up the IOCB tag array accordingly. 7110 * 7111 * Return codes 7112 * 0 - successful 7113 * other values - error 7114 **/ 7115 int 7116 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 7117 { 7118 struct lpfc_iocbq *iocbq_entry = NULL; 7119 uint16_t iotag; 7120 int i; 7121 7122 /* Initialize and populate the iocb list per host. */ 7123 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 7124 for (i = 0; i < iocb_count; i++) { 7125 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 7126 if (iocbq_entry == NULL) { 7127 printk(KERN_ERR "%s: only allocated %d iocbs of " 7128 "expected %d count. Unloading driver.\n", 7129 __func__, i, LPFC_IOCB_LIST_CNT); 7130 goto out_free_iocbq; 7131 } 7132 7133 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 7134 if (iotag == 0) { 7135 kfree(iocbq_entry); 7136 printk(KERN_ERR "%s: failed to allocate IOTAG. " 7137 "Unloading driver.\n", __func__); 7138 goto out_free_iocbq; 7139 } 7140 iocbq_entry->sli4_lxritag = NO_XRI; 7141 iocbq_entry->sli4_xritag = NO_XRI; 7142 7143 spin_lock_irq(&phba->hbalock); 7144 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 7145 phba->total_iocbq_bufs++; 7146 spin_unlock_irq(&phba->hbalock); 7147 } 7148 7149 return 0; 7150 7151 out_free_iocbq: 7152 lpfc_free_iocb_list(phba); 7153 7154 return -ENOMEM; 7155 } 7156 7157 /** 7158 * lpfc_free_sgl_list - Free a given sgl list. 7159 * @phba: pointer to lpfc hba data structure. 7160 * @sglq_list: pointer to the head of sgl list. 7161 * 7162 * This routine is invoked to free a give sgl list and memory. 7163 **/ 7164 void 7165 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 7166 { 7167 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7168 7169 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 7170 list_del(&sglq_entry->list); 7171 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 7172 kfree(sglq_entry); 7173 } 7174 } 7175 7176 /** 7177 * lpfc_free_els_sgl_list - Free els sgl list. 7178 * @phba: pointer to lpfc hba data structure. 7179 * 7180 * This routine is invoked to free the driver's els sgl list and memory. 7181 **/ 7182 static void 7183 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 7184 { 7185 LIST_HEAD(sglq_list); 7186 7187 /* Retrieve all els sgls from driver list */ 7188 spin_lock_irq(&phba->hbalock); 7189 spin_lock(&phba->sli4_hba.sgl_list_lock); 7190 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 7191 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7192 spin_unlock_irq(&phba->hbalock); 7193 7194 /* Now free the sgl list */ 7195 lpfc_free_sgl_list(phba, &sglq_list); 7196 } 7197 7198 /** 7199 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 7200 * @phba: pointer to lpfc hba data structure. 7201 * 7202 * This routine is invoked to free the driver's nvmet sgl list and memory. 
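 * The sgls are spliced off the driver list while holding hbalock and the
 * sgl_list_lock, then freed outside the locks; nvmet_xri_cnt is reset so
 * the next initialization cycle reallocates them.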
7203 **/ 7204 static void 7205 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 7206 { 7207 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7208 LIST_HEAD(sglq_list); 7209 7210 /* Retrieve all nvmet sgls from driver list */ 7211 spin_lock_irq(&phba->hbalock); 7212 spin_lock(&phba->sli4_hba.sgl_list_lock); 7213 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 7214 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7215 spin_unlock_irq(&phba->hbalock); 7216 7217 /* Now free the sgl list */ 7218 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 7219 list_del(&sglq_entry->list); 7220 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 7221 kfree(sglq_entry); 7222 } 7223 7224 /* Update the nvmet_xri_cnt to reflect no current sgls. 7225 * The next initialization cycle sets the count and allocates 7226 * the sgls over again. 7227 */ 7228 phba->sli4_hba.nvmet_xri_cnt = 0; 7229 } 7230 7231 /** 7232 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 7233 * @phba: pointer to lpfc hba data structure. 7234 * 7235 * This routine is invoked to allocate the driver's active sgl memory. 7236 * This array will hold the sglq_entry's for active IOs. 7237 **/ 7238 static int 7239 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 7240 { 7241 int size; 7242 size = sizeof(struct lpfc_sglq *); 7243 size *= phba->sli4_hba.max_cfg_param.max_xri; 7244 7245 phba->sli4_hba.lpfc_sglq_active_list = 7246 kzalloc(size, GFP_KERNEL); 7247 if (!phba->sli4_hba.lpfc_sglq_active_list) 7248 return -ENOMEM; 7249 return 0; 7250 } 7251 7252 /** 7253 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 7254 * @phba: pointer to lpfc hba data structure. 7255 * 7256 * This routine is invoked to walk through the array of active sglq entries 7257 * and free all of the resources. 7258 * This is just a place holder for now. 7259 **/ 7260 static void 7261 lpfc_free_active_sgl(struct lpfc_hba *phba) 7262 { 7263 kfree(phba->sli4_hba.lpfc_sglq_active_list); 7264 } 7265 7266 /** 7267 * lpfc_init_sgl_list - Allocate and initialize sgl list. 7268 * @phba: pointer to lpfc hba data structure. 7269 * 7270 * This routine is invoked to allocate and initizlize the driver's sgl 7271 * list and set up the sgl xritag tag array accordingly. 7272 * 7273 **/ 7274 static void 7275 lpfc_init_sgl_list(struct lpfc_hba *phba) 7276 { 7277 /* Initialize and populate the sglq list per host/VF. */ 7278 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 7279 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7280 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 7281 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7282 7283 /* els xri-sgl book keeping */ 7284 phba->sli4_hba.els_xri_cnt = 0; 7285 7286 /* nvme xri-buffer book keeping */ 7287 phba->sli4_hba.io_xri_cnt = 0; 7288 } 7289 7290 /** 7291 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 7292 * @phba: pointer to lpfc hba data structure. 7293 * 7294 * This routine is invoked to post rpi header templates to the 7295 * port for those SLI4 ports that do not support extents. This routine 7296 * posts a PAGE_SIZE memory region to the port to hold up to 7297 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 7298 * and should be called only when interrupts are disabled. 7299 * 7300 * Return codes 7301 * 0 - successful 7302 * -ERROR - otherwise. 
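 *
 * Sizing note (illustrative): each header region posted through
 * lpfc_sli4_create_rpi_hdr() below covers LPFC_RPI_HDR_COUNT (64) rpis,
 * so a port that reports max_rpi rpis in READ_CONFIG will consume on the
 * order of max_rpi / 64 such regions as logins use up rpis; the exact
 * count is driven by next_rpi below.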
7303 **/ 7304 int 7305 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 7306 { 7307 int rc = 0; 7308 struct lpfc_rpi_hdr *rpi_hdr; 7309 7310 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 7311 if (!phba->sli4_hba.rpi_hdrs_in_use) 7312 return rc; 7313 if (phba->sli4_hba.extents_in_use) 7314 return -EIO; 7315 7316 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 7317 if (!rpi_hdr) { 7318 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7319 "0391 Error during rpi post operation\n"); 7320 lpfc_sli4_remove_rpis(phba); 7321 rc = -ENODEV; 7322 } 7323 7324 return rc; 7325 } 7326 7327 /** 7328 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 7329 * @phba: pointer to lpfc hba data structure. 7330 * 7331 * This routine is invoked to allocate a single 4KB memory region to 7332 * support rpis and stores them in the phba. This single region 7333 * provides support for up to 64 rpis. The region is used globally 7334 * by the device. 7335 * 7336 * Returns: 7337 * A valid rpi hdr on success. 7338 * A NULL pointer on any failure. 7339 **/ 7340 struct lpfc_rpi_hdr * 7341 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 7342 { 7343 uint16_t rpi_limit, curr_rpi_range; 7344 struct lpfc_dmabuf *dmabuf; 7345 struct lpfc_rpi_hdr *rpi_hdr; 7346 7347 /* 7348 * If the SLI4 port supports extents, posting the rpi header isn't 7349 * required. Set the expected maximum count and let the actual value 7350 * get set when extents are fully allocated. 7351 */ 7352 if (!phba->sli4_hba.rpi_hdrs_in_use) 7353 return NULL; 7354 if (phba->sli4_hba.extents_in_use) 7355 return NULL; 7356 7357 /* The limit on the logical index is just the max_rpi count. */ 7358 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 7359 7360 spin_lock_irq(&phba->hbalock); 7361 /* 7362 * Establish the starting RPI in this header block. The starting 7363 * rpi is normalized to a zero base because the physical rpi is 7364 * port based. 7365 */ 7366 curr_rpi_range = phba->sli4_hba.next_rpi; 7367 spin_unlock_irq(&phba->hbalock); 7368 7369 /* Reached full RPI range */ 7370 if (curr_rpi_range == rpi_limit) 7371 return NULL; 7372 7373 /* 7374 * First allocate the protocol header region for the port. The 7375 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 7376 */ 7377 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 7378 if (!dmabuf) 7379 return NULL; 7380 7381 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 7382 LPFC_HDR_TEMPLATE_SIZE, 7383 &dmabuf->phys, GFP_KERNEL); 7384 if (!dmabuf->virt) { 7385 rpi_hdr = NULL; 7386 goto err_free_dmabuf; 7387 } 7388 7389 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 7390 rpi_hdr = NULL; 7391 goto err_free_coherent; 7392 } 7393 7394 /* Save the rpi header data for cleanup later. */ 7395 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 7396 if (!rpi_hdr) 7397 goto err_free_coherent; 7398 7399 rpi_hdr->dmabuf = dmabuf; 7400 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 7401 rpi_hdr->page_count = 1; 7402 spin_lock_irq(&phba->hbalock); 7403 7404 /* The rpi_hdr stores the logical index only. 
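	 * The physical rpi seen by the port is presumably this logical value
	 * offset by the rpi_base reported in READ_CONFIG; the zero-based
	 * value kept here just tracks how far into the port's rpi range
	 * headers have been posted.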
*/ 7405 rpi_hdr->start_rpi = curr_rpi_range; 7406 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 7407 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 7408 7409 spin_unlock_irq(&phba->hbalock); 7410 return rpi_hdr; 7411 7412 err_free_coherent: 7413 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 7414 dmabuf->virt, dmabuf->phys); 7415 err_free_dmabuf: 7416 kfree(dmabuf); 7417 return NULL; 7418 } 7419 7420 /** 7421 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 7422 * @phba: pointer to lpfc hba data structure. 7423 * 7424 * This routine is invoked to remove all memory resources allocated 7425 * to support rpis for SLI4 ports not supporting extents. This routine 7426 * presumes the caller has released all rpis consumed by fabric or port 7427 * logins and is prepared to have the header pages removed. 7428 **/ 7429 void 7430 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 7431 { 7432 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 7433 7434 if (!phba->sli4_hba.rpi_hdrs_in_use) 7435 goto exit; 7436 7437 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 7438 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 7439 list_del(&rpi_hdr->list); 7440 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 7441 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 7442 kfree(rpi_hdr->dmabuf); 7443 kfree(rpi_hdr); 7444 } 7445 exit: 7446 /* There are no rpis available to the port now. */ 7447 phba->sli4_hba.next_rpi = 0; 7448 } 7449 7450 /** 7451 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 7452 * @pdev: pointer to pci device data structure. 7453 * 7454 * This routine is invoked to allocate the driver hba data structure for an 7455 * HBA device. If the allocation is successful, the phba reference to the 7456 * PCI device data structure is set. 7457 * 7458 * Return codes 7459 * pointer to @phba - successful 7460 * NULL - error 7461 **/ 7462 static struct lpfc_hba * 7463 lpfc_hba_alloc(struct pci_dev *pdev) 7464 { 7465 struct lpfc_hba *phba; 7466 7467 /* Allocate memory for HBA structure */ 7468 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 7469 if (!phba) { 7470 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 7471 return NULL; 7472 } 7473 7474 /* Set reference to PCI device in HBA structure */ 7475 phba->pcidev = pdev; 7476 7477 /* Assign an unused board number */ 7478 phba->brd_no = lpfc_get_instance(); 7479 if (phba->brd_no < 0) { 7480 kfree(phba); 7481 return NULL; 7482 } 7483 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 7484 7485 spin_lock_init(&phba->ct_ev_lock); 7486 INIT_LIST_HEAD(&phba->ct_ev_waiters); 7487 7488 return phba; 7489 } 7490 7491 /** 7492 * lpfc_hba_free - Free driver hba data structure with a device. 7493 * @phba: pointer to lpfc hba data structure. 7494 * 7495 * This routine is invoked to free the driver hba data structure with an 7496 * HBA device. 7497 **/ 7498 static void 7499 lpfc_hba_free(struct lpfc_hba *phba) 7500 { 7501 if (phba->sli_rev == LPFC_SLI_REV4) 7502 kfree(phba->sli4_hba.hdwq); 7503 7504 /* Release the driver assigned board number */ 7505 idr_remove(&lpfc_hba_index, phba->brd_no); 7506 7507 /* Free memory allocated with sli3 rings */ 7508 kfree(phba->sli.sli3_ring); 7509 phba->sli.sli3_ring = NULL; 7510 7511 kfree(phba); 7512 return; 7513 } 7514 7515 /** 7516 * lpfc_create_shost - Create hba physical port with associated scsi host. 7517 * @phba: pointer to lpfc hba data structure. 
7518 * 7519 * This routine is invoked to create HBA physical port and associate a SCSI 7520 * host with it. 7521 * 7522 * Return codes 7523 * 0 - successful 7524 * other values - error 7525 **/ 7526 static int 7527 lpfc_create_shost(struct lpfc_hba *phba) 7528 { 7529 struct lpfc_vport *vport; 7530 struct Scsi_Host *shost; 7531 7532 /* Initialize HBA FC structure */ 7533 phba->fc_edtov = FF_DEF_EDTOV; 7534 phba->fc_ratov = FF_DEF_RATOV; 7535 phba->fc_altov = FF_DEF_ALTOV; 7536 phba->fc_arbtov = FF_DEF_ARBTOV; 7537 7538 atomic_set(&phba->sdev_cnt, 0); 7539 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 7540 if (!vport) 7541 return -ENODEV; 7542 7543 shost = lpfc_shost_from_vport(vport); 7544 phba->pport = vport; 7545 7546 if (phba->nvmet_support) { 7547 /* Only 1 vport (pport) will support NVME target */ 7548 if (phba->txrdy_payload_pool == NULL) { 7549 phba->txrdy_payload_pool = dma_pool_create( 7550 "txrdy_pool", &phba->pcidev->dev, 7551 TXRDY_PAYLOAD_LEN, 16, 0); 7552 if (phba->txrdy_payload_pool) { 7553 phba->targetport = NULL; 7554 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 7555 lpfc_printf_log(phba, KERN_INFO, 7556 LOG_INIT | LOG_NVME_DISC, 7557 "6076 NVME Target Found\n"); 7558 } 7559 } 7560 } 7561 7562 lpfc_debugfs_initialize(vport); 7563 /* Put reference to SCSI host to driver's device private data */ 7564 pci_set_drvdata(phba->pcidev, shost); 7565 7566 /* 7567 * At this point we are fully registered with PSA. In addition, 7568 * any initial discovery should be completed. 7569 */ 7570 vport->load_flag |= FC_ALLOW_FDMI; 7571 if (phba->cfg_enable_SmartSAN || 7572 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 7573 7574 /* Setup appropriate attribute masks */ 7575 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 7576 if (phba->cfg_enable_SmartSAN) 7577 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 7578 else 7579 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 7580 } 7581 return 0; 7582 } 7583 7584 /** 7585 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 7586 * @phba: pointer to lpfc hba data structure. 7587 * 7588 * This routine is invoked to destroy HBA physical port and the associated 7589 * SCSI host. 7590 **/ 7591 static void 7592 lpfc_destroy_shost(struct lpfc_hba *phba) 7593 { 7594 struct lpfc_vport *vport = phba->pport; 7595 7596 /* Destroy physical port that associated with the SCSI host */ 7597 destroy_port(vport); 7598 7599 return; 7600 } 7601 7602 /** 7603 * lpfc_setup_bg - Setup Block guard structures and debug areas. 7604 * @phba: pointer to lpfc hba data structure. 7605 * @shost: the shost to be used to detect Block guard settings. 7606 * 7607 * This routine sets up the local Block guard protocol settings for @shost. 7608 * This routine also allocates memory for debugging bg buffers. 
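 *
 * For example (illustrative): if only SHOST_DIX_TYPE1_PROTECTION is
 * configured, the routine below promotes the mask to also include
 * SHOST_DIF_TYPE1_PROTECTION, since Type 1 DIX for the AST1/C1 profiles is
 * end to end; any unsupported mask or guard bits are masked off, and a
 * message is logged if the effective values differ from the configured ones.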
7609 **/ 7610 static void 7611 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 7612 { 7613 uint32_t old_mask; 7614 uint32_t old_guard; 7615 7616 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7617 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7618 "1478 Registering BlockGuard with the " 7619 "SCSI layer\n"); 7620 7621 old_mask = phba->cfg_prot_mask; 7622 old_guard = phba->cfg_prot_guard; 7623 7624 /* Only allow supported values */ 7625 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 7626 SHOST_DIX_TYPE0_PROTECTION | 7627 SHOST_DIX_TYPE1_PROTECTION); 7628 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 7629 SHOST_DIX_GUARD_CRC); 7630 7631 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 7632 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 7633 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 7634 7635 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7636 if ((old_mask != phba->cfg_prot_mask) || 7637 (old_guard != phba->cfg_prot_guard)) 7638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7639 "1475 Registering BlockGuard with the " 7640 "SCSI layer: mask %d guard %d\n", 7641 phba->cfg_prot_mask, 7642 phba->cfg_prot_guard); 7643 7644 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7645 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7646 } else 7647 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7648 "1479 Not Registering BlockGuard with the SCSI " 7649 "layer, Bad protection parameters: %d %d\n", 7650 old_mask, old_guard); 7651 } 7652 } 7653 7654 /** 7655 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7656 * @phba: pointer to lpfc hba data structure. 7657 * 7658 * This routine is invoked to perform all the necessary post initialization 7659 * setup for the device. 7660 **/ 7661 static void 7662 lpfc_post_init_setup(struct lpfc_hba *phba) 7663 { 7664 struct Scsi_Host *shost; 7665 struct lpfc_adapter_event_header adapter_event; 7666 7667 /* Get the default values for Model Name and Description */ 7668 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7669 7670 /* 7671 * hba setup may have changed the hba_queue_depth so we need to 7672 * adjust the value of can_queue. 7673 */ 7674 shost = pci_get_drvdata(phba->pcidev); 7675 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7676 7677 lpfc_host_attrib_init(shost); 7678 7679 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7680 spin_lock_irq(shost->host_lock); 7681 lpfc_poll_start_timer(phba); 7682 spin_unlock_irq(shost->host_lock); 7683 } 7684 7685 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7686 "0428 Perform SCSI scan\n"); 7687 /* Send board arrival event to upper layer */ 7688 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7689 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7690 fc_host_post_vendor_event(shost, fc_get_event_number(), 7691 sizeof(adapter_event), 7692 (char *) &adapter_event, 7693 LPFC_NL_VENDOR_ID); 7694 return; 7695 } 7696 7697 /** 7698 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7699 * @phba: pointer to lpfc hba data structure. 7700 * 7701 * This routine is invoked to set up the PCI device memory space for device 7702 * with SLI-3 interface spec. 
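 *
 * On SLI-3 adapters BAR0 exposes the HBA SLIM and BAR2 the control
 * registers; both are ioremap()'d here, and coherent DMA regions are
 * allocated for the SLI-2 SLIM shadow (slim2p) and the HBQ entries
 * (hbqslimp) before the register pointers are derived.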
7703 * 7704 * Return codes 7705 * 0 - successful 7706 * other values - error 7707 **/ 7708 static int 7709 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7710 { 7711 struct pci_dev *pdev = phba->pcidev; 7712 unsigned long bar0map_len, bar2map_len; 7713 int i, hbq_count; 7714 void *ptr; 7715 int error; 7716 7717 if (!pdev) 7718 return -ENODEV; 7719 7720 /* Set the device DMA mask size */ 7721 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7722 if (error) 7723 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7724 if (error) 7725 return error; 7726 error = -ENODEV; 7727 7728 /* Get the bus address of Bar0 and Bar2 and the number of bytes 7729 * required by each mapping. 7730 */ 7731 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7732 bar0map_len = pci_resource_len(pdev, 0); 7733 7734 phba->pci_bar2_map = pci_resource_start(pdev, 2); 7735 bar2map_len = pci_resource_len(pdev, 2); 7736 7737 /* Map HBA SLIM to a kernel virtual address. */ 7738 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 7739 if (!phba->slim_memmap_p) { 7740 dev_printk(KERN_ERR, &pdev->dev, 7741 "ioremap failed for SLIM memory.\n"); 7742 goto out; 7743 } 7744 7745 /* Map HBA Control Registers to a kernel virtual address. */ 7746 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 7747 if (!phba->ctrl_regs_memmap_p) { 7748 dev_printk(KERN_ERR, &pdev->dev, 7749 "ioremap failed for HBA control registers.\n"); 7750 goto out_iounmap_slim; 7751 } 7752 7753 /* Allocate memory for SLI-2 structures */ 7754 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7755 &phba->slim2p.phys, GFP_KERNEL); 7756 if (!phba->slim2p.virt) 7757 goto out_iounmap; 7758 7759 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7760 phba->mbox_ext = (phba->slim2p.virt + 7761 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7762 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7763 phba->IOCBs = (phba->slim2p.virt + 7764 offsetof(struct lpfc_sli2_slim, IOCBs)); 7765 7766 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7767 lpfc_sli_hbq_size(), 7768 &phba->hbqslimp.phys, 7769 GFP_KERNEL); 7770 if (!phba->hbqslimp.virt) 7771 goto out_free_slim; 7772 7773 hbq_count = lpfc_sli_hbq_count(); 7774 ptr = phba->hbqslimp.virt; 7775 for (i = 0; i < hbq_count; ++i) { 7776 phba->hbqs[i].hbq_virt = ptr; 7777 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7778 ptr += (lpfc_hbq_defs[i]->entry_count * 7779 sizeof(struct lpfc_hbq_entry)); 7780 } 7781 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7782 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7783 7784 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7785 7786 phba->MBslimaddr = phba->slim_memmap_p; 7787 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7788 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7789 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7790 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7791 7792 return 0; 7793 7794 out_free_slim: 7795 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7796 phba->slim2p.virt, phba->slim2p.phys); 7797 out_iounmap: 7798 iounmap(phba->ctrl_regs_memmap_p); 7799 out_iounmap_slim: 7800 iounmap(phba->slim_memmap_p); 7801 out: 7802 return error; 7803 } 7804 7805 /** 7806 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7807 * @phba: pointer to lpfc hba data structure. 
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks.  The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
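 *
 * Each pointer cached here is just the BAR0 (configuration space) mapping
 * plus a fixed offset; later code accesses the port through these cached
 * addresses, for example
 *
 *   lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &reg_data.word0);
 *
 * as done in lpfc_sli4_post_status_check() above.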
7981 **/ 7982 static void 7983 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 7984 { 7985 switch (if_type) { 7986 case LPFC_SLI_INTF_IF_TYPE_0: 7987 phba->sli4_hba.u.if_type0.UERRLOregaddr = 7988 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 7989 phba->sli4_hba.u.if_type0.UERRHIregaddr = 7990 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 7991 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 7992 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 7993 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 7994 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 7995 phba->sli4_hba.SLIINTFregaddr = 7996 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 7997 break; 7998 case LPFC_SLI_INTF_IF_TYPE_2: 7999 phba->sli4_hba.u.if_type2.EQDregaddr = 8000 phba->sli4_hba.conf_regs_memmap_p + 8001 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8002 phba->sli4_hba.u.if_type2.ERR1regaddr = 8003 phba->sli4_hba.conf_regs_memmap_p + 8004 LPFC_CTL_PORT_ER1_OFFSET; 8005 phba->sli4_hba.u.if_type2.ERR2regaddr = 8006 phba->sli4_hba.conf_regs_memmap_p + 8007 LPFC_CTL_PORT_ER2_OFFSET; 8008 phba->sli4_hba.u.if_type2.CTRLregaddr = 8009 phba->sli4_hba.conf_regs_memmap_p + 8010 LPFC_CTL_PORT_CTL_OFFSET; 8011 phba->sli4_hba.u.if_type2.STATUSregaddr = 8012 phba->sli4_hba.conf_regs_memmap_p + 8013 LPFC_CTL_PORT_STA_OFFSET; 8014 phba->sli4_hba.SLIINTFregaddr = 8015 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 8016 phba->sli4_hba.PSMPHRregaddr = 8017 phba->sli4_hba.conf_regs_memmap_p + 8018 LPFC_CTL_PORT_SEM_OFFSET; 8019 phba->sli4_hba.RQDBregaddr = 8020 phba->sli4_hba.conf_regs_memmap_p + 8021 LPFC_ULP0_RQ_DOORBELL; 8022 phba->sli4_hba.WQDBregaddr = 8023 phba->sli4_hba.conf_regs_memmap_p + 8024 LPFC_ULP0_WQ_DOORBELL; 8025 phba->sli4_hba.CQDBregaddr = 8026 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 8027 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8028 phba->sli4_hba.MQDBregaddr = 8029 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 8030 phba->sli4_hba.BMBXregaddr = 8031 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8032 break; 8033 case LPFC_SLI_INTF_IF_TYPE_6: 8034 phba->sli4_hba.u.if_type2.EQDregaddr = 8035 phba->sli4_hba.conf_regs_memmap_p + 8036 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8037 phba->sli4_hba.u.if_type2.ERR1regaddr = 8038 phba->sli4_hba.conf_regs_memmap_p + 8039 LPFC_CTL_PORT_ER1_OFFSET; 8040 phba->sli4_hba.u.if_type2.ERR2regaddr = 8041 phba->sli4_hba.conf_regs_memmap_p + 8042 LPFC_CTL_PORT_ER2_OFFSET; 8043 phba->sli4_hba.u.if_type2.CTRLregaddr = 8044 phba->sli4_hba.conf_regs_memmap_p + 8045 LPFC_CTL_PORT_CTL_OFFSET; 8046 phba->sli4_hba.u.if_type2.STATUSregaddr = 8047 phba->sli4_hba.conf_regs_memmap_p + 8048 LPFC_CTL_PORT_STA_OFFSET; 8049 phba->sli4_hba.PSMPHRregaddr = 8050 phba->sli4_hba.conf_regs_memmap_p + 8051 LPFC_CTL_PORT_SEM_OFFSET; 8052 phba->sli4_hba.BMBXregaddr = 8053 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8054 break; 8055 case LPFC_SLI_INTF_IF_TYPE_1: 8056 default: 8057 dev_printk(KERN_ERR, &phba->pcidev->dev, 8058 "FATAL - unsupported SLI4 interface type - %d\n", 8059 if_type); 8060 break; 8061 } 8062 } 8063 8064 /** 8065 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 8066 * @phba: pointer to lpfc hba data structure. 8067 * 8068 * This routine is invoked to set up SLI4 BAR1 register memory map. 
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_SLIPORT_IF0_SMPHR;
		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISR0;
		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_IMR0;
		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISCR0;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_CQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_EQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_MQ_DOORBELL;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_err(&phba->pcidev->dev,
			"FATAL - unsupported SLI4 interface type - %d\n",
			if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
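 *
 * Illustrative sketch (how the port is presumed to reassemble the address;
 * not an API of this driver): the two posted words decode as
 *
 *   addr = (((u64)(addr_hi >> 2) & 0x3fffffff) << 34) |
 *          (((u64)(addr_lo >> 2) & 0x3fffffff) << 4);
 *
 * i.e. the low word carries bits 33:4 and the high word bits 63:34 of the
 * 16-byte-aligned bootstrap mailbox address, the inverse of the packing
 * performed below.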
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to tear down the bootstrap mailbox
 * region and release all host resources.  This routine requires
 * the caller to ensure all outstanding mailbox commands have been
 * recovered, no additional mailbox commands are sent, and interrupts
 * are disabled before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
8251 **/ 8252 int 8253 lpfc_sli4_read_config(struct lpfc_hba *phba) 8254 { 8255 LPFC_MBOXQ_t *pmb; 8256 struct lpfc_mbx_read_config *rd_config; 8257 union lpfc_sli4_cfg_shdr *shdr; 8258 uint32_t shdr_status, shdr_add_status; 8259 struct lpfc_mbx_get_func_cfg *get_func_cfg; 8260 struct lpfc_rsrc_desc_fcfcoe *desc; 8261 char *pdesc_0; 8262 uint16_t forced_link_speed; 8263 uint32_t if_type, qmin; 8264 int length, i, rc = 0, rc2; 8265 8266 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8267 if (!pmb) { 8268 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8269 "2011 Unable to allocate memory for issuing " 8270 "SLI_CONFIG_SPECIAL mailbox command\n"); 8271 return -ENOMEM; 8272 } 8273 8274 lpfc_read_config(phba, pmb); 8275 8276 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8277 if (rc != MBX_SUCCESS) { 8278 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8279 "2012 Mailbox failed , mbxCmd x%x " 8280 "READ_CONFIG, mbxStatus x%x\n", 8281 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8282 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8283 rc = -EIO; 8284 } else { 8285 rd_config = &pmb->u.mqe.un.rd_config; 8286 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 8287 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 8288 phba->sli4_hba.lnk_info.lnk_tp = 8289 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 8290 phba->sli4_hba.lnk_info.lnk_no = 8291 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 8292 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8293 "3081 lnk_type:%d, lnk_numb:%d\n", 8294 phba->sli4_hba.lnk_info.lnk_tp, 8295 phba->sli4_hba.lnk_info.lnk_no); 8296 } else 8297 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8298 "3082 Mailbox (x%x) returned ldv:x0\n", 8299 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 8300 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 8301 phba->bbcredit_support = 1; 8302 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 8303 } 8304 8305 phba->sli4_hba.conf_trunk = 8306 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 8307 phba->sli4_hba.extents_in_use = 8308 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 8309 phba->sli4_hba.max_cfg_param.max_xri = 8310 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 8311 /* Reduce resource usage in kdump environment */ 8312 if (is_kdump_kernel() && 8313 phba->sli4_hba.max_cfg_param.max_xri > 512) 8314 phba->sli4_hba.max_cfg_param.max_xri = 512; 8315 phba->sli4_hba.max_cfg_param.xri_base = 8316 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 8317 phba->sli4_hba.max_cfg_param.max_vpi = 8318 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 8319 /* Limit the max we support */ 8320 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 8321 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 8322 phba->sli4_hba.max_cfg_param.vpi_base = 8323 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 8324 phba->sli4_hba.max_cfg_param.max_rpi = 8325 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 8326 phba->sli4_hba.max_cfg_param.rpi_base = 8327 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 8328 phba->sli4_hba.max_cfg_param.max_vfi = 8329 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 8330 phba->sli4_hba.max_cfg_param.vfi_base = 8331 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 8332 phba->sli4_hba.max_cfg_param.max_fcfi = 8333 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 8334 phba->sli4_hba.max_cfg_param.max_eq = 8335 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 8336 phba->sli4_hba.max_cfg_param.max_rq = 8337 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 8338 phba->sli4_hba.max_cfg_param.max_wq = 8339 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 8340 
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq);

		/*
		 * Calculate queue resources based on how
		 * many WQ/CQ/EQs are available.
		 */
		qmin = phba->sli4_hba.max_cfg_param.max_wq;
		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_cq;
		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_eq;
		/*
		 * What's left after this can go toward NVME / FCP.
		 * The minus 4 accounts for ELS, NVME LS, MBOX
		 * plus one extra.  When configured for
		 * NVMET, FCP io channel WQs are not created.
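		 *
		 * Worked example (illustrative numbers only): if READ_CONFIG
		 * reported max_wq=128, max_cq=160 and max_eq=64, then
		 * qmin = 64 - 4 = 60 and both cfg_irq_chann and
		 * cfg_hdw_queue are clamped to 60 below.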
8385 */ 8386 qmin -= 4; 8387 8388 /* Check to see if there is enough for NVME */ 8389 if ((phba->cfg_irq_chann > qmin) || 8390 (phba->cfg_hdw_queue > qmin)) { 8391 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8392 "2005 Reducing Queues: " 8393 "WQ %d CQ %d EQ %d: min %d: " 8394 "IRQ %d HDWQ %d\n", 8395 phba->sli4_hba.max_cfg_param.max_wq, 8396 phba->sli4_hba.max_cfg_param.max_cq, 8397 phba->sli4_hba.max_cfg_param.max_eq, 8398 qmin, phba->cfg_irq_chann, 8399 phba->cfg_hdw_queue); 8400 8401 if (phba->cfg_irq_chann > qmin) 8402 phba->cfg_irq_chann = qmin; 8403 if (phba->cfg_hdw_queue > qmin) 8404 phba->cfg_hdw_queue = qmin; 8405 } 8406 } 8407 8408 if (rc) 8409 goto read_cfg_out; 8410 8411 /* Update link speed if forced link speed is supported */ 8412 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8413 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 8414 forced_link_speed = 8415 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 8416 if (forced_link_speed) { 8417 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 8418 8419 switch (forced_link_speed) { 8420 case LINK_SPEED_1G: 8421 phba->cfg_link_speed = 8422 LPFC_USER_LINK_SPEED_1G; 8423 break; 8424 case LINK_SPEED_2G: 8425 phba->cfg_link_speed = 8426 LPFC_USER_LINK_SPEED_2G; 8427 break; 8428 case LINK_SPEED_4G: 8429 phba->cfg_link_speed = 8430 LPFC_USER_LINK_SPEED_4G; 8431 break; 8432 case LINK_SPEED_8G: 8433 phba->cfg_link_speed = 8434 LPFC_USER_LINK_SPEED_8G; 8435 break; 8436 case LINK_SPEED_10G: 8437 phba->cfg_link_speed = 8438 LPFC_USER_LINK_SPEED_10G; 8439 break; 8440 case LINK_SPEED_16G: 8441 phba->cfg_link_speed = 8442 LPFC_USER_LINK_SPEED_16G; 8443 break; 8444 case LINK_SPEED_32G: 8445 phba->cfg_link_speed = 8446 LPFC_USER_LINK_SPEED_32G; 8447 break; 8448 case LINK_SPEED_64G: 8449 phba->cfg_link_speed = 8450 LPFC_USER_LINK_SPEED_64G; 8451 break; 8452 case 0xffff: 8453 phba->cfg_link_speed = 8454 LPFC_USER_LINK_SPEED_AUTO; 8455 break; 8456 default: 8457 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8458 "0047 Unrecognized link " 8459 "speed : %d\n", 8460 forced_link_speed); 8461 phba->cfg_link_speed = 8462 LPFC_USER_LINK_SPEED_AUTO; 8463 } 8464 } 8465 } 8466 8467 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 8468 length = phba->sli4_hba.max_cfg_param.max_xri - 8469 lpfc_sli4_get_els_iocb_cnt(phba); 8470 if (phba->cfg_hba_queue_depth > length) { 8471 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8472 "3361 HBA queue depth changed from %d to %d\n", 8473 phba->cfg_hba_queue_depth, length); 8474 phba->cfg_hba_queue_depth = length; 8475 } 8476 8477 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 8478 LPFC_SLI_INTF_IF_TYPE_2) 8479 goto read_cfg_out; 8480 8481 /* get the pf# and vf# for SLI4 if_type 2 port */ 8482 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 8483 sizeof(struct lpfc_sli4_cfg_mhdr)); 8484 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 8485 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 8486 length, LPFC_SLI4_MBX_EMBED); 8487 8488 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8489 shdr = (union lpfc_sli4_cfg_shdr *) 8490 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 8491 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8492 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8493 if (rc2 || shdr_status || shdr_add_status) { 8494 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8495 "3026 Mailbox failed , mbxCmd x%x " 8496 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 8497 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8498 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8499 goto read_cfg_out; 8500 } 8501 8502 /* search 
for fc_fcoe resrouce descriptor */ 8503 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 8504 8505 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 8506 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 8507 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 8508 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 8509 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 8510 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 8511 goto read_cfg_out; 8512 8513 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 8514 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 8515 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 8516 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 8517 phba->sli4_hba.iov.pf_number = 8518 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 8519 phba->sli4_hba.iov.vf_number = 8520 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 8521 break; 8522 } 8523 } 8524 8525 if (i < LPFC_RSRC_DESC_MAX_NUM) 8526 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8527 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 8528 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 8529 phba->sli4_hba.iov.vf_number); 8530 else 8531 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8532 "3028 GET_FUNCTION_CONFIG: failed to find " 8533 "Resource Descriptor:x%x\n", 8534 LPFC_RSRC_DESC_TYPE_FCFCOE); 8535 8536 read_cfg_out: 8537 mempool_free(pmb, phba->mbox_mem_pool); 8538 return rc; 8539 } 8540 8541 /** 8542 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 8543 * @phba: pointer to lpfc hba data structure. 8544 * 8545 * This routine is invoked to setup the port-side endian order when 8546 * the port if_type is 0. This routine has no function for other 8547 * if_types. 8548 * 8549 * Return codes 8550 * 0 - successful 8551 * -ENOMEM - No available memory 8552 * -EIO - The mailbox failed to complete successfully. 8553 **/ 8554 static int 8555 lpfc_setup_endian_order(struct lpfc_hba *phba) 8556 { 8557 LPFC_MBOXQ_t *mboxq; 8558 uint32_t if_type, rc = 0; 8559 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 8560 HOST_ENDIAN_HIGH_WORD1}; 8561 8562 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8563 switch (if_type) { 8564 case LPFC_SLI_INTF_IF_TYPE_0: 8565 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8566 GFP_KERNEL); 8567 if (!mboxq) { 8568 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8569 "0492 Unable to allocate memory for " 8570 "issuing SLI_CONFIG_SPECIAL mailbox " 8571 "command\n"); 8572 return -ENOMEM; 8573 } 8574 8575 /* 8576 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 8577 * two words to contain special data values and no other data. 8578 */ 8579 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 8580 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8581 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8582 if (rc != MBX_SUCCESS) { 8583 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8584 "0493 SLI_CONFIG_SPECIAL mailbox " 8585 "failed with status x%x\n", 8586 rc); 8587 rc = -EIO; 8588 } 8589 mempool_free(mboxq, phba->mbox_mem_pool); 8590 break; 8591 case LPFC_SLI_INTF_IF_TYPE_6: 8592 case LPFC_SLI_INTF_IF_TYPE_2: 8593 case LPFC_SLI_INTF_IF_TYPE_1: 8594 default: 8595 break; 8596 } 8597 return rc; 8598 } 8599 8600 /** 8601 * lpfc_sli4_queue_verify - Verify and update EQ counts 8602 * @phba: pointer to lpfc hba data structure. 8603 * 8604 * This routine is invoked to check the user settable queue counts for EQs. 
8605 * After this routine is called the counts will be set to valid values that 8606 * adhere to the constraints of the system's interrupt vectors and the port's 8607 * queue resources. 8608 * 8609 * Return codes 8610 * 0 - successful 8611 * -ENOMEM - No available memory 8612 **/ 8613 static int 8614 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 8615 { 8616 /* 8617 * Sanity check for configured queue parameters against the run-time 8618 * device parameters 8619 */ 8620 8621 if (phba->nvmet_support) { 8622 if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq) 8623 phba->cfg_nvmet_mrq = phba->cfg_irq_chann; 8624 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 8625 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 8626 } 8627 8628 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8629 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 8630 phba->cfg_hdw_queue, phba->cfg_irq_chann, 8631 phba->cfg_nvmet_mrq); 8632 8633 /* Get EQ depth from module parameter, fake the default for now */ 8634 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8635 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8636 8637 /* Get CQ depth from module parameter, fake the default for now */ 8638 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8639 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8640 return 0; 8641 } 8642 8643 static int 8644 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 8645 { 8646 struct lpfc_queue *qdesc; 8647 u32 wqesize; 8648 int cpu; 8649 8650 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 8651 /* Create Fast Path IO CQs */ 8652 if (phba->enab_exp_wqcq_pages) 8653 /* Increase the CQ size when WQEs contain an embedded cdb */ 8654 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8655 phba->sli4_hba.cq_esize, 8656 LPFC_CQE_EXP_COUNT, cpu); 8657 8658 else 8659 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8660 phba->sli4_hba.cq_esize, 8661 phba->sli4_hba.cq_ecount, cpu); 8662 if (!qdesc) { 8663 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8664 "0499 Failed allocate fast-path IO CQ (%d)\n", idx); 8665 return 1; 8666 } 8667 qdesc->qe_valid = 1; 8668 qdesc->hdwq = idx; 8669 qdesc->chann = cpu; 8670 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 8671 8672 /* Create Fast Path IO WQs */ 8673 if (phba->enab_exp_wqcq_pages) { 8674 /* Increase the WQ size when WQEs contain an embedded cdb */ 8675 wqesize = (phba->fcp_embed_io) ? 8676 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 8677 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8678 wqesize, 8679 LPFC_WQE_EXP_COUNT, cpu); 8680 } else 8681 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8682 phba->sli4_hba.wq_esize, 8683 phba->sli4_hba.wq_ecount, cpu); 8684 8685 if (!qdesc) { 8686 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8687 "0503 Failed allocate fast-path IO WQ (%d)\n", 8688 idx); 8689 return 1; 8690 } 8691 qdesc->hdwq = idx; 8692 qdesc->chann = cpu; 8693 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 8694 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8695 return 0; 8696 } 8697 8698 /** 8699 * lpfc_sli4_queue_create - Create all the SLI4 queues 8700 * @phba: pointer to lpfc hba data structure. 8701 * 8702 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 8703 * operation. For each SLI4 queue type, the parameters such as queue entry 8704 * count (queue depth) shall be taken from the module parameter. For now, 8705 * we just use some constant number as place holder. 8706 * 8707 * Return codes 8708 * 0 - successful 8709 * -ENOMEM - No availble memory 8710 * -EIO - The mailbox failed to complete successfully. 
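 *
 * The allocation order below is: one EQ per IRQ vector (allocated against
 * the first CPU mapped to that vector), a fast-path CQ/WQ pair per hardware
 * queue, the optional NVMET CQ set, and finally the slow-path mailbox/ELS/
 * NVME-LS CQs and WQs plus the header/data receive queue pair (and the
 * NVMET MRQ header/data pairs when in target mode).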
8711 **/ 8712 int 8713 lpfc_sli4_queue_create(struct lpfc_hba *phba) 8714 { 8715 struct lpfc_queue *qdesc; 8716 int idx, cpu, eqcpu; 8717 struct lpfc_sli4_hdw_queue *qp; 8718 struct lpfc_vector_map_info *cpup; 8719 struct lpfc_vector_map_info *eqcpup; 8720 struct lpfc_eq_intr_info *eqi; 8721 8722 /* 8723 * Create HBA Record arrays. 8724 * Both NVME and FCP will share that same vectors / EQs 8725 */ 8726 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 8727 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 8728 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 8729 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 8730 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 8731 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 8732 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8733 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8734 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8735 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8736 8737 if (!phba->sli4_hba.hdwq) { 8738 phba->sli4_hba.hdwq = kcalloc( 8739 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 8740 GFP_KERNEL); 8741 if (!phba->sli4_hba.hdwq) { 8742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8743 "6427 Failed allocate memory for " 8744 "fast-path Hardware Queue array\n"); 8745 goto out_error; 8746 } 8747 /* Prepare hardware queues to take IO buffers */ 8748 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8749 qp = &phba->sli4_hba.hdwq[idx]; 8750 spin_lock_init(&qp->io_buf_list_get_lock); 8751 spin_lock_init(&qp->io_buf_list_put_lock); 8752 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 8753 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 8754 qp->get_io_bufs = 0; 8755 qp->put_io_bufs = 0; 8756 qp->total_io_bufs = 0; 8757 spin_lock_init(&qp->abts_io_buf_list_lock); 8758 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 8759 qp->abts_scsi_io_bufs = 0; 8760 qp->abts_nvme_io_bufs = 0; 8761 INIT_LIST_HEAD(&qp->sgl_list); 8762 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 8763 spin_lock_init(&qp->hdwq_lock); 8764 } 8765 } 8766 8767 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8768 if (phba->nvmet_support) { 8769 phba->sli4_hba.nvmet_cqset = kcalloc( 8770 phba->cfg_nvmet_mrq, 8771 sizeof(struct lpfc_queue *), 8772 GFP_KERNEL); 8773 if (!phba->sli4_hba.nvmet_cqset) { 8774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8775 "3121 Fail allocate memory for " 8776 "fast-path CQ set array\n"); 8777 goto out_error; 8778 } 8779 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 8780 phba->cfg_nvmet_mrq, 8781 sizeof(struct lpfc_queue *), 8782 GFP_KERNEL); 8783 if (!phba->sli4_hba.nvmet_mrq_hdr) { 8784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8785 "3122 Fail allocate memory for " 8786 "fast-path RQ set hdr array\n"); 8787 goto out_error; 8788 } 8789 phba->sli4_hba.nvmet_mrq_data = kcalloc( 8790 phba->cfg_nvmet_mrq, 8791 sizeof(struct lpfc_queue *), 8792 GFP_KERNEL); 8793 if (!phba->sli4_hba.nvmet_mrq_data) { 8794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8795 "3124 Fail allocate memory for " 8796 "fast-path RQ set data array\n"); 8797 goto out_error; 8798 } 8799 } 8800 } 8801 8802 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8803 8804 /* Create HBA Event Queues (EQs) */ 8805 for_each_present_cpu(cpu) { 8806 /* We only want to create 1 EQ per vector, even though 8807 * multiple CPUs might be using that vector. so only 8808 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 
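		 *
		 * For example (illustrative): if two SMT sibling CPUs share
		 * one IRQ vector, only the CPU flagged LPFC_CPU_FIRST_IRQ
		 * gets an EQ allocated in this loop; the follow-up loop
		 * below points the remaining hardware queues at that same
		 * shared EQ.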
8809 */ 8810 cpup = &phba->sli4_hba.cpu_map[cpu]; 8811 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 8812 continue; 8813 8814 /* Get a ptr to the Hardware Queue associated with this CPU */ 8815 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 8816 8817 /* Allocate an EQ */ 8818 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8819 phba->sli4_hba.eq_esize, 8820 phba->sli4_hba.eq_ecount, cpu); 8821 if (!qdesc) { 8822 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8823 "0497 Failed allocate EQ (%d)\n", 8824 cpup->hdwq); 8825 goto out_error; 8826 } 8827 qdesc->qe_valid = 1; 8828 qdesc->hdwq = cpup->hdwq; 8829 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 8830 qdesc->last_cpu = qdesc->chann; 8831 8832 /* Save the allocated EQ in the Hardware Queue */ 8833 qp->hba_eq = qdesc; 8834 8835 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 8836 list_add(&qdesc->cpu_list, &eqi->list); 8837 } 8838 8839 /* Now we need to populate the other Hardware Queues, that share 8840 * an IRQ vector, with the associated EQ ptr. 8841 */ 8842 for_each_present_cpu(cpu) { 8843 cpup = &phba->sli4_hba.cpu_map[cpu]; 8844 8845 /* Check for EQ already allocated in previous loop */ 8846 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 8847 continue; 8848 8849 /* Check for multiple CPUs per hdwq */ 8850 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 8851 if (qp->hba_eq) 8852 continue; 8853 8854 /* We need to share an EQ for this hdwq */ 8855 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 8856 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 8857 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 8858 } 8859 8860 /* Allocate IO Path SLI4 CQ/WQs */ 8861 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8862 if (lpfc_alloc_io_wq_cq(phba, idx)) 8863 goto out_error; 8864 } 8865 8866 if (phba->nvmet_support) { 8867 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8868 cpu = lpfc_find_cpu_handle(phba, idx, 8869 LPFC_FIND_BY_HDWQ); 8870 qdesc = lpfc_sli4_queue_alloc(phba, 8871 LPFC_DEFAULT_PAGE_SIZE, 8872 phba->sli4_hba.cq_esize, 8873 phba->sli4_hba.cq_ecount, 8874 cpu); 8875 if (!qdesc) { 8876 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8877 "3142 Failed allocate NVME " 8878 "CQ Set (%d)\n", idx); 8879 goto out_error; 8880 } 8881 qdesc->qe_valid = 1; 8882 qdesc->hdwq = idx; 8883 qdesc->chann = cpu; 8884 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 8885 } 8886 } 8887 8888 /* 8889 * Create Slow Path Completion Queues (CQs) 8890 */ 8891 8892 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 8893 /* Create slow-path Mailbox Command Complete Queue */ 8894 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8895 phba->sli4_hba.cq_esize, 8896 phba->sli4_hba.cq_ecount, cpu); 8897 if (!qdesc) { 8898 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8899 "0500 Failed allocate slow-path mailbox CQ\n"); 8900 goto out_error; 8901 } 8902 qdesc->qe_valid = 1; 8903 phba->sli4_hba.mbx_cq = qdesc; 8904 8905 /* Create slow-path ELS Complete Queue */ 8906 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8907 phba->sli4_hba.cq_esize, 8908 phba->sli4_hba.cq_ecount, cpu); 8909 if (!qdesc) { 8910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8911 "0501 Failed allocate slow-path ELS CQ\n"); 8912 goto out_error; 8913 } 8914 qdesc->qe_valid = 1; 8915 qdesc->chann = cpu; 8916 phba->sli4_hba.els_cq = qdesc; 8917 8918 8919 /* 8920 * Create Slow Path Work Queues (WQs) 8921 */ 8922 8923 /* Create Mailbox Command Queue */ 8924 8925 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8926 phba->sli4_hba.mq_esize, 8927 phba->sli4_hba.mq_ecount, 
cpu); 8928 if (!qdesc) { 8929 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8930 "0505 Failed allocate slow-path MQ\n"); 8931 goto out_error; 8932 } 8933 qdesc->chann = cpu; 8934 phba->sli4_hba.mbx_wq = qdesc; 8935 8936 /* 8937 * Create ELS Work Queues 8938 */ 8939 8940 /* Create slow-path ELS Work Queue */ 8941 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8942 phba->sli4_hba.wq_esize, 8943 phba->sli4_hba.wq_ecount, cpu); 8944 if (!qdesc) { 8945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8946 "0504 Failed allocate slow-path ELS WQ\n"); 8947 goto out_error; 8948 } 8949 qdesc->chann = cpu; 8950 phba->sli4_hba.els_wq = qdesc; 8951 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8952 8953 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8954 /* Create NVME LS Complete Queue */ 8955 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8956 phba->sli4_hba.cq_esize, 8957 phba->sli4_hba.cq_ecount, cpu); 8958 if (!qdesc) { 8959 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8960 "6079 Failed allocate NVME LS CQ\n"); 8961 goto out_error; 8962 } 8963 qdesc->chann = cpu; 8964 qdesc->qe_valid = 1; 8965 phba->sli4_hba.nvmels_cq = qdesc; 8966 8967 /* Create NVME LS Work Queue */ 8968 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8969 phba->sli4_hba.wq_esize, 8970 phba->sli4_hba.wq_ecount, cpu); 8971 if (!qdesc) { 8972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8973 "6080 Failed allocate NVME LS WQ\n"); 8974 goto out_error; 8975 } 8976 qdesc->chann = cpu; 8977 phba->sli4_hba.nvmels_wq = qdesc; 8978 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8979 } 8980 8981 /* 8982 * Create Receive Queue (RQ) 8983 */ 8984 8985 /* Create Receive Queue for header */ 8986 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8987 phba->sli4_hba.rq_esize, 8988 phba->sli4_hba.rq_ecount, cpu); 8989 if (!qdesc) { 8990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8991 "0506 Failed allocate receive HRQ\n"); 8992 goto out_error; 8993 } 8994 phba->sli4_hba.hdr_rq = qdesc; 8995 8996 /* Create Receive Queue for data */ 8997 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8998 phba->sli4_hba.rq_esize, 8999 phba->sli4_hba.rq_ecount, cpu); 9000 if (!qdesc) { 9001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9002 "0507 Failed allocate receive DRQ\n"); 9003 goto out_error; 9004 } 9005 phba->sli4_hba.dat_rq = qdesc; 9006 9007 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 9008 phba->nvmet_support) { 9009 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9010 cpu = lpfc_find_cpu_handle(phba, idx, 9011 LPFC_FIND_BY_HDWQ); 9012 /* Create NVMET Receive Queue for header */ 9013 qdesc = lpfc_sli4_queue_alloc(phba, 9014 LPFC_DEFAULT_PAGE_SIZE, 9015 phba->sli4_hba.rq_esize, 9016 LPFC_NVMET_RQE_DEF_COUNT, 9017 cpu); 9018 if (!qdesc) { 9019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9020 "3146 Failed allocate " 9021 "receive HRQ\n"); 9022 goto out_error; 9023 } 9024 qdesc->hdwq = idx; 9025 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 9026 9027 /* Only needed for header of RQ pair */ 9028 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 9029 GFP_KERNEL, 9030 cpu_to_node(cpu)); 9031 if (qdesc->rqbp == NULL) { 9032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9033 "6131 Failed allocate " 9034 "Header RQBP\n"); 9035 goto out_error; 9036 } 9037 9038 /* Put list in known state in case driver load fails. 
*/ 9039 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 9040 9041 /* Create NVMET Receive Queue for data */ 9042 qdesc = lpfc_sli4_queue_alloc(phba, 9043 LPFC_DEFAULT_PAGE_SIZE, 9044 phba->sli4_hba.rq_esize, 9045 LPFC_NVMET_RQE_DEF_COUNT, 9046 cpu); 9047 if (!qdesc) { 9048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9049 "3156 Failed allocate " 9050 "receive DRQ\n"); 9051 goto out_error; 9052 } 9053 qdesc->hdwq = idx; 9054 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 9055 } 9056 } 9057 9058 #if defined(BUILD_NVME) 9059 /* Clear NVME stats */ 9060 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9061 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9062 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 9063 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 9064 } 9065 } 9066 #endif 9067 9068 /* Clear SCSI stats */ 9069 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 9070 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9071 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 9072 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 9073 } 9074 } 9075 9076 return 0; 9077 9078 out_error: 9079 lpfc_sli4_queue_destroy(phba); 9080 return -ENOMEM; 9081 } 9082 9083 static inline void 9084 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 9085 { 9086 if (*qp != NULL) { 9087 lpfc_sli4_queue_free(*qp); 9088 *qp = NULL; 9089 } 9090 } 9091 9092 static inline void 9093 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 9094 { 9095 int idx; 9096 9097 if (*qs == NULL) 9098 return; 9099 9100 for (idx = 0; idx < max; idx++) 9101 __lpfc_sli4_release_queue(&(*qs)[idx]); 9102 9103 kfree(*qs); 9104 *qs = NULL; 9105 } 9106 9107 static inline void 9108 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 9109 { 9110 struct lpfc_sli4_hdw_queue *hdwq; 9111 struct lpfc_queue *eq; 9112 uint32_t idx; 9113 9114 hdwq = phba->sli4_hba.hdwq; 9115 9116 /* Loop thru all Hardware Queues */ 9117 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9118 /* Free the CQ/WQ corresponding to the Hardware Queue */ 9119 lpfc_sli4_queue_free(hdwq[idx].io_cq); 9120 lpfc_sli4_queue_free(hdwq[idx].io_wq); 9121 hdwq[idx].io_cq = NULL; 9122 hdwq[idx].io_wq = NULL; 9123 if (phba->cfg_xpsgl && !phba->nvmet_support) 9124 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 9125 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 9126 } 9127 /* Loop thru all IRQ vectors */ 9128 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 9129 /* Free the EQ corresponding to the IRQ vector */ 9130 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 9131 lpfc_sli4_queue_free(eq); 9132 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 9133 } 9134 } 9135 9136 /** 9137 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 9138 * @phba: pointer to lpfc hba data structure. 9139 * 9140 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 9141 * operation. 9142 * 9143 * Return codes 9144 * 0 - successful 9145 * -ENOMEM - No available memory 9146 * -EIO - The mailbox failed to complete successfully. 9147 **/ 9148 void 9149 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 9150 { 9151 /* 9152 * Set FREE_INIT before beginning to free the queues. 9153 * Wait until the users of queues to acknowledge to 9154 * release queues by clearing FREE_WAIT. 
9155 */ 9156 spin_lock_irq(&phba->hbalock); 9157 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 9158 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 9159 spin_unlock_irq(&phba->hbalock); 9160 msleep(20); 9161 spin_lock_irq(&phba->hbalock); 9162 } 9163 spin_unlock_irq(&phba->hbalock); 9164 9165 /* Release HBA eqs */ 9166 if (phba->sli4_hba.hdwq) 9167 lpfc_sli4_release_hdwq(phba); 9168 9169 if (phba->nvmet_support) { 9170 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 9171 phba->cfg_nvmet_mrq); 9172 9173 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 9174 phba->cfg_nvmet_mrq); 9175 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 9176 phba->cfg_nvmet_mrq); 9177 } 9178 9179 /* Release mailbox command work queue */ 9180 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 9181 9182 /* Release ELS work queue */ 9183 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 9184 9185 /* Release ELS work queue */ 9186 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 9187 9188 /* Release unsolicited receive queue */ 9189 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 9190 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 9191 9192 /* Release ELS complete queue */ 9193 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 9194 9195 /* Release NVME LS complete queue */ 9196 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 9197 9198 /* Release mailbox command complete queue */ 9199 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 9200 9201 /* Everything on this list has been freed */ 9202 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 9203 9204 /* Done with freeing the queues */ 9205 spin_lock_irq(&phba->hbalock); 9206 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 9207 spin_unlock_irq(&phba->hbalock); 9208 } 9209 9210 int 9211 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 9212 { 9213 struct lpfc_rqb *rqbp; 9214 struct lpfc_dmabuf *h_buf; 9215 struct rqb_dmabuf *rqb_buffer; 9216 9217 rqbp = rq->rqbp; 9218 while (!list_empty(&rqbp->rqb_buffer_list)) { 9219 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 9220 struct lpfc_dmabuf, list); 9221 9222 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 9223 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 9224 rqbp->buffer_count--; 9225 } 9226 return 1; 9227 } 9228 9229 static int 9230 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 9231 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 9232 int qidx, uint32_t qtype) 9233 { 9234 struct lpfc_sli_ring *pring; 9235 int rc; 9236 9237 if (!eq || !cq || !wq) { 9238 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9239 "6085 Fast-path %s (%d) not allocated\n", 9240 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 9241 return -ENOMEM; 9242 } 9243 9244 /* create the Cq first */ 9245 rc = lpfc_cq_create(phba, cq, eq, 9246 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 9247 if (rc) { 9248 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9249 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9250 qidx, (uint32_t)rc); 9251 return rc; 9252 } 9253 9254 if (qtype != LPFC_MBOX) { 9255 /* Setup cq_map for fast lookup */ 9256 if (cq_map) 9257 *cq_map = cq->queue_id; 9258 9259 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9260 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 9261 qidx, cq->queue_id, qidx, eq->queue_id); 9262 9263 /* create the wq */ 9264 rc = lpfc_wq_create(phba, wq, cq, qtype); 9265 if (rc) { 9266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9267 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 9268 qidx, (uint32_t)rc); 9269 /* no need to tear down cq - caller will do so */ 9270 return rc; 9271 } 9272 9273 /* Bind this CQ/WQ to the NVME ring */ 9274 pring = wq->pring; 9275 pring->sli.sli4.wqp = (void *)wq; 9276 cq->pring = pring; 9277 9278 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9279 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 9280 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 9281 } else { 9282 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 9283 if (rc) { 9284 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9285 "0539 Failed setup of slow-path MQ: " 9286 "rc = 0x%x\n", rc); 9287 /* no need to tear down cq - caller will do so */ 9288 return rc; 9289 } 9290 9291 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9292 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 9293 phba->sli4_hba.mbx_wq->queue_id, 9294 phba->sli4_hba.mbx_cq->queue_id); 9295 } 9296 9297 return 0; 9298 } 9299 9300 /** 9301 * lpfc_setup_cq_lookup - Setup the CQ lookup table 9302 * @phba: pointer to lpfc hba data structure. 9303 * 9304 * This routine will populate the cq_lookup table by all 9305 * available CQ queue_id's. 9306 **/ 9307 static void 9308 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 9309 { 9310 struct lpfc_queue *eq, *childq; 9311 int qidx; 9312 9313 memset(phba->sli4_hba.cq_lookup, 0, 9314 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 9315 /* Loop thru all IRQ vectors */ 9316 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9317 /* Get the EQ corresponding to the IRQ vector */ 9318 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9319 if (!eq) 9320 continue; 9321 /* Loop through all CQs associated with that EQ */ 9322 list_for_each_entry(childq, &eq->child_list, list) { 9323 if (childq->queue_id > phba->sli4_hba.cq_max) 9324 continue; 9325 if (childq->subtype == LPFC_IO) 9326 phba->sli4_hba.cq_lookup[childq->queue_id] = 9327 childq; 9328 } 9329 } 9330 } 9331 9332 /** 9333 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 9334 * @phba: pointer to lpfc hba data structure. 9335 * 9336 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 9337 * operation. 9338 * 9339 * Return codes 9340 * 0 - successful 9341 * -ENOMEM - No available memory 9342 * -EIO - The mailbox failed to complete successfully. 
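 *
 * Note: this routine only creates the queues on the port; it expects the
 * queue memory to have been allocated already (the hdwq array, the
 * mailbox/ELS/NVME LS CQ-WQ pairs and the header/data RQs), and on any
 * failure it tears down whatever it has created via
 * lpfc_sli4_queue_unset().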
9343 **/ 9344 int 9345 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 9346 { 9347 uint32_t shdr_status, shdr_add_status; 9348 union lpfc_sli4_cfg_shdr *shdr; 9349 struct lpfc_vector_map_info *cpup; 9350 struct lpfc_sli4_hdw_queue *qp; 9351 LPFC_MBOXQ_t *mboxq; 9352 int qidx, cpu; 9353 uint32_t length, usdelay; 9354 int rc = -ENOMEM; 9355 9356 /* Check for dual-ULP support */ 9357 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9358 if (!mboxq) { 9359 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9360 "3249 Unable to allocate memory for " 9361 "QUERY_FW_CFG mailbox command\n"); 9362 return -ENOMEM; 9363 } 9364 length = (sizeof(struct lpfc_mbx_query_fw_config) - 9365 sizeof(struct lpfc_sli4_cfg_mhdr)); 9366 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9367 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 9368 length, LPFC_SLI4_MBX_EMBED); 9369 9370 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9371 9372 shdr = (union lpfc_sli4_cfg_shdr *) 9373 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9374 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9375 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9376 if (shdr_status || shdr_add_status || rc) { 9377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9378 "3250 QUERY_FW_CFG mailbox failed with status " 9379 "x%x add_status x%x, mbx status x%x\n", 9380 shdr_status, shdr_add_status, rc); 9381 if (rc != MBX_TIMEOUT) 9382 mempool_free(mboxq, phba->mbox_mem_pool); 9383 rc = -ENXIO; 9384 goto out_error; 9385 } 9386 9387 phba->sli4_hba.fw_func_mode = 9388 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 9389 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 9390 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 9391 phba->sli4_hba.physical_port = 9392 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 9393 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9394 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 9395 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 9396 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 9397 9398 if (rc != MBX_TIMEOUT) 9399 mempool_free(mboxq, phba->mbox_mem_pool); 9400 9401 /* 9402 * Set up HBA Event Queues (EQs) 9403 */ 9404 qp = phba->sli4_hba.hdwq; 9405 9406 /* Set up HBA event queue */ 9407 if (!qp) { 9408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9409 "3147 Fast-path EQs not allocated\n"); 9410 rc = -ENOMEM; 9411 goto out_error; 9412 } 9413 9414 /* Loop thru all IRQ vectors */ 9415 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9416 /* Create HBA Event Queues (EQs) in order */ 9417 for_each_present_cpu(cpu) { 9418 cpup = &phba->sli4_hba.cpu_map[cpu]; 9419 9420 /* Look for the CPU thats using that vector with 9421 * LPFC_CPU_FIRST_IRQ set. 
9422 */ 9423 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 9424 continue; 9425 if (qidx != cpup->eq) 9426 continue; 9427 9428 /* Create an EQ for that vector */ 9429 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 9430 phba->cfg_fcp_imax); 9431 if (rc) { 9432 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9433 "0523 Failed setup of fast-path" 9434 " EQ (%d), rc = 0x%x\n", 9435 cpup->eq, (uint32_t)rc); 9436 goto out_destroy; 9437 } 9438 9439 /* Save the EQ for that vector in the hba_eq_hdl */ 9440 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 9441 qp[cpup->hdwq].hba_eq; 9442 9443 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9444 "2584 HBA EQ setup: queue[%d]-id=%d\n", 9445 cpup->eq, 9446 qp[cpup->hdwq].hba_eq->queue_id); 9447 } 9448 } 9449 9450 /* Loop thru all Hardware Queues */ 9451 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9452 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 9453 cpup = &phba->sli4_hba.cpu_map[cpu]; 9454 9455 /* Create the CQ/WQ corresponding to the Hardware Queue */ 9456 rc = lpfc_create_wq_cq(phba, 9457 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 9458 qp[qidx].io_cq, 9459 qp[qidx].io_wq, 9460 &phba->sli4_hba.hdwq[qidx].io_cq_map, 9461 qidx, 9462 LPFC_IO); 9463 if (rc) { 9464 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9465 "0535 Failed to setup fastpath " 9466 "IO WQ/CQ (%d), rc = 0x%x\n", 9467 qidx, (uint32_t)rc); 9468 goto out_destroy; 9469 } 9470 } 9471 9472 /* 9473 * Set up Slow Path Complete Queues (CQs) 9474 */ 9475 9476 /* Set up slow-path MBOX CQ/MQ */ 9477 9478 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 9479 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9480 "0528 %s not allocated\n", 9481 phba->sli4_hba.mbx_cq ? 9482 "Mailbox WQ" : "Mailbox CQ"); 9483 rc = -ENOMEM; 9484 goto out_destroy; 9485 } 9486 9487 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9488 phba->sli4_hba.mbx_cq, 9489 phba->sli4_hba.mbx_wq, 9490 NULL, 0, LPFC_MBOX); 9491 if (rc) { 9492 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9493 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 9494 (uint32_t)rc); 9495 goto out_destroy; 9496 } 9497 if (phba->nvmet_support) { 9498 if (!phba->sli4_hba.nvmet_cqset) { 9499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9500 "3165 Fast-path NVME CQ Set " 9501 "array not allocated\n"); 9502 rc = -ENOMEM; 9503 goto out_destroy; 9504 } 9505 if (phba->cfg_nvmet_mrq > 1) { 9506 rc = lpfc_cq_create_set(phba, 9507 phba->sli4_hba.nvmet_cqset, 9508 qp, 9509 LPFC_WCQ, LPFC_NVMET); 9510 if (rc) { 9511 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9512 "3164 Failed setup of NVME CQ " 9513 "Set, rc = 0x%x\n", 9514 (uint32_t)rc); 9515 goto out_destroy; 9516 } 9517 } else { 9518 /* Set up NVMET Receive Complete Queue */ 9519 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 9520 qp[0].hba_eq, 9521 LPFC_WCQ, LPFC_NVMET); 9522 if (rc) { 9523 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9524 "6089 Failed setup NVMET CQ: " 9525 "rc = 0x%x\n", (uint32_t)rc); 9526 goto out_destroy; 9527 } 9528 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 9529 9530 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9531 "6090 NVMET CQ setup: cq-id=%d, " 9532 "parent eq-id=%d\n", 9533 phba->sli4_hba.nvmet_cqset[0]->queue_id, 9534 qp[0].hba_eq->queue_id); 9535 } 9536 } 9537 9538 /* Set up slow-path ELS WQ/CQ */ 9539 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 9540 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9541 "0530 ELS %s not allocated\n", 9542 phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); 9543 rc = -ENOMEM; 9544 goto out_destroy; 9545 } 9546 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9547 phba->sli4_hba.els_cq, 9548 phba->sli4_hba.els_wq, 9549 NULL, 0, LPFC_ELS); 9550 if (rc) { 9551 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9552 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 9553 (uint32_t)rc); 9554 goto out_destroy; 9555 } 9556 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9557 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 9558 phba->sli4_hba.els_wq->queue_id, 9559 phba->sli4_hba.els_cq->queue_id); 9560 9561 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9562 /* Set up NVME LS Complete Queue */ 9563 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9564 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9565 "6091 LS %s not allocated\n", 9566 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 9567 rc = -ENOMEM; 9568 goto out_destroy; 9569 } 9570 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9571 phba->sli4_hba.nvmels_cq, 9572 phba->sli4_hba.nvmels_wq, 9573 NULL, 0, LPFC_NVME_LS); 9574 if (rc) { 9575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9576 "0526 Failed setup of NVVME LS WQ/CQ: " 9577 "rc = 0x%x\n", (uint32_t)rc); 9578 goto out_destroy; 9579 } 9580 9581 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9582 "6096 ELS WQ setup: wq-id=%d, " 9583 "parent cq-id=%d\n", 9584 phba->sli4_hba.nvmels_wq->queue_id, 9585 phba->sli4_hba.nvmels_cq->queue_id); 9586 } 9587 9588 /* 9589 * Create NVMET Receive Queue (RQ) 9590 */ 9591 if (phba->nvmet_support) { 9592 if ((!phba->sli4_hba.nvmet_cqset) || 9593 (!phba->sli4_hba.nvmet_mrq_hdr) || 9594 (!phba->sli4_hba.nvmet_mrq_data)) { 9595 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9596 "6130 MRQ CQ Queues not " 9597 "allocated\n"); 9598 rc = -ENOMEM; 9599 goto out_destroy; 9600 } 9601 if (phba->cfg_nvmet_mrq > 1) { 9602 rc = lpfc_mrq_create(phba, 9603 phba->sli4_hba.nvmet_mrq_hdr, 9604 phba->sli4_hba.nvmet_mrq_data, 9605 phba->sli4_hba.nvmet_cqset, 9606 LPFC_NVMET); 9607 if (rc) { 9608 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9609 "6098 Failed setup of NVMET " 9610 "MRQ: rc = 0x%x\n", 9611 (uint32_t)rc); 9612 goto out_destroy; 9613 } 9614 9615 } else { 9616 rc = lpfc_rq_create(phba, 9617 phba->sli4_hba.nvmet_mrq_hdr[0], 9618 phba->sli4_hba.nvmet_mrq_data[0], 9619 phba->sli4_hba.nvmet_cqset[0], 9620 LPFC_NVMET); 9621 if (rc) { 9622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9623 "6057 Failed setup of NVMET " 9624 "Receive Queue: rc = 0x%x\n", 9625 (uint32_t)rc); 9626 goto out_destroy; 9627 } 9628 9629 lpfc_printf_log( 9630 phba, KERN_INFO, LOG_INIT, 9631 "6099 NVMET RQ setup: hdr-rq-id=%d, " 9632 "dat-rq-id=%d parent cq-id=%d\n", 9633 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 9634 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 9635 phba->sli4_hba.nvmet_cqset[0]->queue_id); 9636 9637 } 9638 } 9639 9640 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 9641 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9642 "0540 Receive Queue not allocated\n"); 9643 rc = -ENOMEM; 9644 goto out_destroy; 9645 } 9646 9647 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 9648 phba->sli4_hba.els_cq, LPFC_USOL); 9649 if (rc) { 9650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9651 "0541 Failed setup of Receive Queue: " 9652 "rc = 0x%x\n", (uint32_t)rc); 9653 goto out_destroy; 9654 } 9655 9656 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9657 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 9658 "parent cq-id=%d\n", 9659 phba->sli4_hba.hdr_rq->queue_id, 9660 phba->sli4_hba.dat_rq->queue_id, 9661 phba->sli4_hba.els_cq->queue_id); 9662 9663 if 
(phba->cfg_fcp_imax) 9664 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 9665 else 9666 usdelay = 0; 9667 9668 for (qidx = 0; qidx < phba->cfg_irq_chann; 9669 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 9670 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 9671 usdelay); 9672 9673 if (phba->sli4_hba.cq_max) { 9674 kfree(phba->sli4_hba.cq_lookup); 9675 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 9676 sizeof(struct lpfc_queue *), GFP_KERNEL); 9677 if (!phba->sli4_hba.cq_lookup) { 9678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9679 "0549 Failed setup of CQ Lookup table: " 9680 "size 0x%x\n", phba->sli4_hba.cq_max); 9681 rc = -ENOMEM; 9682 goto out_destroy; 9683 } 9684 lpfc_setup_cq_lookup(phba); 9685 } 9686 return 0; 9687 9688 out_destroy: 9689 lpfc_sli4_queue_unset(phba); 9690 out_error: 9691 return rc; 9692 } 9693 9694 /** 9695 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 9696 * @phba: pointer to lpfc hba data structure. 9697 * 9698 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 9699 * operation. 9700 * 9701 * Return codes 9702 * 0 - successful 9703 * -ENOMEM - No available memory 9704 * -EIO - The mailbox failed to complete successfully. 9705 **/ 9706 void 9707 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 9708 { 9709 struct lpfc_sli4_hdw_queue *qp; 9710 struct lpfc_queue *eq; 9711 int qidx; 9712 9713 /* Unset mailbox command work queue */ 9714 if (phba->sli4_hba.mbx_wq) 9715 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 9716 9717 /* Unset NVME LS work queue */ 9718 if (phba->sli4_hba.nvmels_wq) 9719 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 9720 9721 /* Unset ELS work queue */ 9722 if (phba->sli4_hba.els_wq) 9723 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 9724 9725 /* Unset unsolicited receive queue */ 9726 if (phba->sli4_hba.hdr_rq) 9727 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 9728 phba->sli4_hba.dat_rq); 9729 9730 /* Unset mailbox command complete queue */ 9731 if (phba->sli4_hba.mbx_cq) 9732 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 9733 9734 /* Unset ELS complete queue */ 9735 if (phba->sli4_hba.els_cq) 9736 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 9737 9738 /* Unset NVME LS complete queue */ 9739 if (phba->sli4_hba.nvmels_cq) 9740 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 9741 9742 if (phba->nvmet_support) { 9743 /* Unset NVMET MRQ queue */ 9744 if (phba->sli4_hba.nvmet_mrq_hdr) { 9745 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9746 lpfc_rq_destroy( 9747 phba, 9748 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9749 phba->sli4_hba.nvmet_mrq_data[qidx]); 9750 } 9751 9752 /* Unset NVMET CQ Set complete queue */ 9753 if (phba->sli4_hba.nvmet_cqset) { 9754 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9755 lpfc_cq_destroy( 9756 phba, phba->sli4_hba.nvmet_cqset[qidx]); 9757 } 9758 } 9759 9760 /* Unset fast-path SLI4 queues */ 9761 if (phba->sli4_hba.hdwq) { 9762 /* Loop thru all Hardware Queues */ 9763 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9764 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 9765 qp = &phba->sli4_hba.hdwq[qidx]; 9766 lpfc_wq_destroy(phba, qp->io_wq); 9767 lpfc_cq_destroy(phba, qp->io_cq); 9768 } 9769 /* Loop thru all IRQ vectors */ 9770 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9771 /* Destroy the EQ corresponding to the IRQ vector */ 9772 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9773 lpfc_eq_destroy(phba, eq); 9774 } 9775 } 9776 9777 kfree(phba->sli4_hba.cq_lookup); 9778 phba->sli4_hba.cq_lookup = NULL; 9779 phba->sli4_hba.cq_max = 0; 9780 } 
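
/*
 * Queue lifecycle summary (a sketch of how the routines above fit
 * together; the allocation pass is the code earlier in this file that
 * calls lpfc_sli4_queue_alloc() for each EQ/CQ/WQ/RQ):
 *
 *   1. allocate host memory for every queue   - lpfc_sli4_queue_alloc()
 *   2. create the queues on the port          - lpfc_sli4_queue_setup(),
 *      via lpfc_eq_create/lpfc_cq_create/lpfc_wq_create/lpfc_mq_create/
 *      lpfc_rq_create
 *   3. destroy the queues on the port         - lpfc_sli4_queue_unset()
 *   4. free the host memory                   - lpfc_sli4_queue_destroy(),
 *      which releases each queue with lpfc_sli4_queue_free()
 */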
9781 9782 /** 9783 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 9784 * @phba: pointer to lpfc hba data structure. 9785 * 9786 * This routine is invoked to allocate and set up a pool of completion queue 9787 * events. The body of the completion queue event is a completion queue entry 9788 * CQE. For now, this pool is used for the interrupt service routine to queue 9789 * the following HBA completion queue events for the worker thread to process: 9790 * - Mailbox asynchronous events 9791 * - Receive queue completion unsolicited events 9792 * Later, this can be used for all the slow-path events. 9793 * 9794 * Return codes 9795 * 0 - successful 9796 * -ENOMEM - No available memory 9797 **/ 9798 static int 9799 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 9800 { 9801 struct lpfc_cq_event *cq_event; 9802 int i; 9803 9804 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 9805 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 9806 if (!cq_event) 9807 goto out_pool_create_fail; 9808 list_add_tail(&cq_event->list, 9809 &phba->sli4_hba.sp_cqe_event_pool); 9810 } 9811 return 0; 9812 9813 out_pool_create_fail: 9814 lpfc_sli4_cq_event_pool_destroy(phba); 9815 return -ENOMEM; 9816 } 9817 9818 /** 9819 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 9820 * @phba: pointer to lpfc hba data structure. 9821 * 9822 * This routine is invoked to free the pool of completion queue events at 9823 * driver unload time. Note that, it is the responsibility of the driver 9824 * cleanup routine to free all the outstanding completion-queue events 9825 * allocated from this pool back into the pool before invoking this routine 9826 * to destroy the pool. 9827 **/ 9828 static void 9829 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 9830 { 9831 struct lpfc_cq_event *cq_event, *next_cq_event; 9832 9833 list_for_each_entry_safe(cq_event, next_cq_event, 9834 &phba->sli4_hba.sp_cqe_event_pool, list) { 9835 list_del(&cq_event->list); 9836 kfree(cq_event); 9837 } 9838 } 9839 9840 /** 9841 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9842 * @phba: pointer to lpfc hba data structure. 9843 * 9844 * This routine is the lock free version of the API invoked to allocate a 9845 * completion-queue event from the free pool. 9846 * 9847 * Return: Pointer to the newly allocated completion-queue event if successful 9848 * NULL otherwise. 9849 **/ 9850 struct lpfc_cq_event * 9851 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9852 { 9853 struct lpfc_cq_event *cq_event = NULL; 9854 9855 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 9856 struct lpfc_cq_event, list); 9857 return cq_event; 9858 } 9859 9860 /** 9861 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9862 * @phba: pointer to lpfc hba data structure. 9863 * 9864 * This routine is the lock version of the API invoked to allocate a 9865 * completion-queue event from the free pool. 9866 * 9867 * Return: Pointer to the newly allocated completion-queue event if successful 9868 * NULL otherwise. 
**/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free all the pending completion-queue events
 * back into the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It will destroy
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 * 0 - successful
 * -ENOMEM - No available memory
 * -ENXIO - The SLI_FUNCTION_RESET mailbox command failed
 * -ENODEV - The port did not become ready after the reset
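 *
 * Note: for LPFC_SLI_INTF_IF_TYPE_0 the reset is requested with the
 * SLI_FUNCTION_RESET mailbox command; for IF_TYPE_2 and IF_TYPE_6 the
 * port is reset by writing LPFC_SLIPORT_INIT_PORT to the port control
 * register and then polling the SLIPORT status register for RDY
 * (20 ms per poll, for up to roughly 30 seconds).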
9959 **/ 9960 int 9961 lpfc_pci_function_reset(struct lpfc_hba *phba) 9962 { 9963 LPFC_MBOXQ_t *mboxq; 9964 uint32_t rc = 0, if_type; 9965 uint32_t shdr_status, shdr_add_status; 9966 uint32_t rdy_chk; 9967 uint32_t port_reset = 0; 9968 union lpfc_sli4_cfg_shdr *shdr; 9969 struct lpfc_register reg_data; 9970 uint16_t devid; 9971 9972 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9973 switch (if_type) { 9974 case LPFC_SLI_INTF_IF_TYPE_0: 9975 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 9976 GFP_KERNEL); 9977 if (!mboxq) { 9978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9979 "0494 Unable to allocate memory for " 9980 "issuing SLI_FUNCTION_RESET mailbox " 9981 "command\n"); 9982 return -ENOMEM; 9983 } 9984 9985 /* Setup PCI function reset mailbox-ioctl command */ 9986 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9987 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 9988 LPFC_SLI4_MBX_EMBED); 9989 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9990 shdr = (union lpfc_sli4_cfg_shdr *) 9991 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9992 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9993 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 9994 &shdr->response); 9995 if (rc != MBX_TIMEOUT) 9996 mempool_free(mboxq, phba->mbox_mem_pool); 9997 if (shdr_status || shdr_add_status || rc) { 9998 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9999 "0495 SLI_FUNCTION_RESET mailbox " 10000 "failed with status x%x add_status x%x," 10001 " mbx status x%x\n", 10002 shdr_status, shdr_add_status, rc); 10003 rc = -ENXIO; 10004 } 10005 break; 10006 case LPFC_SLI_INTF_IF_TYPE_2: 10007 case LPFC_SLI_INTF_IF_TYPE_6: 10008 wait: 10009 /* 10010 * Poll the Port Status Register and wait for RDY for 10011 * up to 30 seconds. If the port doesn't respond, treat 10012 * it as an error. 10013 */ 10014 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 10015 if (lpfc_readl(phba->sli4_hba.u.if_type2. 10016 STATUSregaddr, ®_data.word0)) { 10017 rc = -ENODEV; 10018 goto out; 10019 } 10020 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 10021 break; 10022 msleep(20); 10023 } 10024 10025 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 10026 phba->work_status[0] = readl( 10027 phba->sli4_hba.u.if_type2.ERR1regaddr); 10028 phba->work_status[1] = readl( 10029 phba->sli4_hba.u.if_type2.ERR2regaddr); 10030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10031 "2890 Port not ready, port status reg " 10032 "0x%x error 1=0x%x, error 2=0x%x\n", 10033 reg_data.word0, 10034 phba->work_status[0], 10035 phba->work_status[1]); 10036 rc = -ENODEV; 10037 goto out; 10038 } 10039 10040 if (!port_reset) { 10041 /* 10042 * Reset the port now 10043 */ 10044 reg_data.word0 = 0; 10045 bf_set(lpfc_sliport_ctrl_end, ®_data, 10046 LPFC_SLIPORT_LITTLE_ENDIAN); 10047 bf_set(lpfc_sliport_ctrl_ip, ®_data, 10048 LPFC_SLIPORT_INIT_PORT); 10049 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 10050 CTRLregaddr); 10051 /* flush */ 10052 pci_read_config_word(phba->pcidev, 10053 PCI_DEVICE_ID, &devid); 10054 10055 port_reset = 1; 10056 msleep(20); 10057 goto wait; 10058 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 10059 rc = -ENODEV; 10060 goto out; 10061 } 10062 break; 10063 10064 case LPFC_SLI_INTF_IF_TYPE_1: 10065 default: 10066 break; 10067 } 10068 10069 out: 10070 /* Catch the not-ready port failure after a port reset. 
*/ 10071 if (rc) { 10072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10073 "3317 HBA not functional: IP Reset Failed " 10074 "try: echo fw_reset > board_mode\n"); 10075 rc = -ENODEV; 10076 } 10077 10078 return rc; 10079 } 10080 10081 /** 10082 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 10083 * @phba: pointer to lpfc hba data structure. 10084 * 10085 * This routine is invoked to set up the PCI device memory space for device 10086 * with SLI-4 interface spec. 10087 * 10088 * Return codes 10089 * 0 - successful 10090 * other values - error 10091 **/ 10092 static int 10093 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 10094 { 10095 struct pci_dev *pdev = phba->pcidev; 10096 unsigned long bar0map_len, bar1map_len, bar2map_len; 10097 int error; 10098 uint32_t if_type; 10099 10100 if (!pdev) 10101 return -ENODEV; 10102 10103 /* Set the device DMA mask size */ 10104 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10105 if (error) 10106 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10107 if (error) 10108 return error; 10109 10110 /* 10111 * The BARs and register set definitions and offset locations are 10112 * dependent on the if_type. 10113 */ 10114 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 10115 &phba->sli4_hba.sli_intf.word0)) { 10116 return -ENODEV; 10117 } 10118 10119 /* There is no SLI3 failback for SLI4 devices. */ 10120 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 10121 LPFC_SLI_INTF_VALID) { 10122 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10123 "2894 SLI_INTF reg contents invalid " 10124 "sli_intf reg 0x%x\n", 10125 phba->sli4_hba.sli_intf.word0); 10126 return -ENODEV; 10127 } 10128 10129 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10130 /* 10131 * Get the bus address of SLI4 device Bar regions and the 10132 * number of bytes required by each mapping. The mapping of the 10133 * particular PCI BARs regions is dependent on the type of 10134 * SLI4 device. 
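	 *
	 * As laid out below: BAR0 always holds the SLI4 config registers
	 * (and is the only BAR mapped for if_type 2); for if_type 0, BAR2
	 * maps the control registers and BAR4 the doorbell registers; for
	 * if_type 6, BAR2 maps the doorbell registers and BAR4 the DPP
	 * registers.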
10135 */ 10136 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 10137 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 10138 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 10139 10140 /* 10141 * Map SLI4 PCI Config Space Register base to a kernel virtual 10142 * addr 10143 */ 10144 phba->sli4_hba.conf_regs_memmap_p = 10145 ioremap(phba->pci_bar0_map, bar0map_len); 10146 if (!phba->sli4_hba.conf_regs_memmap_p) { 10147 dev_printk(KERN_ERR, &pdev->dev, 10148 "ioremap failed for SLI4 PCI config " 10149 "registers.\n"); 10150 return -ENODEV; 10151 } 10152 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 10153 /* Set up BAR0 PCI config space register memory map */ 10154 lpfc_sli4_bar0_register_memmap(phba, if_type); 10155 } else { 10156 phba->pci_bar0_map = pci_resource_start(pdev, 1); 10157 bar0map_len = pci_resource_len(pdev, 1); 10158 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10159 dev_printk(KERN_ERR, &pdev->dev, 10160 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 10161 return -ENODEV; 10162 } 10163 phba->sli4_hba.conf_regs_memmap_p = 10164 ioremap(phba->pci_bar0_map, bar0map_len); 10165 if (!phba->sli4_hba.conf_regs_memmap_p) { 10166 dev_printk(KERN_ERR, &pdev->dev, 10167 "ioremap failed for SLI4 PCI config " 10168 "registers.\n"); 10169 return -ENODEV; 10170 } 10171 lpfc_sli4_bar0_register_memmap(phba, if_type); 10172 } 10173 10174 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10175 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 10176 /* 10177 * Map SLI4 if type 0 HBA Control Register base to a 10178 * kernel virtual address and setup the registers. 10179 */ 10180 phba->pci_bar1_map = pci_resource_start(pdev, 10181 PCI_64BIT_BAR2); 10182 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10183 phba->sli4_hba.ctrl_regs_memmap_p = 10184 ioremap(phba->pci_bar1_map, 10185 bar1map_len); 10186 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 10187 dev_err(&pdev->dev, 10188 "ioremap failed for SLI4 HBA " 10189 "control registers.\n"); 10190 error = -ENOMEM; 10191 goto out_iounmap_conf; 10192 } 10193 phba->pci_bar2_memmap_p = 10194 phba->sli4_hba.ctrl_regs_memmap_p; 10195 lpfc_sli4_bar1_register_memmap(phba, if_type); 10196 } else { 10197 error = -ENOMEM; 10198 goto out_iounmap_conf; 10199 } 10200 } 10201 10202 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 10203 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 10204 /* 10205 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 10206 * virtual address and setup the registers. 10207 */ 10208 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 10209 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10210 phba->sli4_hba.drbl_regs_memmap_p = 10211 ioremap(phba->pci_bar1_map, bar1map_len); 10212 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10213 dev_err(&pdev->dev, 10214 "ioremap failed for SLI4 HBA doorbell registers.\n"); 10215 error = -ENOMEM; 10216 goto out_iounmap_conf; 10217 } 10218 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 10219 lpfc_sli4_bar1_register_memmap(phba, if_type); 10220 } 10221 10222 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10223 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10224 /* 10225 * Map SLI4 if type 0 HBA Doorbell Register base to 10226 * a kernel virtual address and setup the registers. 
10227 */ 10228 phba->pci_bar2_map = pci_resource_start(pdev, 10229 PCI_64BIT_BAR4); 10230 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10231 phba->sli4_hba.drbl_regs_memmap_p = 10232 ioremap(phba->pci_bar2_map, 10233 bar2map_len); 10234 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10235 dev_err(&pdev->dev, 10236 "ioremap failed for SLI4 HBA" 10237 " doorbell registers.\n"); 10238 error = -ENOMEM; 10239 goto out_iounmap_ctrl; 10240 } 10241 phba->pci_bar4_memmap_p = 10242 phba->sli4_hba.drbl_regs_memmap_p; 10243 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 10244 if (error) 10245 goto out_iounmap_all; 10246 } else { 10247 error = -ENOMEM; 10248 goto out_iounmap_all; 10249 } 10250 } 10251 10252 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 10253 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10254 /* 10255 * Map SLI4 if type 6 HBA DPP Register base to a kernel 10256 * virtual address and setup the registers. 10257 */ 10258 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 10259 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10260 phba->sli4_hba.dpp_regs_memmap_p = 10261 ioremap(phba->pci_bar2_map, bar2map_len); 10262 if (!phba->sli4_hba.dpp_regs_memmap_p) { 10263 dev_err(&pdev->dev, 10264 "ioremap failed for SLI4 HBA dpp registers.\n"); 10265 error = -ENOMEM; 10266 goto out_iounmap_ctrl; 10267 } 10268 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 10269 } 10270 10271 /* Set up the EQ/CQ register handeling functions now */ 10272 switch (if_type) { 10273 case LPFC_SLI_INTF_IF_TYPE_0: 10274 case LPFC_SLI_INTF_IF_TYPE_2: 10275 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 10276 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 10277 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 10278 break; 10279 case LPFC_SLI_INTF_IF_TYPE_6: 10280 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 10281 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 10282 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 10283 break; 10284 default: 10285 break; 10286 } 10287 10288 return 0; 10289 10290 out_iounmap_all: 10291 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10292 out_iounmap_ctrl: 10293 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10294 out_iounmap_conf: 10295 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10296 10297 return error; 10298 } 10299 10300 /** 10301 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 10302 * @phba: pointer to lpfc hba data structure. 10303 * 10304 * This routine is invoked to unset the PCI device memory space for device 10305 * with SLI-4 interface spec. 
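 *
 * Note: this simply iounmap()s whatever lpfc_sli4_pci_mem_setup() mapped;
 * which BARs are unmapped depends on the SLI interface type.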
10306 **/ 10307 static void 10308 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 10309 { 10310 uint32_t if_type; 10311 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10312 10313 switch (if_type) { 10314 case LPFC_SLI_INTF_IF_TYPE_0: 10315 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10316 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10317 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10318 break; 10319 case LPFC_SLI_INTF_IF_TYPE_2: 10320 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10321 break; 10322 case LPFC_SLI_INTF_IF_TYPE_6: 10323 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10324 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10325 break; 10326 case LPFC_SLI_INTF_IF_TYPE_1: 10327 default: 10328 dev_printk(KERN_ERR, &phba->pcidev->dev, 10329 "FATAL - unsupported SLI4 interface type - %d\n", 10330 if_type); 10331 break; 10332 } 10333 } 10334 10335 /** 10336 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 10337 * @phba: pointer to lpfc hba data structure. 10338 * 10339 * This routine is invoked to enable the MSI-X interrupt vectors to device 10340 * with SLI-3 interface specs. 10341 * 10342 * Return codes 10343 * 0 - successful 10344 * other values - error 10345 **/ 10346 static int 10347 lpfc_sli_enable_msix(struct lpfc_hba *phba) 10348 { 10349 int rc; 10350 LPFC_MBOXQ_t *pmb; 10351 10352 /* Set up MSI-X multi-message vectors */ 10353 rc = pci_alloc_irq_vectors(phba->pcidev, 10354 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 10355 if (rc < 0) { 10356 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10357 "0420 PCI enable MSI-X failed (%d)\n", rc); 10358 goto vec_fail_out; 10359 } 10360 10361 /* 10362 * Assign MSI-X vectors to interrupt handlers 10363 */ 10364 10365 /* vector-0 is associated to slow-path handler */ 10366 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 10367 &lpfc_sli_sp_intr_handler, 0, 10368 LPFC_SP_DRIVER_HANDLER_NAME, phba); 10369 if (rc) { 10370 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10371 "0421 MSI-X slow-path request_irq failed " 10372 "(%d)\n", rc); 10373 goto msi_fail_out; 10374 } 10375 10376 /* vector-1 is associated to fast-path handler */ 10377 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 10378 &lpfc_sli_fp_intr_handler, 0, 10379 LPFC_FP_DRIVER_HANDLER_NAME, phba); 10380 10381 if (rc) { 10382 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10383 "0429 MSI-X fast-path request_irq failed " 10384 "(%d)\n", rc); 10385 goto irq_fail_out; 10386 } 10387 10388 /* 10389 * Configure HBA MSI-X attention conditions to messages 10390 */ 10391 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10392 10393 if (!pmb) { 10394 rc = -ENOMEM; 10395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10396 "0474 Unable to allocate memory for issuing " 10397 "MBOX_CONFIG_MSI command\n"); 10398 goto mem_fail_out; 10399 } 10400 rc = lpfc_config_msi(phba, pmb); 10401 if (rc) 10402 goto mbx_fail_out; 10403 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10404 if (rc != MBX_SUCCESS) { 10405 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 10406 "0351 Config MSI mailbox command failed, " 10407 "mbxCmd x%x, mbxStatus x%x\n", 10408 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 10409 goto mbx_fail_out; 10410 } 10411 10412 /* Free memory allocated for mailbox command */ 10413 mempool_free(pmb, phba->mbox_mem_pool); 10414 return rc; 10415 10416 mbx_fail_out: 10417 /* Free memory allocated for mailbox command */ 10418 mempool_free(pmb, phba->mbox_mem_pool); 10419 10420 mem_fail_out: 10421 /* free the irq already requested */ 10422 
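	/*
	 * The error labels below unwind in reverse order of setup: from
	 * here the fast-path IRQ (vector 1) is released, then irq_fail_out
	 * releases the slow-path IRQ (vector 0), and msi_fail_out frees
	 * the MSI-X vectors themselves.
	 */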
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode for a device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler, which
 * is done in this function.
 *
 * Return codes
 * 0 - successful
 * other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable device interrupts and associate the
 * driver's interrupt handler(s) with interrupt vector(s) for a device with
 * SLI-3 interface spec. Depending on the interrupt mode configured for the
 * driver, the driver will try to fall back from the configured interrupt
 * mode to an interrupt mode which is supported by the platform, kernel, and
 * device, in the order:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 * 0 - INTx mode was configured
 * 1 - MSI mode was configured
 * 2 - MSI-X mode was configured
 * LPFC_INTR_ERROR - no interrupt mode could be configured
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupts and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) for a device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	int nr_irqs, i;

	if (phba->intr_type == MSIX)
		nr_irqs = LPFC_MSIX_VECTORS;
	else
		nr_irqs = 1;

	for (i = 0; i < nr_irqs; i++)
		free_irq(pci_irq_vector(phba->pcidev, i), phba);
	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}

/**
 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
 * @phba: pointer to lpfc hba data structure.
 * @id: EQ vector index or Hardware Queue index
 * @match: LPFC_FIND_BY_EQ = match by EQ
 *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
 * Return the CPU that matches the selection criteria
 */
static uint16_t
lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
{
	struct lpfc_vector_map_info *cpup;
	int cpu;

	/* Loop through all CPUs */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* If we are matching by EQ, there may be multiple CPUs
		 * using the same vector, so select the one with
		 * LPFC_CPU_FIRST_IRQ set.
10583 */ 10584 if ((match == LPFC_FIND_BY_EQ) && 10585 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 10586 (cpup->irq != LPFC_VECTOR_MAP_EMPTY) && 10587 (cpup->eq == id)) 10588 return cpu; 10589 10590 /* If matching by HDWQ, select the first CPU that matches */ 10591 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 10592 return cpu; 10593 } 10594 return 0; 10595 } 10596 10597 #ifdef CONFIG_X86 10598 /** 10599 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 10600 * @phba: pointer to lpfc hba data structure. 10601 * @cpu: CPU map index 10602 * @phys_id: CPU package physical id 10603 * @core_id: CPU core id 10604 */ 10605 static int 10606 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 10607 uint16_t phys_id, uint16_t core_id) 10608 { 10609 struct lpfc_vector_map_info *cpup; 10610 int idx; 10611 10612 for_each_present_cpu(idx) { 10613 cpup = &phba->sli4_hba.cpu_map[idx]; 10614 /* Does the cpup match the one we are looking for */ 10615 if ((cpup->phys_id == phys_id) && 10616 (cpup->core_id == core_id) && 10617 (cpu != idx)) 10618 return 1; 10619 } 10620 return 0; 10621 } 10622 #endif 10623 10624 /** 10625 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 10626 * @phba: pointer to lpfc hba data structure. 10627 * @vectors: number of msix vectors allocated. 10628 * 10629 * The routine will figure out the CPU affinity assignment for every 10630 * MSI-X vector allocated for the HBA. 10631 * In addition, the CPU to IO channel mapping will be calculated 10632 * and the phba->sli4_hba.cpu_map array will reflect this. 10633 */ 10634 static void 10635 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 10636 { 10637 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; 10638 int max_phys_id, min_phys_id; 10639 int max_core_id, min_core_id; 10640 struct lpfc_vector_map_info *cpup; 10641 struct lpfc_vector_map_info *new_cpup; 10642 const struct cpumask *maskp; 10643 #ifdef CONFIG_X86 10644 struct cpuinfo_x86 *cpuinfo; 10645 #endif 10646 10647 /* Init cpu_map array */ 10648 for_each_possible_cpu(cpu) { 10649 cpup = &phba->sli4_hba.cpu_map[cpu]; 10650 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 10651 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 10652 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 10653 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 10654 cpup->irq = LPFC_VECTOR_MAP_EMPTY; 10655 cpup->flag = 0; 10656 } 10657 10658 max_phys_id = 0; 10659 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 10660 max_core_id = 0; 10661 min_core_id = LPFC_VECTOR_MAP_EMPTY; 10662 10663 /* Update CPU map with physical id and core id of each CPU */ 10664 for_each_present_cpu(cpu) { 10665 cpup = &phba->sli4_hba.cpu_map[cpu]; 10666 #ifdef CONFIG_X86 10667 cpuinfo = &cpu_data(cpu); 10668 cpup->phys_id = cpuinfo->phys_proc_id; 10669 cpup->core_id = cpuinfo->cpu_core_id; 10670 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 10671 cpup->flag |= LPFC_CPU_MAP_HYPER; 10672 #else 10673 /* No distinction between CPUs for other platforms */ 10674 cpup->phys_id = 0; 10675 cpup->core_id = cpu; 10676 #endif 10677 10678 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10679 "3328 CPU %d physid %d coreid %d flag x%x\n", 10680 cpu, cpup->phys_id, cpup->core_id, cpup->flag); 10681 10682 if (cpup->phys_id > max_phys_id) 10683 max_phys_id = cpup->phys_id; 10684 if (cpup->phys_id < min_phys_id) 10685 min_phys_id = cpup->phys_id; 10686 10687 if (cpup->core_id > max_core_id) 10688 max_core_id = cpup->core_id; 10689 if (cpup->core_id < min_core_id) 10690 min_core_id = cpup->core_id; 10691 } 10692 10693 for_each_possible_cpu(i) { 10694 
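		/* Initialize the per-CPU EQ interrupt info: an empty EQ
		 * list and a zeroed counter (EQs add themselves to these
		 * lists when they are allocated).
		 */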
struct lpfc_eq_intr_info *eqi = 10695 per_cpu_ptr(phba->sli4_hba.eq_info, i); 10696 10697 INIT_LIST_HEAD(&eqi->list); 10698 eqi->icnt = 0; 10699 } 10700 10701 /* This loop sets up all CPUs that are affinitized with a 10702 * irq vector assigned to the driver. All affinitized CPUs 10703 * will get a link to that vectors IRQ and EQ. 10704 * 10705 * NULL affinity mask handling: 10706 * If irq count is greater than one, log an error message. 10707 * If the null mask is received for the first irq, find the 10708 * first present cpu, and assign the eq index to ensure at 10709 * least one EQ is assigned. 10710 */ 10711 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10712 /* Get a CPU mask for all CPUs affinitized to this vector */ 10713 maskp = pci_irq_get_affinity(phba->pcidev, idx); 10714 if (!maskp) { 10715 if (phba->cfg_irq_chann > 1) 10716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10717 "3329 No affinity mask found " 10718 "for vector %d (%d)\n", 10719 idx, phba->cfg_irq_chann); 10720 if (!idx) { 10721 cpu = cpumask_first(cpu_present_mask); 10722 cpup = &phba->sli4_hba.cpu_map[cpu]; 10723 cpup->eq = idx; 10724 cpup->irq = pci_irq_vector(phba->pcidev, idx); 10725 cpup->flag |= LPFC_CPU_FIRST_IRQ; 10726 } 10727 break; 10728 } 10729 10730 i = 0; 10731 /* Loop through all CPUs associated with vector idx */ 10732 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 10733 /* Set the EQ index and IRQ for that vector */ 10734 cpup = &phba->sli4_hba.cpu_map[cpu]; 10735 cpup->eq = idx; 10736 cpup->irq = pci_irq_vector(phba->pcidev, idx); 10737 10738 /* If this is the first CPU thats assigned to this 10739 * vector, set LPFC_CPU_FIRST_IRQ. 10740 */ 10741 if (!i) 10742 cpup->flag |= LPFC_CPU_FIRST_IRQ; 10743 i++; 10744 10745 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10746 "3336 Set Affinity: CPU %d " 10747 "irq %d eq %d flag x%x\n", 10748 cpu, cpup->irq, cpup->eq, cpup->flag); 10749 } 10750 } 10751 10752 /* After looking at each irq vector assigned to this pcidev, its 10753 * possible to see that not ALL CPUs have been accounted for. 10754 * Next we will set any unassigned (unaffinitized) cpu map 10755 * entries to a IRQ on the same phys_id. 10756 */ 10757 first_cpu = cpumask_first(cpu_present_mask); 10758 start_cpu = first_cpu; 10759 10760 for_each_present_cpu(cpu) { 10761 cpup = &phba->sli4_hba.cpu_map[cpu]; 10762 10763 /* Is this CPU entry unassigned */ 10764 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 10765 /* Mark CPU as IRQ not assigned by the kernel */ 10766 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 10767 10768 /* If so, find a new_cpup thats on the the SAME 10769 * phys_id as cpup. start_cpu will start where we 10770 * left off so all unassigned entries don't get assgined 10771 * the IRQ of the first entry. 
10772 */ 10773 new_cpu = start_cpu; 10774 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10775 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10776 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 10777 (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) && 10778 (new_cpup->phys_id == cpup->phys_id)) 10779 goto found_same; 10780 new_cpu = cpumask_next( 10781 new_cpu, cpu_present_mask); 10782 if (new_cpu == nr_cpumask_bits) 10783 new_cpu = first_cpu; 10784 } 10785 /* At this point, we leave the CPU as unassigned */ 10786 continue; 10787 found_same: 10788 /* We found a matching phys_id, so copy the IRQ info */ 10789 cpup->eq = new_cpup->eq; 10790 cpup->irq = new_cpup->irq; 10791 10792 /* Bump start_cpu to the next slot to minmize the 10793 * chance of having multiple unassigned CPU entries 10794 * selecting the same IRQ. 10795 */ 10796 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 10797 if (start_cpu == nr_cpumask_bits) 10798 start_cpu = first_cpu; 10799 10800 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10801 "3337 Set Affinity: CPU %d " 10802 "irq %d from id %d same " 10803 "phys_id (%d)\n", 10804 cpu, cpup->irq, new_cpu, cpup->phys_id); 10805 } 10806 } 10807 10808 /* Set any unassigned cpu map entries to a IRQ on any phys_id */ 10809 start_cpu = first_cpu; 10810 10811 for_each_present_cpu(cpu) { 10812 cpup = &phba->sli4_hba.cpu_map[cpu]; 10813 10814 /* Is this entry unassigned */ 10815 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 10816 /* Mark it as IRQ not assigned by the kernel */ 10817 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 10818 10819 /* If so, find a new_cpup thats on ANY phys_id 10820 * as the cpup. start_cpu will start where we 10821 * left off so all unassigned entries don't get 10822 * assigned the IRQ of the first entry. 10823 */ 10824 new_cpu = start_cpu; 10825 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10826 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10827 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 10828 (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY)) 10829 goto found_any; 10830 new_cpu = cpumask_next( 10831 new_cpu, cpu_present_mask); 10832 if (new_cpu == nr_cpumask_bits) 10833 new_cpu = first_cpu; 10834 } 10835 /* We should never leave an entry unassigned */ 10836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10837 "3339 Set Affinity: CPU %d " 10838 "irq %d UNASSIGNED\n", 10839 cpup->hdwq, cpup->irq); 10840 continue; 10841 found_any: 10842 /* We found an available entry, copy the IRQ info */ 10843 cpup->eq = new_cpup->eq; 10844 cpup->irq = new_cpup->irq; 10845 10846 /* Bump start_cpu to the next slot to minmize the 10847 * chance of having multiple unassigned CPU entries 10848 * selecting the same IRQ. 10849 */ 10850 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 10851 if (start_cpu == nr_cpumask_bits) 10852 start_cpu = first_cpu; 10853 10854 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10855 "3338 Set Affinity: CPU %d " 10856 "irq %d from id %d (%d/%d)\n", 10857 cpu, cpup->irq, new_cpu, 10858 new_cpup->phys_id, new_cpup->core_id); 10859 } 10860 } 10861 10862 /* Assign hdwq indices that are unique across all cpus in the map 10863 * that are also FIRST_CPUs. 10864 */ 10865 idx = 0; 10866 for_each_present_cpu(cpu) { 10867 cpup = &phba->sli4_hba.cpu_map[cpu]; 10868 10869 /* Only FIRST IRQs get a hdwq index assignment. 
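 * For example (illustrative numbers only): with 4 IRQ vectors and 8
 * present CPUs, the 4 FIRST_IRQ CPUs are handed hdwq 0-3 here; the
 * loop that follows gives any remaining hdwq indices (up to
 * cfg_hdw_queue) to the other CPUs before it starts sharing queues
 * by phys_id/core_id.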
*/ 10870 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10871 continue; 10872 10873 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ 10874 cpup->hdwq = idx; 10875 idx++; 10876 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10877 "3333 Set Affinity: CPU %d (phys %d core %d): " 10878 "hdwq %d eq %d irq %d flg x%x\n", 10879 cpu, cpup->phys_id, cpup->core_id, 10880 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag); 10881 } 10882 /* Finally we need to associate a hdwq with each cpu_map entry 10883 * This will be 1 to 1 - hdwq to cpu, unless there are less 10884 * hardware queues then CPUs. For that case we will just round-robin 10885 * the available hardware queues as they get assigned to CPUs. 10886 * The next_idx is the idx from the FIRST_CPU loop above to account 10887 * for irq_chann < hdwq. The idx is used for round-robin assignments 10888 * and needs to start at 0. 10889 */ 10890 next_idx = idx; 10891 start_cpu = 0; 10892 idx = 0; 10893 for_each_present_cpu(cpu) { 10894 cpup = &phba->sli4_hba.cpu_map[cpu]; 10895 10896 /* FIRST cpus are already mapped. */ 10897 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 10898 continue; 10899 10900 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 10901 * of the unassigned cpus to the next idx so that all 10902 * hdw queues are fully utilized. 10903 */ 10904 if (next_idx < phba->cfg_hdw_queue) { 10905 cpup->hdwq = next_idx; 10906 next_idx++; 10907 continue; 10908 } 10909 10910 /* Not a First CPU and all hdw_queues are used. Reuse a 10911 * Hardware Queue for another CPU, so be smart about it 10912 * and pick one that has its IRQ/EQ mapped to the same phys_id 10913 * (CPU package) and core_id. 10914 */ 10915 new_cpu = start_cpu; 10916 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10917 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10918 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 10919 new_cpup->phys_id == cpup->phys_id && 10920 new_cpup->core_id == cpup->core_id) { 10921 goto found_hdwq; 10922 } 10923 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 10924 if (new_cpu == nr_cpumask_bits) 10925 new_cpu = first_cpu; 10926 } 10927 10928 /* If we can't match both phys_id and core_id, 10929 * settle for just a phys_id match. 10930 */ 10931 new_cpu = start_cpu; 10932 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10933 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10934 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 10935 new_cpup->phys_id == cpup->phys_id) 10936 goto found_hdwq; 10937 10938 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 10939 if (new_cpu == nr_cpumask_bits) 10940 new_cpu = first_cpu; 10941 } 10942 10943 /* Otherwise just round robin on cfg_hdw_queue */ 10944 cpup->hdwq = idx % phba->cfg_hdw_queue; 10945 idx++; 10946 goto logit; 10947 found_hdwq: 10948 /* We found an available entry, copy the IRQ info */ 10949 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 10950 if (start_cpu == nr_cpumask_bits) 10951 start_cpu = first_cpu; 10952 cpup->hdwq = new_cpup->hdwq; 10953 logit: 10954 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10955 "3335 Set Affinity: CPU %d (phys %d core %d): " 10956 "hdwq %d eq %d irq %d flg x%x\n", 10957 cpu, cpup->phys_id, cpup->core_id, 10958 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag); 10959 } 10960 10961 /* The cpu_map array will be used later during initialization 10962 * when EQ / CQ / WQs are allocated and configured. 10963 */ 10964 return; 10965 } 10966 10967 /** 10968 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 10969 * @phba: pointer to lpfc hba data structure. 
10970 * 10971 * This routine is invoked to enable the MSI-X interrupt vectors to device 10972 * with SLI-4 interface spec. 10973 * 10974 * Return codes 10975 * 0 - successful 10976 * other values - error 10977 **/ 10978 static int 10979 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 10980 { 10981 int vectors, rc, index; 10982 char *name; 10983 10984 /* Set up MSI-X multi-message vectors */ 10985 vectors = phba->cfg_irq_chann; 10986 10987 rc = pci_alloc_irq_vectors(phba->pcidev, 10988 1, 10989 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 10990 if (rc < 0) { 10991 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10992 "0484 PCI enable MSI-X failed (%d)\n", rc); 10993 goto vec_fail_out; 10994 } 10995 vectors = rc; 10996 10997 /* Assign MSI-X vectors to interrupt handlers */ 10998 for (index = 0; index < vectors; index++) { 10999 name = phba->sli4_hba.hba_eq_hdl[index].handler_name; 11000 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 11001 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 11002 LPFC_DRIVER_HANDLER_NAME"%d", index); 11003 11004 phba->sli4_hba.hba_eq_hdl[index].idx = index; 11005 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 11006 rc = request_irq(pci_irq_vector(phba->pcidev, index), 11007 &lpfc_sli4_hba_intr_handler, 0, 11008 name, 11009 &phba->sli4_hba.hba_eq_hdl[index]); 11010 if (rc) { 11011 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11012 "0486 MSI-X fast-path (%d) " 11013 "request_irq failed (%d)\n", index, rc); 11014 goto cfg_fail_out; 11015 } 11016 } 11017 11018 if (vectors != phba->cfg_irq_chann) { 11019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11020 "3238 Reducing IO channels to match number of " 11021 "MSI-X vectors, requested %d got %d\n", 11022 phba->cfg_irq_chann, vectors); 11023 if (phba->cfg_irq_chann > vectors) 11024 phba->cfg_irq_chann = vectors; 11025 if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors)) 11026 phba->cfg_nvmet_mrq = vectors; 11027 } 11028 11029 return rc; 11030 11031 cfg_fail_out: 11032 /* free the irq already requested */ 11033 for (--index; index >= 0; index--) 11034 free_irq(pci_irq_vector(phba->pcidev, index), 11035 &phba->sli4_hba.hba_eq_hdl[index]); 11036 11037 /* Unconfigure MSI-X capability structure */ 11038 pci_free_irq_vectors(phba->pcidev); 11039 11040 vec_fail_out: 11041 return rc; 11042 } 11043 11044 /** 11045 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 11046 * @phba: pointer to lpfc hba data structure. 11047 * 11048 * This routine is invoked to enable the MSI interrupt mode to device with 11049 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 11050 * called to enable the MSI vector. The device driver is responsible for 11051 * calling request_irq() to register the MSI vector with an interrupt 11052 * handler, which is done in this function. 11053 * 11054 * Return codes 11055 * 0 - successful 11056 * other values - error 11057 **/ 11058 static int 11059 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 11060 { 11061 int rc, index; 11062 11063 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 11064 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 11065 if (rc > 0) 11066 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11067 "0487 PCI enable MSI mode success.\n"); 11068 else { 11069 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11070 "0488 PCI enable MSI mode failed (%d)\n", rc); 11071 return rc ?
rc : -1; 11072 } 11073 11074 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11075 0, LPFC_DRIVER_NAME, phba); 11076 if (rc) { 11077 pci_free_irq_vectors(phba->pcidev); 11078 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11079 "0490 MSI request_irq failed (%d)\n", rc); 11080 return rc; 11081 } 11082 11083 for (index = 0; index < phba->cfg_irq_chann; index++) { 11084 phba->sli4_hba.hba_eq_hdl[index].idx = index; 11085 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 11086 } 11087 11088 return 0; 11089 } 11090 11091 /** 11092 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 11093 * @phba: pointer to lpfc hba data structure. 11094 * 11095 * This routine is invoked to enable device interrupt and associate driver's 11096 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 11097 * interface spec. Depending on the interrupt mode configured for the driver, 11098 * the driver will try to fall back from the configured interrupt mode to an 11099 * interrupt mode which is supported by the platform, kernel, and device in 11100 * the order of: 11101 * MSI-X -> MSI -> IRQ. 11102 * 11103 * Return codes 11104 * 0 - successful 11105 * other values - error 11106 **/ 11107 static uint32_t 11108 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 11109 { 11110 uint32_t intr_mode = LPFC_INTR_ERROR; 11111 int retval, idx; 11112 11113 if (cfg_mode == 2) { 11114 /* Preparation before conf_msi mbox cmd */ 11115 retval = 0; 11116 if (!retval) { 11117 /* Now, try to enable MSI-X interrupt mode */ 11118 retval = lpfc_sli4_enable_msix(phba); 11119 if (!retval) { 11120 /* Indicate initialization to MSI-X mode */ 11121 phba->intr_type = MSIX; 11122 intr_mode = 2; 11123 } 11124 } 11125 } 11126 11127 /* Fallback to MSI if MSI-X initialization failed */ 11128 if (cfg_mode >= 1 && phba->intr_type == NONE) { 11129 retval = lpfc_sli4_enable_msi(phba); 11130 if (!retval) { 11131 /* Indicate initialization to MSI mode */ 11132 phba->intr_type = MSI; 11133 intr_mode = 1; 11134 } 11135 } 11136 11137 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 11138 if (phba->intr_type == NONE) { 11139 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11140 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 11141 if (!retval) { 11142 struct lpfc_hba_eq_hdl *eqhdl; 11143 11144 /* Indicate initialization to INTx mode */ 11145 phba->intr_type = INTx; 11146 intr_mode = 0; 11147 11148 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11149 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; 11150 eqhdl->idx = idx; 11151 eqhdl->phba = phba; 11152 } 11153 } 11154 } 11155 return intr_mode; 11156 } 11157 11158 /** 11159 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 11160 * @phba: pointer to lpfc hba data structure. 11161 * 11162 * This routine is invoked to disable device interrupt and disassociate 11163 * the driver's interrupt handler(s) from interrupt vector(s) to device 11164 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 11165 * will release the interrupt vector(s) for the message signaled interrupt.
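 * For MSI-X, each vector's affinity hint is cleared and its IRQ freed
 * individually before pci_free_irq_vectors() releases the vectors.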
11166 **/ 11167 static void 11168 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 11169 { 11170 /* Disable the currently initialized interrupt mode */ 11171 if (phba->intr_type == MSIX) { 11172 int index; 11173 11174 /* Free up MSI-X multi-message vectors */ 11175 for (index = 0; index < phba->cfg_irq_chann; index++) { 11176 irq_set_affinity_hint( 11177 pci_irq_vector(phba->pcidev, index), 11178 NULL); 11179 free_irq(pci_irq_vector(phba->pcidev, index), 11180 &phba->sli4_hba.hba_eq_hdl[index]); 11181 } 11182 } else { 11183 free_irq(phba->pcidev->irq, phba); 11184 } 11185 11186 pci_free_irq_vectors(phba->pcidev); 11187 11188 /* Reset interrupt management states */ 11189 phba->intr_type = NONE; 11190 phba->sli.slistat.sli_intr = 0; 11191 } 11192 11193 /** 11194 * lpfc_unset_hba - Unset SLI3 hba device initialization 11195 * @phba: pointer to lpfc hba data structure. 11196 * 11197 * This routine is invoked to unset the HBA device initialization steps to 11198 * a device with SLI-3 interface spec. 11199 **/ 11200 static void 11201 lpfc_unset_hba(struct lpfc_hba *phba) 11202 { 11203 struct lpfc_vport *vport = phba->pport; 11204 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11205 11206 spin_lock_irq(shost->host_lock); 11207 vport->load_flag |= FC_UNLOADING; 11208 spin_unlock_irq(shost->host_lock); 11209 11210 kfree(phba->vpi_bmask); 11211 kfree(phba->vpi_ids); 11212 11213 lpfc_stop_hba_timers(phba); 11214 11215 phba->pport->work_port_events = 0; 11216 11217 lpfc_sli_hba_down(phba); 11218 11219 lpfc_sli_brdrestart(phba); 11220 11221 lpfc_sli_disable_intr(phba); 11222 11223 return; 11224 } 11225 11226 /** 11227 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 11228 * @phba: Pointer to HBA context object. 11229 * 11230 * This function is called in the SLI4 code path to wait for completion 11231 * of device's XRIs exchange busy. It will check the XRI exchange busy 11232 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 11233 * that, it will check the XRI exchange busy on outstanding FCP and ELS 11234 * I/Os every 30 seconds, log error message, and wait forever. Only when 11235 * all XRI exchange busy complete, the driver unload shall proceed with 11236 * invoking the function reset ioctl mailbox command to the CNA and the 11237 * the rest of the driver unload resource release. 11238 **/ 11239 static void 11240 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 11241 { 11242 struct lpfc_sli4_hdw_queue *qp; 11243 int idx, ccnt; 11244 int wait_time = 0; 11245 int io_xri_cmpl = 1; 11246 int nvmet_xri_cmpl = 1; 11247 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11248 11249 /* Driver just aborted IOs during the hba_unset process. Pause 11250 * here to give the HBA time to complete the IO and get entries 11251 * into the abts lists. 11252 */ 11253 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 11254 11255 /* Wait for NVME pending IO to flush back to transport. 
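 * After the NVME drain, the loop below polls the abort lists every
 * LPFC_XRI_EXCH_BUSY_WAIT_T1 ms; once LPFC_XRI_EXCH_BUSY_WAIT_TMO is
 * exceeded it slows to LPFC_XRI_EXCH_BUSY_WAIT_T2 polling with
 * periodic error logging, and it waits for as long as it takes.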
*/ 11256 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 11257 lpfc_nvme_wait_for_io_drain(phba); 11258 11259 ccnt = 0; 11260 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11261 qp = &phba->sli4_hba.hdwq[idx]; 11262 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 11263 if (!io_xri_cmpl) /* if list is NOT empty */ 11264 ccnt++; 11265 } 11266 if (ccnt) 11267 io_xri_cmpl = 0; 11268 11269 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11270 nvmet_xri_cmpl = 11271 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11272 } 11273 11274 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 11275 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 11276 if (!nvmet_xri_cmpl) 11277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11278 "6424 NVMET XRI exchange busy " 11279 "wait time: %d seconds.\n", 11280 wait_time/1000); 11281 if (!io_xri_cmpl) 11282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11283 "6100 IO XRI exchange busy " 11284 "wait time: %d seconds.\n", 11285 wait_time/1000); 11286 if (!els_xri_cmpl) 11287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11288 "2878 ELS XRI exchange busy " 11289 "wait time: %d seconds.\n", 11290 wait_time/1000); 11291 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 11292 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 11293 } else { 11294 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 11295 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 11296 } 11297 11298 ccnt = 0; 11299 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11300 qp = &phba->sli4_hba.hdwq[idx]; 11301 io_xri_cmpl = list_empty( 11302 &qp->lpfc_abts_io_buf_list); 11303 if (!io_xri_cmpl) /* if list is NOT empty */ 11304 ccnt++; 11305 } 11306 if (ccnt) 11307 io_xri_cmpl = 0; 11308 11309 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11310 nvmet_xri_cmpl = list_empty( 11311 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11312 } 11313 els_xri_cmpl = 11314 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11315 11316 } 11317 } 11318 11319 /** 11320 * lpfc_sli4_hba_unset - Unset the fcoe hba 11321 * @phba: Pointer to HBA context object. 11322 * 11323 * This function is called in the SLI4 code path to reset the HBA's FCoE 11324 * function. The caller is not required to hold any lock. This routine 11325 * issues PCI function reset mailbox command to reset the FCoE function. 11326 * At the end of the function, it calls lpfc_hba_down_post function to 11327 * free any pending commands. 11328 **/ 11329 static void 11330 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 11331 { 11332 int wait_cnt = 0; 11333 LPFC_MBOXQ_t *mboxq; 11334 struct pci_dev *pdev = phba->pcidev; 11335 11336 lpfc_stop_hba_timers(phba); 11337 if (phba->pport) 11338 phba->sli4_hba.intr_enable = 0; 11339 11340 /* 11341 * Gracefully wait out the potential current outstanding asynchronous 11342 * mailbox command. 
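 * Three steps follow: block further asynchronous mailbox posting,
 * poll up to LPFC_ACTIVE_MBOX_WAIT_CNT times for the active command
 * to complete, and force-complete it with MBX_NOT_FINISHED if it
 * never does.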
11343 */ 11344 11345 /* First, block any pending async mailbox command from posted */ 11346 spin_lock_irq(&phba->hbalock); 11347 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 11348 spin_unlock_irq(&phba->hbalock); 11349 /* Now, trying to wait it out if we can */ 11350 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11351 msleep(10); 11352 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 11353 break; 11354 } 11355 /* Forcefully release the outstanding mailbox command if timed out */ 11356 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11357 spin_lock_irq(&phba->hbalock); 11358 mboxq = phba->sli.mbox_active; 11359 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 11360 __lpfc_mbox_cmpl_put(phba, mboxq); 11361 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 11362 phba->sli.mbox_active = NULL; 11363 spin_unlock_irq(&phba->hbalock); 11364 } 11365 11366 /* Abort all iocbs associated with the hba */ 11367 lpfc_sli_hba_iocb_abort(phba); 11368 11369 /* Wait for completion of device XRI exchange busy */ 11370 lpfc_sli4_xri_exchange_busy_wait(phba); 11371 11372 /* Disable PCI subsystem interrupt */ 11373 lpfc_sli4_disable_intr(phba); 11374 11375 /* Disable SR-IOV if enabled */ 11376 if (phba->cfg_sriov_nr_virtfn) 11377 pci_disable_sriov(pdev); 11378 11379 /* Stop kthread signal shall trigger work_done one more time */ 11380 kthread_stop(phba->worker_thread); 11381 11382 /* Disable FW logging to host memory */ 11383 lpfc_ras_stop_fwlog(phba); 11384 11385 /* Unset the queues shared with the hardware then release all 11386 * allocated resources. 11387 */ 11388 lpfc_sli4_queue_unset(phba); 11389 lpfc_sli4_queue_destroy(phba); 11390 11391 /* Reset SLI4 HBA FCoE function */ 11392 lpfc_pci_function_reset(phba); 11393 11394 /* Free RAS DMA memory */ 11395 if (phba->ras_fwlog.ras_enabled) 11396 lpfc_sli4_ras_dma_free(phba); 11397 11398 /* Stop the SLI4 device port */ 11399 if (phba->pport) 11400 phba->pport->work_port_events = 0; 11401 } 11402 11403 /** 11404 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 11405 * @phba: Pointer to HBA context object. 11406 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11407 * 11408 * This function is called in the SLI4 code path to read the port's 11409 * sli4 capabilities. 11410 * 11411 * This function may be be called from any context that can block-wait 11412 * for the completion. The expectation is that this routine is called 11413 * typically from probe_one or from the online routine. 
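 * If interrupts are not yet enabled, the mailbox is issued in polled
 * mode; otherwise the routine block-waits for the mailbox completion.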
11414 **/ 11415 int 11416 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11417 { 11418 int rc; 11419 struct lpfc_mqe *mqe; 11420 struct lpfc_pc_sli4_params *sli4_params; 11421 uint32_t mbox_tmo; 11422 11423 rc = 0; 11424 mqe = &mboxq->u.mqe; 11425 11426 /* Read the port's SLI4 Parameters port capabilities */ 11427 lpfc_pc_sli4_params(mboxq); 11428 if (!phba->sli4_hba.intr_enable) 11429 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11430 else { 11431 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11432 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11433 } 11434 11435 if (unlikely(rc)) 11436 return 1; 11437 11438 sli4_params = &phba->sli4_hba.pc_sli4_params; 11439 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 11440 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 11441 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 11442 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 11443 &mqe->un.sli4_params); 11444 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 11445 &mqe->un.sli4_params); 11446 sli4_params->proto_types = mqe->un.sli4_params.word3; 11447 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 11448 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 11449 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 11450 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 11451 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 11452 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 11453 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 11454 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 11455 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 11456 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 11457 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 11458 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 11459 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 11460 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 11461 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 11462 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 11463 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 11464 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 11465 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 11466 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 11467 11468 /* Make sure that sge_supp_len can be handled by the driver */ 11469 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11470 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11471 11472 return rc; 11473 } 11474 11475 /** 11476 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 11477 * @phba: Pointer to HBA context object. 11478 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11479 * 11480 * This function is called in the SLI4 code path to read the port's 11481 * sli4 capabilities. 11482 * 11483 * This function may be be called from any context that can block-wait 11484 * for the completion. The expectation is that this routine is called 11485 * typically from probe_one or from the online routine. 
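 * Besides caching the raw SLI4 parameters, the routine derives several
 * driver settings from them: NVME support, extended pre-registered
 * SGLs, PBDE usage, suppress-response, EQ delay register usage, MDS
 * diagnostics, NSLER and the expanded WQ/CQ page sizes.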
11486 **/ 11487 int 11488 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11489 { 11490 int rc; 11491 struct lpfc_mqe *mqe = &mboxq->u.mqe; 11492 struct lpfc_pc_sli4_params *sli4_params; 11493 uint32_t mbox_tmo; 11494 int length; 11495 bool exp_wqcq_pages = true; 11496 struct lpfc_sli4_parameters *mbx_sli4_parameters; 11497 11498 /* 11499 * By default, the driver assumes the SLI4 port requires RPI 11500 * header postings. The SLI4_PARAM response will correct this 11501 * assumption. 11502 */ 11503 phba->sli4_hba.rpi_hdrs_in_use = 1; 11504 11505 /* Read the port's SLI4 Config Parameters */ 11506 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 11507 sizeof(struct lpfc_sli4_cfg_mhdr)); 11508 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11509 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 11510 length, LPFC_SLI4_MBX_EMBED); 11511 if (!phba->sli4_hba.intr_enable) 11512 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11513 else { 11514 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11515 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11516 } 11517 if (unlikely(rc)) 11518 return rc; 11519 sli4_params = &phba->sli4_hba.pc_sli4_params; 11520 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 11521 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 11522 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 11523 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 11524 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 11525 mbx_sli4_parameters); 11526 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 11527 mbx_sli4_parameters); 11528 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 11529 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 11530 else 11531 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 11532 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 11533 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 11534 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 11535 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 11536 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 11537 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 11538 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 11539 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 11540 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 11541 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 11542 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 11543 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 11544 mbx_sli4_parameters); 11545 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 11546 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 11547 mbx_sli4_parameters); 11548 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 11549 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 11550 11551 /* Check for Extended Pre-Registered SGL support */ 11552 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 11553 11554 /* Check for firmware nvme support */ 11555 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 11556 bf_get(cfg_xib, mbx_sli4_parameters)); 11557 11558 if (rc) { 11559 /* Save this to indicate the Firmware supports NVME */ 11560 sli4_params->nvme = 1; 11561 11562 /* Firmware NVME support, check driver FC4 NVME support */ 11563 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 11564 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 11565 "6133 Disabling NVME 
support: " 11566 "FC4 type not supported: x%x\n", 11567 phba->cfg_enable_fc4_type); 11568 goto fcponly; 11569 } 11570 } else { 11571 /* No firmware NVME support, check driver FC4 NVME support */ 11572 sli4_params->nvme = 0; 11573 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 11575 "6101 Disabling NVME support: Not " 11576 "supported by firmware (%d %d) x%x\n", 11577 bf_get(cfg_nvme, mbx_sli4_parameters), 11578 bf_get(cfg_xib, mbx_sli4_parameters), 11579 phba->cfg_enable_fc4_type); 11580 fcponly: 11581 phba->nvme_support = 0; 11582 phba->nvmet_support = 0; 11583 phba->cfg_nvmet_mrq = 0; 11584 phba->cfg_nvme_seg_cnt = 0; 11585 11586 /* If no FC4 type support, move to just SCSI support */ 11587 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 11588 return -ENODEV; 11589 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 11590 } 11591 } 11592 11593 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 11594 * accommodate 512K and 1M IOs in a single nvme buf and supply 11595 * enough NVME LS iocb buffers for larger connectivity counts. 11596 */ 11597 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11598 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 11599 phba->cfg_iocb_cnt = 5; 11600 } 11601 11602 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ 11603 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 11604 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) 11605 phba->cfg_enable_pbde = 0; 11606 11607 /* 11608 * To support Suppress Response feature we must satisfy 3 conditions. 11609 * lpfc_suppress_rsp module parameter must be set (default). 11610 * In SLI4-Parameters Descriptor: 11611 * Extended Inline Buffers (XIB) must be supported. 11612 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 11613 * (double negative). 11614 */ 11615 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 11616 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 11617 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 11618 else 11619 phba->cfg_suppress_rsp = 0; 11620 11621 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 11622 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 11623 11624 /* Make sure that sge_supp_len can be handled by the driver */ 11625 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11626 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11627 11628 /* 11629 * Check whether the adapter supports an embedded copy of the 11630 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 11631 * to use this option, 128-byte WQEs must be used. 
11632 */ 11633 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 11634 phba->fcp_embed_io = 1; 11635 else 11636 phba->fcp_embed_io = 0; 11637 11638 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 11639 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 11640 bf_get(cfg_xib, mbx_sli4_parameters), 11641 phba->cfg_enable_pbde, 11642 phba->fcp_embed_io, phba->nvme_support, 11643 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 11644 11645 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 11646 LPFC_SLI_INTF_IF_TYPE_2) && 11647 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 11648 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 11649 exp_wqcq_pages = false; 11650 11651 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 11652 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 11653 exp_wqcq_pages && 11654 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 11655 phba->enab_exp_wqcq_pages = 1; 11656 else 11657 phba->enab_exp_wqcq_pages = 0; 11658 /* 11659 * Check if the SLI port supports MDS Diagnostics 11660 */ 11661 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 11662 phba->mds_diags_support = 1; 11663 else 11664 phba->mds_diags_support = 0; 11665 11666 /* 11667 * Check if the SLI port supports NSLER 11668 */ 11669 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 11670 phba->nsler = 1; 11671 else 11672 phba->nsler = 0; 11673 11674 return 0; 11675 } 11676 11677 /** 11678 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 11679 * @pdev: pointer to PCI device 11680 * @pid: pointer to PCI device identifier 11681 * 11682 * This routine is to be called to attach a device with SLI-3 interface spec 11683 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 11684 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 11685 * information of the device and driver to see if the driver state that it can 11686 * support this kind of device. If the match is successful, the driver core 11687 * invokes this routine. If this routine determines it can claim the HBA, it 11688 * does all the initialization that it needs to do to handle the HBA properly. 
11689 * 11690 * Return code 11691 * 0 - driver can claim the device 11692 * negative value - driver can not claim the device 11693 **/ 11694 static int 11695 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 11696 { 11697 struct lpfc_hba *phba; 11698 struct lpfc_vport *vport = NULL; 11699 struct Scsi_Host *shost = NULL; 11700 int error; 11701 uint32_t cfg_mode, intr_mode; 11702 11703 /* Allocate memory for HBA structure */ 11704 phba = lpfc_hba_alloc(pdev); 11705 if (!phba) 11706 return -ENOMEM; 11707 11708 /* Perform generic PCI device enabling operation */ 11709 error = lpfc_enable_pci_dev(phba); 11710 if (error) 11711 goto out_free_phba; 11712 11713 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 11714 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 11715 if (error) 11716 goto out_disable_pci_dev; 11717 11718 /* Set up SLI-3 specific device PCI memory space */ 11719 error = lpfc_sli_pci_mem_setup(phba); 11720 if (error) { 11721 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11722 "1402 Failed to set up pci memory space.\n"); 11723 goto out_disable_pci_dev; 11724 } 11725 11726 /* Set up SLI-3 specific device driver resources */ 11727 error = lpfc_sli_driver_resource_setup(phba); 11728 if (error) { 11729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11730 "1404 Failed to set up driver resource.\n"); 11731 goto out_unset_pci_mem_s3; 11732 } 11733 11734 /* Initialize and populate the iocb list per host */ 11735 11736 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 11737 if (error) { 11738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11739 "1405 Failed to initialize iocb list.\n"); 11740 goto out_unset_driver_resource_s3; 11741 } 11742 11743 /* Set up common device driver resources */ 11744 error = lpfc_setup_driver_resource_phase2(phba); 11745 if (error) { 11746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11747 "1406 Failed to set up driver resource.\n"); 11748 goto out_free_iocb_list; 11749 } 11750 11751 /* Get the default values for Model Name and Description */ 11752 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 11753 11754 /* Create SCSI host to the physical port */ 11755 error = lpfc_create_shost(phba); 11756 if (error) { 11757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11758 "1407 Failed to create scsi host.\n"); 11759 goto out_unset_driver_resource; 11760 } 11761 11762 /* Configure sysfs attributes */ 11763 vport = phba->pport; 11764 error = lpfc_alloc_sysfs_attr(vport); 11765 if (error) { 11766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11767 "1476 Failed to allocate sysfs attr\n"); 11768 goto out_destroy_shost; 11769 } 11770 11771 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 11772 /* Now, trying to enable interrupt and bring up the device */ 11773 cfg_mode = phba->cfg_use_msi; 11774 while (true) { 11775 /* Put device to a known state before enabling interrupt */ 11776 lpfc_stop_port(phba); 11777 /* Configure and enable interrupt */ 11778 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 11779 if (intr_mode == LPFC_INTR_ERROR) { 11780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11781 "0431 Failed to enable interrupt.\n"); 11782 error = -ENODEV; 11783 goto out_free_sysfs_attr; 11784 } 11785 /* SLI-3 HBA setup */ 11786 if (lpfc_sli_hba_setup(phba)) { 11787 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11788 "1477 Failed to set up hba\n"); 11789 error = -ENODEV; 11790 goto out_remove_device; 11791 } 11792 11793 /* Wait 50ms for the interrupts of previous mailbox commands */ 11794 msleep(50); 11795 /* 
Check active interrupts on message signaled interrupts */ 11796 if (intr_mode == 0 || 11797 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 11798 /* Log the current active interrupt mode */ 11799 phba->intr_mode = intr_mode; 11800 lpfc_log_intr_mode(phba, intr_mode); 11801 break; 11802 } else { 11803 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11804 "0447 Configure interrupt mode (%d) " 11805 "failed active interrupt test.\n", 11806 intr_mode); 11807 /* Disable the current interrupt mode */ 11808 lpfc_sli_disable_intr(phba); 11809 /* Try next level of interrupt mode */ 11810 cfg_mode = --intr_mode; 11811 } 11812 } 11813 11814 /* Perform post initialization setup */ 11815 lpfc_post_init_setup(phba); 11816 11817 /* Check if there are static vports to be created. */ 11818 lpfc_create_static_vport(phba); 11819 11820 return 0; 11821 11822 out_remove_device: 11823 lpfc_unset_hba(phba); 11824 out_free_sysfs_attr: 11825 lpfc_free_sysfs_attr(vport); 11826 out_destroy_shost: 11827 lpfc_destroy_shost(phba); 11828 out_unset_driver_resource: 11829 lpfc_unset_driver_resource_phase2(phba); 11830 out_free_iocb_list: 11831 lpfc_free_iocb_list(phba); 11832 out_unset_driver_resource_s3: 11833 lpfc_sli_driver_resource_unset(phba); 11834 out_unset_pci_mem_s3: 11835 lpfc_sli_pci_mem_unset(phba); 11836 out_disable_pci_dev: 11837 lpfc_disable_pci_dev(phba); 11838 if (shost) 11839 scsi_host_put(shost); 11840 out_free_phba: 11841 lpfc_hba_free(phba); 11842 return error; 11843 } 11844 11845 /** 11846 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 11847 * @pdev: pointer to PCI device 11848 * 11849 * This routine is to be called to disattach a device with SLI-3 interface 11850 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 11851 * removed from PCI bus, it performs all the necessary cleanup for the HBA 11852 * device to be removed from the PCI subsystem properly. 11853 **/ 11854 static void 11855 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 11856 { 11857 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11858 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 11859 struct lpfc_vport **vports; 11860 struct lpfc_hba *phba = vport->phba; 11861 int i; 11862 11863 spin_lock_irq(&phba->hbalock); 11864 vport->load_flag |= FC_UNLOADING; 11865 spin_unlock_irq(&phba->hbalock); 11866 11867 lpfc_free_sysfs_attr(vport); 11868 11869 /* Release all the vports against this physical port */ 11870 vports = lpfc_create_vport_work_array(phba); 11871 if (vports != NULL) 11872 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11873 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 11874 continue; 11875 fc_vport_terminate(vports[i]->fc_vport); 11876 } 11877 lpfc_destroy_vport_work_array(phba, vports); 11878 11879 /* Remove FC host and then SCSI host with the physical port */ 11880 fc_remove_host(shost); 11881 scsi_remove_host(shost); 11882 11883 lpfc_cleanup(vport); 11884 11885 /* 11886 * Bring down the SLI Layer. This step disable all interrupts, 11887 * clears the rings, discards all mailbox commands, and resets 11888 * the HBA. 
11889 */ 11890 11891 /* HBA interrupt will be disabled after this call */ 11892 lpfc_sli_hba_down(phba); 11893 /* Stop kthread signal shall trigger work_done one more time */ 11894 kthread_stop(phba->worker_thread); 11895 /* Final cleanup of txcmplq and reset the HBA */ 11896 lpfc_sli_brdrestart(phba); 11897 11898 kfree(phba->vpi_bmask); 11899 kfree(phba->vpi_ids); 11900 11901 lpfc_stop_hba_timers(phba); 11902 spin_lock_irq(&phba->port_list_lock); 11903 list_del_init(&vport->listentry); 11904 spin_unlock_irq(&phba->port_list_lock); 11905 11906 lpfc_debugfs_terminate(vport); 11907 11908 /* Disable SR-IOV if enabled */ 11909 if (phba->cfg_sriov_nr_virtfn) 11910 pci_disable_sriov(pdev); 11911 11912 /* Disable interrupt */ 11913 lpfc_sli_disable_intr(phba); 11914 11915 scsi_host_put(shost); 11916 11917 /* 11918 * Call scsi_free before mem_free since scsi bufs are released to their 11919 * corresponding pools here. 11920 */ 11921 lpfc_scsi_free(phba); 11922 lpfc_free_iocb_list(phba); 11923 11924 lpfc_mem_free_all(phba); 11925 11926 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 11927 phba->hbqslimp.virt, phba->hbqslimp.phys); 11928 11929 /* Free resources associated with SLI2 interface */ 11930 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 11931 phba->slim2p.virt, phba->slim2p.phys); 11932 11933 /* unmap adapter SLIM and Control Registers */ 11934 iounmap(phba->ctrl_regs_memmap_p); 11935 iounmap(phba->slim_memmap_p); 11936 11937 lpfc_hba_free(phba); 11938 11939 pci_release_mem_regions(pdev); 11940 pci_disable_device(pdev); 11941 } 11942 11943 /** 11944 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 11945 * @pdev: pointer to PCI device 11946 * @msg: power management message 11947 * 11948 * This routine is to be called from the kernel's PCI subsystem to support 11949 * system Power Management (PM) to device with SLI-3 interface spec. When 11950 * PM invokes this method, it quiesces the device by stopping the driver's 11951 * worker thread for the device, turning off device's interrupt and DMA, 11952 * and bring the device offline. Note that as the driver implements the 11953 * minimum PM requirements to a power-aware driver's PM support for the 11954 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 11955 * to the suspend() method call will be treated as SUSPEND and the driver will 11956 * fully reinitialize its device during resume() method call, the driver will 11957 * set device to PCI_D3hot state in PCI config space instead of setting it 11958 * according to the @msg provided by the PM. 
11959 * 11960 * Return code 11961 * 0 - driver suspended the device 11962 * Error otherwise 11963 **/ 11964 static int 11965 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 11966 { 11967 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11968 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11969 11970 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11971 "0473 PCI device Power Management suspend.\n"); 11972 11973 /* Bring down the device */ 11974 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11975 lpfc_offline(phba); 11976 kthread_stop(phba->worker_thread); 11977 11978 /* Disable interrupt from device */ 11979 lpfc_sli_disable_intr(phba); 11980 11981 /* Save device state to PCI config space */ 11982 pci_save_state(pdev); 11983 pci_set_power_state(pdev, PCI_D3hot); 11984 11985 return 0; 11986 } 11987 11988 /** 11989 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 11990 * @pdev: pointer to PCI device 11991 * 11992 * This routine is to be called from the kernel's PCI subsystem to support 11993 * system Power Management (PM) to device with SLI-3 interface spec. When PM 11994 * invokes this method, it restores the device's PCI config space state and 11995 * fully reinitializes the device and brings it online. Note that as the 11996 * driver implements the minimum PM requirements to a power-aware driver's 11997 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 11998 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 11999 * driver will fully reinitialize its device during resume() method call, 12000 * the device will be set to PCI_D0 directly in PCI config space before 12001 * restoring the state. 12002 * 12003 * Return code 12004 * 0 - driver suspended the device 12005 * Error otherwise 12006 **/ 12007 static int 12008 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 12009 { 12010 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12011 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12012 uint32_t intr_mode; 12013 int error; 12014 12015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12016 "0452 PCI device Power Management resume.\n"); 12017 12018 /* Restore device state from PCI config space */ 12019 pci_set_power_state(pdev, PCI_D0); 12020 pci_restore_state(pdev); 12021 12022 /* 12023 * As the new kernel behavior of pci_restore_state() API call clears 12024 * device saved_state flag, need to save the restored state again. 12025 */ 12026 pci_save_state(pdev); 12027 12028 if (pdev->is_busmaster) 12029 pci_set_master(pdev); 12030 12031 /* Startup the kernel thread for this host adapter. 
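 * Resume order: restart the worker thread first, then re-enable the
 * interrupt mode that was active before suspend, and finally restart
 * the HBA and bring it online.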
*/ 12032 phba->worker_thread = kthread_run(lpfc_do_work, phba, 12033 "lpfc_worker_%d", phba->brd_no); 12034 if (IS_ERR(phba->worker_thread)) { 12035 error = PTR_ERR(phba->worker_thread); 12036 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12037 "0434 PM resume failed to start worker " 12038 "thread: error=x%x.\n", error); 12039 return error; 12040 } 12041 12042 /* Configure and enable interrupt */ 12043 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12044 if (intr_mode == LPFC_INTR_ERROR) { 12045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12046 "0430 PM resume Failed to enable interrupt\n"); 12047 return -EIO; 12048 } else 12049 phba->intr_mode = intr_mode; 12050 12051 /* Restart HBA and bring it online */ 12052 lpfc_sli_brdrestart(phba); 12053 lpfc_online(phba); 12054 12055 /* Log the current active interrupt mode */ 12056 lpfc_log_intr_mode(phba, phba->intr_mode); 12057 12058 return 0; 12059 } 12060 12061 /** 12062 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 12063 * @phba: pointer to lpfc hba data structure. 12064 * 12065 * This routine is called to prepare the SLI3 device for PCI slot recover. It 12066 * aborts all the outstanding SCSI I/Os to the pci device. 12067 **/ 12068 static void 12069 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 12070 { 12071 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12072 "2723 PCI channel I/O abort preparing for recovery\n"); 12073 12074 /* 12075 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 12076 * and let the SCSI mid-layer to retry them to recover. 12077 */ 12078 lpfc_sli_abort_fcp_rings(phba); 12079 } 12080 12081 /** 12082 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 12083 * @phba: pointer to lpfc hba data structure. 12084 * 12085 * This routine is called to prepare the SLI3 device for PCI slot reset. It 12086 * disables the device interrupt and pci device, and aborts the internal FCP 12087 * pending I/Os. 12088 **/ 12089 static void 12090 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 12091 { 12092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12093 "2710 PCI channel disable preparing for reset\n"); 12094 12095 /* Block any management I/Os to the device */ 12096 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 12097 12098 /* Block all SCSI devices' I/Os on the host */ 12099 lpfc_scsi_dev_block(phba); 12100 12101 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 12102 lpfc_sli_flush_io_rings(phba); 12103 12104 /* stop all timers */ 12105 lpfc_stop_hba_timers(phba); 12106 12107 /* Disable interrupt and pci device */ 12108 lpfc_sli_disable_intr(phba); 12109 pci_disable_device(phba->pcidev); 12110 } 12111 12112 /** 12113 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 12114 * @phba: pointer to lpfc hba data structure. 12115 * 12116 * This routine is called to prepare the SLI3 device for PCI slot permanently 12117 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 12118 * pending I/Os. 
12119 **/ 12120 static void 12121 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12122 { 12123 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12124 "2711 PCI channel permanent disable for failure\n"); 12125 /* Block all SCSI devices' I/Os on the host */ 12126 lpfc_scsi_dev_block(phba); 12127 12128 /* stop all timers */ 12129 lpfc_stop_hba_timers(phba); 12130 12131 /* Clean up all driver's outstanding SCSI I/Os */ 12132 lpfc_sli_flush_io_rings(phba); 12133 } 12134 12135 /** 12136 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 12137 * @pdev: pointer to PCI device. 12138 * @state: the current PCI connection state. 12139 * 12140 * This routine is called from the PCI subsystem for I/O error handling to 12141 * device with SLI-3 interface spec. This function is called by the PCI 12142 * subsystem after a PCI bus error affecting this device has been detected. 12143 * When this function is invoked, it will need to stop all the I/Os and 12144 * interrupt(s) to the device. Once that is done, it will return 12145 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 12146 * as desired. 12147 * 12148 * Return codes 12149 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 12150 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12151 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12152 **/ 12153 static pci_ers_result_t 12154 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 12155 { 12156 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12157 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12158 12159 switch (state) { 12160 case pci_channel_io_normal: 12161 /* Non-fatal error, prepare for recovery */ 12162 lpfc_sli_prep_dev_for_recover(phba); 12163 return PCI_ERS_RESULT_CAN_RECOVER; 12164 case pci_channel_io_frozen: 12165 /* Fatal error, prepare for slot reset */ 12166 lpfc_sli_prep_dev_for_reset(phba); 12167 return PCI_ERS_RESULT_NEED_RESET; 12168 case pci_channel_io_perm_failure: 12169 /* Permanent failure, prepare for device down */ 12170 lpfc_sli_prep_dev_for_perm_failure(phba); 12171 return PCI_ERS_RESULT_DISCONNECT; 12172 default: 12173 /* Unknown state, prepare and request slot reset */ 12174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12175 "0472 Unknown PCI error state: x%x\n", state); 12176 lpfc_sli_prep_dev_for_reset(phba); 12177 return PCI_ERS_RESULT_NEED_RESET; 12178 } 12179 } 12180 12181 /** 12182 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 12183 * @pdev: pointer to PCI device. 12184 * 12185 * This routine is called from the PCI subsystem for error handling to 12186 * device with SLI-3 interface spec. This is called after PCI bus has been 12187 * reset to restart the PCI card from scratch, as if from a cold-boot. 12188 * During the PCI subsystem error recovery, after driver returns 12189 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 12190 * recovery and then call this routine before calling the .resume method 12191 * to recover the device. This function will initialize the HBA device, 12192 * enable the interrupt, but it will just put the HBA to offline state 12193 * without passing any I/O traffic. 
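 * Bringing the port back online is left to the subsequent .resume
 * callback (lpfc_io_resume_s3).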
12194 * 12195 * Return codes 12196 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12197 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12198 */ 12199 static pci_ers_result_t 12200 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 12201 { 12202 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12203 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12204 struct lpfc_sli *psli = &phba->sli; 12205 uint32_t intr_mode; 12206 12207 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 12208 if (pci_enable_device_mem(pdev)) { 12209 printk(KERN_ERR "lpfc: Cannot re-enable " 12210 "PCI device after reset.\n"); 12211 return PCI_ERS_RESULT_DISCONNECT; 12212 } 12213 12214 pci_restore_state(pdev); 12215 12216 /* 12217 * As the new kernel behavior of pci_restore_state() API call clears 12218 * device saved_state flag, need to save the restored state again. 12219 */ 12220 pci_save_state(pdev); 12221 12222 if (pdev->is_busmaster) 12223 pci_set_master(pdev); 12224 12225 spin_lock_irq(&phba->hbalock); 12226 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 12227 spin_unlock_irq(&phba->hbalock); 12228 12229 /* Configure and enable interrupt */ 12230 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12231 if (intr_mode == LPFC_INTR_ERROR) { 12232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12233 "0427 Cannot re-enable interrupt after " 12234 "slot reset.\n"); 12235 return PCI_ERS_RESULT_DISCONNECT; 12236 } else 12237 phba->intr_mode = intr_mode; 12238 12239 /* Take device offline, it will perform cleanup */ 12240 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12241 lpfc_offline(phba); 12242 lpfc_sli_brdrestart(phba); 12243 12244 /* Log the current active interrupt mode */ 12245 lpfc_log_intr_mode(phba, phba->intr_mode); 12246 12247 return PCI_ERS_RESULT_RECOVERED; 12248 } 12249 12250 /** 12251 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 12252 * @pdev: pointer to PCI device 12253 * 12254 * This routine is called from the PCI subsystem for error handling to device 12255 * with SLI-3 interface spec. It is called when kernel error recovery tells 12256 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 12257 * error recovery. After this call, traffic can start to flow from this device 12258 * again. 12259 */ 12260 static void 12261 lpfc_io_resume_s3(struct pci_dev *pdev) 12262 { 12263 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12264 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12265 12266 /* Bring device online, it will be no-op for non-fatal error resume */ 12267 lpfc_online(phba); 12268 } 12269 12270 /** 12271 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 12272 * @phba: pointer to lpfc hba data structure. 12273 * 12274 * returns the number of ELS/CT IOCBs to reserve 12275 **/ 12276 int 12277 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 12278 { 12279 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 12280 12281 if (phba->sli_rev == LPFC_SLI_REV4) { 12282 if (max_xri <= 100) 12283 return 10; 12284 else if (max_xri <= 256) 12285 return 25; 12286 else if (max_xri <= 512) 12287 return 50; 12288 else if (max_xri <= 1024) 12289 return 100; 12290 else if (max_xri <= 1536) 12291 return 150; 12292 else if (max_xri <= 2048) 12293 return 200; 12294 else 12295 return 250; 12296 } else 12297 return 0; 12298 } 12299 12300 /** 12301 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 12302 * @phba: pointer to lpfc hba data structure. 
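 *
 * For example (illustrative only), an adapter reporting max_xri of
 * 1024 reserves 100 ELS/CT IOCBs, plus LPFC_NVMET_BUF_POST more when
 * nvmet_support is set.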
12303 * 12304 * returns the number of ELS/CT + NVMET IOCBs to reserve 12305 **/ 12306 int 12307 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 12308 { 12309 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 12310 12311 if (phba->nvmet_support) 12312 max_xri += LPFC_NVMET_BUF_POST; 12313 return max_xri; 12314 } 12315 12316 12317 static void 12318 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 12319 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 12320 const struct firmware *fw) 12321 { 12322 if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) || 12323 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && 12324 magic_number != MAGIC_NUMER_G6) || 12325 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && 12326 magic_number != MAGIC_NUMER_G7)) 12327 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12328 "3030 This firmware version is not supported on " 12329 "this HBA model. Device:%x Magic:%x Type:%x " 12330 "ID:%x Size %d %zd\n", 12331 phba->pcidev->device, magic_number, ftype, fid, 12332 fsize, fw->size); 12333 else 12334 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12335 "3022 FW Download failed. Device:%x Magic:%x Type:%x " 12336 "ID:%x Size %d %zd\n", 12337 phba->pcidev->device, magic_number, ftype, fid, 12338 fsize, fw->size); 12339 } 12340 12341 12342 /** 12343 * lpfc_write_firmware - attempt to write a firmware image to the port 12344 * @fw: pointer to firmware image returned from request_firmware. 12345 * @phba: pointer to lpfc hba data structure. 12346 * 12347 **/ 12348 static void 12349 lpfc_write_firmware(const struct firmware *fw, void *context) 12350 { 12351 struct lpfc_hba *phba = (struct lpfc_hba *)context; 12352 char fwrev[FW_REV_STR_SIZE]; 12353 struct lpfc_grp_hdr *image; 12354 struct list_head dma_buffer_list; 12355 int i, rc = 0; 12356 struct lpfc_dmabuf *dmabuf, *next; 12357 uint32_t offset = 0, temp_offset = 0; 12358 uint32_t magic_number, ftype, fid, fsize; 12359 12360 /* It can be null in no-wait mode, sanity check */ 12361 if (!fw) { 12362 rc = -ENXIO; 12363 goto out; 12364 } 12365 image = (struct lpfc_grp_hdr *)fw->data; 12366 12367 magic_number = be32_to_cpu(image->magic_number); 12368 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 12369 fid = bf_get_be32(lpfc_grp_hdr_id, image); 12370 fsize = be32_to_cpu(image->size); 12371 12372 INIT_LIST_HEAD(&dma_buffer_list); 12373 lpfc_decode_firmware_rev(phba, fwrev, 1); 12374 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 12375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12376 "3023 Updating Firmware, Current Version:%s " 12377 "New Version:%s\n", 12378 fwrev, image->revision); 12379 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 12380 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 12381 GFP_KERNEL); 12382 if (!dmabuf) { 12383 rc = -ENOMEM; 12384 goto release_out; 12385 } 12386 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12387 SLI4_PAGE_SIZE, 12388 &dmabuf->phys, 12389 GFP_KERNEL); 12390 if (!dmabuf->virt) { 12391 kfree(dmabuf); 12392 rc = -ENOMEM; 12393 goto release_out; 12394 } 12395 list_add_tail(&dmabuf->list, &dma_buffer_list); 12396 } 12397 while (offset < fw->size) { 12398 temp_offset = offset; 12399 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 12400 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 12401 memcpy(dmabuf->virt, 12402 fw->data + temp_offset, 12403 fw->size - temp_offset); 12404 temp_offset = fw->size; 12405 break; 12406 } 12407 memcpy(dmabuf->virt, fw->data + temp_offset, 12408 SLI4_PAGE_SIZE); 12409 temp_offset += SLI4_PAGE_SIZE; 
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc) {
				lpfc_log_write_firmware_error(phba, offset,
					magic_number, ftype, fid, fsize, fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
	return;
}

/**
 * lpfc_sli4_request_firmware_update - Request Linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: INT_FW_UPGRADE for a non-blocking (nowait) firmware request,
 *              RUN_FW_UPGRADE for a blocking request.
 *
 * This routine is called to perform a Linux generic firmware upgrade on
 * devices that support this feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	char file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI
 * device-specific information of the device and driver to see whether the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. If this routine determines it can claim
 * the HBA, it does all the initialization that it needs to do to handle the
 * HBA properly.
12490 * 12491 * Return code 12492 * 0 - driver can claim the device 12493 * negative value - driver can not claim the device 12494 **/ 12495 static int 12496 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 12497 { 12498 struct lpfc_hba *phba; 12499 struct lpfc_vport *vport = NULL; 12500 struct Scsi_Host *shost = NULL; 12501 int error; 12502 uint32_t cfg_mode, intr_mode; 12503 12504 /* Allocate memory for HBA structure */ 12505 phba = lpfc_hba_alloc(pdev); 12506 if (!phba) 12507 return -ENOMEM; 12508 12509 /* Perform generic PCI device enabling operation */ 12510 error = lpfc_enable_pci_dev(phba); 12511 if (error) 12512 goto out_free_phba; 12513 12514 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 12515 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 12516 if (error) 12517 goto out_disable_pci_dev; 12518 12519 /* Set up SLI-4 specific device PCI memory space */ 12520 error = lpfc_sli4_pci_mem_setup(phba); 12521 if (error) { 12522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12523 "1410 Failed to set up pci memory space.\n"); 12524 goto out_disable_pci_dev; 12525 } 12526 12527 /* Set up SLI-4 Specific device driver resources */ 12528 error = lpfc_sli4_driver_resource_setup(phba); 12529 if (error) { 12530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12531 "1412 Failed to set up driver resource.\n"); 12532 goto out_unset_pci_mem_s4; 12533 } 12534 12535 INIT_LIST_HEAD(&phba->active_rrq_list); 12536 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 12537 12538 /* Set up common device driver resources */ 12539 error = lpfc_setup_driver_resource_phase2(phba); 12540 if (error) { 12541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12542 "1414 Failed to set up driver resource.\n"); 12543 goto out_unset_driver_resource_s4; 12544 } 12545 12546 /* Get the default values for Model Name and Description */ 12547 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 12548 12549 /* Now, trying to enable interrupt and bring up the device */ 12550 cfg_mode = phba->cfg_use_msi; 12551 12552 /* Put device to a known state before enabling interrupt */ 12553 phba->pport = NULL; 12554 lpfc_stop_port(phba); 12555 12556 /* Configure and enable interrupt */ 12557 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 12558 if (intr_mode == LPFC_INTR_ERROR) { 12559 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12560 "0426 Failed to enable interrupt.\n"); 12561 error = -ENODEV; 12562 goto out_unset_driver_resource; 12563 } 12564 /* Default to single EQ for non-MSI-X */ 12565 if (phba->intr_type != MSIX) { 12566 phba->cfg_irq_chann = 1; 12567 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12568 if (phba->nvmet_support) 12569 phba->cfg_nvmet_mrq = 1; 12570 } 12571 } 12572 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 12573 12574 /* Create SCSI host to the physical port */ 12575 error = lpfc_create_shost(phba); 12576 if (error) { 12577 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12578 "1415 Failed to create scsi host.\n"); 12579 goto out_disable_intr; 12580 } 12581 vport = phba->pport; 12582 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 12583 12584 /* Configure sysfs attributes */ 12585 error = lpfc_alloc_sysfs_attr(vport); 12586 if (error) { 12587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12588 "1416 Failed to allocate sysfs attr\n"); 12589 goto out_destroy_shost; 12590 } 12591 12592 /* Set up SLI-4 HBA */ 12593 if (lpfc_sli4_hba_setup(phba)) { 12594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12595 "1421 Failed to set up hba\n"); 12596 error = 
-ENODEV; 12597 goto out_free_sysfs_attr; 12598 } 12599 12600 /* Log the current active interrupt mode */ 12601 phba->intr_mode = intr_mode; 12602 lpfc_log_intr_mode(phba, intr_mode); 12603 12604 /* Perform post initialization setup */ 12605 lpfc_post_init_setup(phba); 12606 12607 /* NVME support in FW earlier in the driver load corrects the 12608 * FC4 type making a check for nvme_support unnecessary. 12609 */ 12610 if (phba->nvmet_support == 0) { 12611 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12612 /* Create NVME binding with nvme_fc_transport. This 12613 * ensures the vport is initialized. If the localport 12614 * create fails, it should not unload the driver to 12615 * support field issues. 12616 */ 12617 error = lpfc_nvme_create_localport(vport); 12618 if (error) { 12619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12620 "6004 NVME registration " 12621 "failed, error x%x\n", 12622 error); 12623 } 12624 } 12625 } 12626 12627 /* check for firmware upgrade or downgrade */ 12628 if (phba->cfg_request_firmware_upgrade) 12629 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 12630 12631 /* Check if there are static vports to be created. */ 12632 lpfc_create_static_vport(phba); 12633 12634 /* Enable RAS FW log support */ 12635 lpfc_sli4_ras_setup(phba); 12636 12637 return 0; 12638 12639 out_free_sysfs_attr: 12640 lpfc_free_sysfs_attr(vport); 12641 out_destroy_shost: 12642 lpfc_destroy_shost(phba); 12643 out_disable_intr: 12644 lpfc_sli4_disable_intr(phba); 12645 out_unset_driver_resource: 12646 lpfc_unset_driver_resource_phase2(phba); 12647 out_unset_driver_resource_s4: 12648 lpfc_sli4_driver_resource_unset(phba); 12649 out_unset_pci_mem_s4: 12650 lpfc_sli4_pci_mem_unset(phba); 12651 out_disable_pci_dev: 12652 lpfc_disable_pci_dev(phba); 12653 if (shost) 12654 scsi_host_put(shost); 12655 out_free_phba: 12656 lpfc_hba_free(phba); 12657 return error; 12658 } 12659 12660 /** 12661 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 12662 * @pdev: pointer to PCI device 12663 * 12664 * This routine is called from the kernel's PCI subsystem to device with 12665 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 12666 * removed from PCI bus, it performs all the necessary cleanup for the HBA 12667 * device to be removed from the PCI subsystem properly. 12668 **/ 12669 static void 12670 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 12671 { 12672 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12673 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 12674 struct lpfc_vport **vports; 12675 struct lpfc_hba *phba = vport->phba; 12676 int i; 12677 12678 /* Mark the device unloading flag */ 12679 spin_lock_irq(&phba->hbalock); 12680 vport->load_flag |= FC_UNLOADING; 12681 spin_unlock_irq(&phba->hbalock); 12682 12683 /* Free the HBA sysfs attributes */ 12684 lpfc_free_sysfs_attr(vport); 12685 12686 /* Release all the vports against this physical port */ 12687 vports = lpfc_create_vport_work_array(phba); 12688 if (vports != NULL) 12689 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 12690 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 12691 continue; 12692 fc_vport_terminate(vports[i]->fc_vport); 12693 } 12694 lpfc_destroy_vport_work_array(phba, vports); 12695 12696 /* Remove FC host and then SCSI host with the physical port */ 12697 fc_remove_host(shost); 12698 scsi_remove_host(shost); 12699 12700 /* Perform ndlp cleanup on the physical port. 
The nvme and nvmet
	 * localports are destroyed afterwards to clean up all transport
	 * memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off the device's interrupts and DMA, and
 * bringing the device offline. Note that the driver implements only the
 * minimum PM requirements of a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call. Therefore, the driver sets the device to the PCI_D3hot state
 * in PCI config space instead of setting it according to the @msg provided by
 * the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that the driver
 * implements only the minimum PM requirements of a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
 * method are treated as SUSPEND, and the driver fully reinitializes its
 * device during the resume() method call. Therefore, the device is set to
 * PCI_D0 directly in PCI config space before its state is restored.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter.
*/ 12836 phba->worker_thread = kthread_run(lpfc_do_work, phba, 12837 "lpfc_worker_%d", phba->brd_no); 12838 if (IS_ERR(phba->worker_thread)) { 12839 error = PTR_ERR(phba->worker_thread); 12840 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12841 "0293 PM resume failed to start worker " 12842 "thread: error=x%x.\n", error); 12843 return error; 12844 } 12845 12846 /* Configure and enable interrupt */ 12847 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 12848 if (intr_mode == LPFC_INTR_ERROR) { 12849 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12850 "0294 PM resume Failed to enable interrupt\n"); 12851 return -EIO; 12852 } else 12853 phba->intr_mode = intr_mode; 12854 12855 /* Restart HBA and bring it online */ 12856 lpfc_sli_brdrestart(phba); 12857 lpfc_online(phba); 12858 12859 /* Log the current active interrupt mode */ 12860 lpfc_log_intr_mode(phba, phba->intr_mode); 12861 12862 return 0; 12863 } 12864 12865 /** 12866 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 12867 * @phba: pointer to lpfc hba data structure. 12868 * 12869 * This routine is called to prepare the SLI4 device for PCI slot recover. It 12870 * aborts all the outstanding SCSI I/Os to the pci device. 12871 **/ 12872 static void 12873 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 12874 { 12875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12876 "2828 PCI channel I/O abort preparing for recovery\n"); 12877 /* 12878 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 12879 * and let the SCSI mid-layer to retry them to recover. 12880 */ 12881 lpfc_sli_abort_fcp_rings(phba); 12882 } 12883 12884 /** 12885 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 12886 * @phba: pointer to lpfc hba data structure. 12887 * 12888 * This routine is called to prepare the SLI4 device for PCI slot reset. It 12889 * disables the device interrupt and pci device, and aborts the internal FCP 12890 * pending I/Os. 12891 **/ 12892 static void 12893 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 12894 { 12895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12896 "2826 PCI channel disable preparing for reset\n"); 12897 12898 /* Block any management I/Os to the device */ 12899 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 12900 12901 /* Block all SCSI devices' I/Os on the host */ 12902 lpfc_scsi_dev_block(phba); 12903 12904 /* Flush all driver's outstanding I/Os as we are to reset */ 12905 lpfc_sli_flush_io_rings(phba); 12906 12907 /* stop all timers */ 12908 lpfc_stop_hba_timers(phba); 12909 12910 /* Disable interrupt and pci device */ 12911 lpfc_sli4_disable_intr(phba); 12912 lpfc_sli4_queue_destroy(phba); 12913 pci_disable_device(phba->pcidev); 12914 } 12915 12916 /** 12917 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 12918 * @phba: pointer to lpfc hba data structure. 12919 * 12920 * This routine is called to prepare the SLI4 device for PCI slot permanently 12921 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 12922 * pending I/Os. 
12923 **/ 12924 static void 12925 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12926 { 12927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12928 "2827 PCI channel permanent disable for failure\n"); 12929 12930 /* Block all SCSI devices' I/Os on the host */ 12931 lpfc_scsi_dev_block(phba); 12932 12933 /* stop all timers */ 12934 lpfc_stop_hba_timers(phba); 12935 12936 /* Clean up all driver's outstanding I/Os */ 12937 lpfc_sli_flush_io_rings(phba); 12938 } 12939 12940 /** 12941 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 12942 * @pdev: pointer to PCI device. 12943 * @state: the current PCI connection state. 12944 * 12945 * This routine is called from the PCI subsystem for error handling to device 12946 * with SLI-4 interface spec. This function is called by the PCI subsystem 12947 * after a PCI bus error affecting this device has been detected. When this 12948 * function is invoked, it will need to stop all the I/Os and interrupt(s) 12949 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 12950 * for the PCI subsystem to perform proper recovery as desired. 12951 * 12952 * Return codes 12953 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12954 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12955 **/ 12956 static pci_ers_result_t 12957 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 12958 { 12959 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12960 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12961 12962 switch (state) { 12963 case pci_channel_io_normal: 12964 /* Non-fatal error, prepare for recovery */ 12965 lpfc_sli4_prep_dev_for_recover(phba); 12966 return PCI_ERS_RESULT_CAN_RECOVER; 12967 case pci_channel_io_frozen: 12968 /* Fatal error, prepare for slot reset */ 12969 lpfc_sli4_prep_dev_for_reset(phba); 12970 return PCI_ERS_RESULT_NEED_RESET; 12971 case pci_channel_io_perm_failure: 12972 /* Permanent failure, prepare for device down */ 12973 lpfc_sli4_prep_dev_for_perm_failure(phba); 12974 return PCI_ERS_RESULT_DISCONNECT; 12975 default: 12976 /* Unknown state, prepare and request slot reset */ 12977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12978 "2825 Unknown PCI error state: x%x\n", state); 12979 lpfc_sli4_prep_dev_for_reset(phba); 12980 return PCI_ERS_RESULT_NEED_RESET; 12981 } 12982 } 12983 12984 /** 12985 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 12986 * @pdev: pointer to PCI device. 12987 * 12988 * This routine is called from the PCI subsystem for error handling to device 12989 * with SLI-4 interface spec. It is called after PCI bus has been reset to 12990 * restart the PCI card from scratch, as if from a cold-boot. During the 12991 * PCI subsystem error recovery, after the driver returns 12992 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 12993 * recovery and then call this routine before calling the .resume method to 12994 * recover the device. This function will initialize the HBA device, enable 12995 * the interrupt, but it will just put the HBA to offline state without 12996 * passing any I/O traffic. 
12997 * 12998 * Return codes 12999 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13000 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13001 */ 13002 static pci_ers_result_t 13003 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 13004 { 13005 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13006 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13007 struct lpfc_sli *psli = &phba->sli; 13008 uint32_t intr_mode; 13009 13010 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 13011 if (pci_enable_device_mem(pdev)) { 13012 printk(KERN_ERR "lpfc: Cannot re-enable " 13013 "PCI device after reset.\n"); 13014 return PCI_ERS_RESULT_DISCONNECT; 13015 } 13016 13017 pci_restore_state(pdev); 13018 13019 /* 13020 * As the new kernel behavior of pci_restore_state() API call clears 13021 * device saved_state flag, need to save the restored state again. 13022 */ 13023 pci_save_state(pdev); 13024 13025 if (pdev->is_busmaster) 13026 pci_set_master(pdev); 13027 13028 spin_lock_irq(&phba->hbalock); 13029 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 13030 spin_unlock_irq(&phba->hbalock); 13031 13032 /* Configure and enable interrupt */ 13033 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 13034 if (intr_mode == LPFC_INTR_ERROR) { 13035 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13036 "2824 Cannot re-enable interrupt after " 13037 "slot reset.\n"); 13038 return PCI_ERS_RESULT_DISCONNECT; 13039 } else 13040 phba->intr_mode = intr_mode; 13041 13042 /* Log the current active interrupt mode */ 13043 lpfc_log_intr_mode(phba, phba->intr_mode); 13044 13045 return PCI_ERS_RESULT_RECOVERED; 13046 } 13047 13048 /** 13049 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 13050 * @pdev: pointer to PCI device 13051 * 13052 * This routine is called from the PCI subsystem for error handling to device 13053 * with SLI-4 interface spec. It is called when kernel error recovery tells 13054 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 13055 * error recovery. After this call, traffic can start to flow from this device 13056 * again. 13057 **/ 13058 static void 13059 lpfc_io_resume_s4(struct pci_dev *pdev) 13060 { 13061 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13062 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13063 13064 /* 13065 * In case of slot reset, as function reset is performed through 13066 * mailbox command which needs DMA to be enabled, this operation 13067 * has to be moved to the io resume phase. Taking device offline 13068 * will perform the necessary cleanup. 13069 */ 13070 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 13071 /* Perform device reset */ 13072 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 13073 lpfc_offline(phba); 13074 lpfc_sli_brdrestart(phba); 13075 /* Bring the device back online */ 13076 lpfc_online(phba); 13077 } 13078 } 13079 13080 /** 13081 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 13082 * @pdev: pointer to PCI device 13083 * @pid: pointer to PCI device identifier 13084 * 13085 * This routine is to be registered to the kernel's PCI subsystem. When an 13086 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 13087 * at PCI device-specific information of the device and driver to see if the 13088 * driver state that it can support this kind of device. If the match is 13089 * successful, the driver core invokes this routine. 
This routine dispatches 13090 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 13091 * do all the initialization that it needs to do to handle the HBA device 13092 * properly. 13093 * 13094 * Return code 13095 * 0 - driver can claim the device 13096 * negative value - driver can not claim the device 13097 **/ 13098 static int 13099 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 13100 { 13101 int rc; 13102 struct lpfc_sli_intf intf; 13103 13104 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 13105 return -ENODEV; 13106 13107 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 13108 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 13109 rc = lpfc_pci_probe_one_s4(pdev, pid); 13110 else 13111 rc = lpfc_pci_probe_one_s3(pdev, pid); 13112 13113 return rc; 13114 } 13115 13116 /** 13117 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 13118 * @pdev: pointer to PCI device 13119 * 13120 * This routine is to be registered to the kernel's PCI subsystem. When an 13121 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 13122 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 13123 * remove routine, which will perform all the necessary cleanup for the 13124 * device to be removed from the PCI subsystem properly. 13125 **/ 13126 static void 13127 lpfc_pci_remove_one(struct pci_dev *pdev) 13128 { 13129 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13130 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13131 13132 switch (phba->pci_dev_grp) { 13133 case LPFC_PCI_DEV_LP: 13134 lpfc_pci_remove_one_s3(pdev); 13135 break; 13136 case LPFC_PCI_DEV_OC: 13137 lpfc_pci_remove_one_s4(pdev); 13138 break; 13139 default: 13140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13141 "1424 Invalid PCI device group: 0x%x\n", 13142 phba->pci_dev_grp); 13143 break; 13144 } 13145 return; 13146 } 13147 13148 /** 13149 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 13150 * @pdev: pointer to PCI device 13151 * @msg: power management message 13152 * 13153 * This routine is to be registered to the kernel's PCI subsystem to support 13154 * system Power Management (PM). When PM invokes this method, it dispatches 13155 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 13156 * suspend the device. 13157 * 13158 * Return code 13159 * 0 - driver suspended the device 13160 * Error otherwise 13161 **/ 13162 static int 13163 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 13164 { 13165 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13166 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13167 int rc = -ENODEV; 13168 13169 switch (phba->pci_dev_grp) { 13170 case LPFC_PCI_DEV_LP: 13171 rc = lpfc_pci_suspend_one_s3(pdev, msg); 13172 break; 13173 case LPFC_PCI_DEV_OC: 13174 rc = lpfc_pci_suspend_one_s4(pdev, msg); 13175 break; 13176 default: 13177 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13178 "1425 Invalid PCI device group: 0x%x\n", 13179 phba->pci_dev_grp); 13180 break; 13181 } 13182 return rc; 13183 } 13184 13185 /** 13186 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 13187 * @pdev: pointer to PCI device 13188 * 13189 * This routine is to be registered to the kernel's PCI subsystem to support 13190 * system Power Management (PM). 
When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting a PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
13268 * 13269 * Return codes 13270 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13271 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13272 **/ 13273 static pci_ers_result_t 13274 lpfc_io_slot_reset(struct pci_dev *pdev) 13275 { 13276 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13277 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13278 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 13279 13280 switch (phba->pci_dev_grp) { 13281 case LPFC_PCI_DEV_LP: 13282 rc = lpfc_io_slot_reset_s3(pdev); 13283 break; 13284 case LPFC_PCI_DEV_OC: 13285 rc = lpfc_io_slot_reset_s4(pdev); 13286 break; 13287 default: 13288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13289 "1428 Invalid PCI device group: 0x%x\n", 13290 phba->pci_dev_grp); 13291 break; 13292 } 13293 return rc; 13294 } 13295 13296 /** 13297 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 13298 * @pdev: pointer to PCI device 13299 * 13300 * This routine is registered to the PCI subsystem for error handling. It 13301 * is called when kernel error recovery tells the lpfc driver that it is 13302 * OK to resume normal PCI operation after PCI bus error recovery. When 13303 * this routine is invoked, it dispatches the action to the proper SLI-3 13304 * or SLI-4 device io_resume routine, which will resume the device operation. 13305 **/ 13306 static void 13307 lpfc_io_resume(struct pci_dev *pdev) 13308 { 13309 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13310 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13311 13312 switch (phba->pci_dev_grp) { 13313 case LPFC_PCI_DEV_LP: 13314 lpfc_io_resume_s3(pdev); 13315 break; 13316 case LPFC_PCI_DEV_OC: 13317 lpfc_io_resume_s4(pdev); 13318 break; 13319 default: 13320 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13321 "1429 Invalid PCI device group: 0x%x\n", 13322 phba->pci_dev_grp); 13323 break; 13324 } 13325 return; 13326 } 13327 13328 /** 13329 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 13330 * @phba: pointer to lpfc hba data structure. 13331 * 13332 * This routine checks to see if OAS is supported for this adapter. If 13333 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 13334 * the enable oas flag is cleared and the pool created for OAS device data 13335 * is destroyed. 13336 * 13337 **/ 13338 static void 13339 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 13340 { 13341 13342 if (!phba->cfg_EnableXLane) 13343 return; 13344 13345 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 13346 phba->cfg_fof = 1; 13347 } else { 13348 phba->cfg_fof = 0; 13349 if (phba->device_data_mem_pool) 13350 mempool_destroy(phba->device_data_mem_pool); 13351 phba->device_data_mem_pool = NULL; 13352 } 13353 13354 return; 13355 } 13356 13357 /** 13358 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 13359 * @phba: pointer to lpfc hba data structure. 13360 * 13361 * This routine checks to see if RAS is supported by the adapter. Check the 13362 * function through which RAS support enablement is to be done. 
13363 **/ 13364 void 13365 lpfc_sli4_ras_init(struct lpfc_hba *phba) 13366 { 13367 switch (phba->pcidev->device) { 13368 case PCI_DEVICE_ID_LANCER_G6_FC: 13369 case PCI_DEVICE_ID_LANCER_G7_FC: 13370 phba->ras_fwlog.ras_hwsupport = true; 13371 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 13372 phba->cfg_ras_fwlog_buffsize) 13373 phba->ras_fwlog.ras_enabled = true; 13374 else 13375 phba->ras_fwlog.ras_enabled = false; 13376 break; 13377 default: 13378 phba->ras_fwlog.ras_hwsupport = false; 13379 } 13380 } 13381 13382 13383 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 13384 13385 static const struct pci_error_handlers lpfc_err_handler = { 13386 .error_detected = lpfc_io_error_detected, 13387 .slot_reset = lpfc_io_slot_reset, 13388 .resume = lpfc_io_resume, 13389 }; 13390 13391 static struct pci_driver lpfc_driver = { 13392 .name = LPFC_DRIVER_NAME, 13393 .id_table = lpfc_id_table, 13394 .probe = lpfc_pci_probe_one, 13395 .remove = lpfc_pci_remove_one, 13396 .shutdown = lpfc_pci_remove_one, 13397 .suspend = lpfc_pci_suspend_one, 13398 .resume = lpfc_pci_resume_one, 13399 .err_handler = &lpfc_err_handler, 13400 }; 13401 13402 static const struct file_operations lpfc_mgmt_fop = { 13403 .owner = THIS_MODULE, 13404 }; 13405 13406 static struct miscdevice lpfc_mgmt_dev = { 13407 .minor = MISC_DYNAMIC_MINOR, 13408 .name = "lpfcmgmt", 13409 .fops = &lpfc_mgmt_fop, 13410 }; 13411 13412 /** 13413 * lpfc_init - lpfc module initialization routine 13414 * 13415 * This routine is to be invoked when the lpfc module is loaded into the 13416 * kernel. The special kernel macro module_init() is used to indicate the 13417 * role of this routine to the kernel as lpfc module entry point. 13418 * 13419 * Return codes 13420 * 0 - successful 13421 * -ENOMEM - FC attach transport failed 13422 * all others - failed 13423 */ 13424 static int __init 13425 lpfc_init(void) 13426 { 13427 int error = 0; 13428 13429 printk(LPFC_MODULE_DESC "\n"); 13430 printk(LPFC_COPYRIGHT "\n"); 13431 13432 error = misc_register(&lpfc_mgmt_dev); 13433 if (error) 13434 printk(KERN_ERR "Could not register lpfcmgmt device, " 13435 "misc_register returned with status %d", error); 13436 13437 lpfc_transport_functions.vport_create = lpfc_vport_create; 13438 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 13439 lpfc_transport_template = 13440 fc_attach_transport(&lpfc_transport_functions); 13441 if (lpfc_transport_template == NULL) 13442 return -ENOMEM; 13443 lpfc_vport_transport_template = 13444 fc_attach_transport(&lpfc_vport_transport_functions); 13445 if (lpfc_vport_transport_template == NULL) { 13446 fc_release_transport(lpfc_transport_template); 13447 return -ENOMEM; 13448 } 13449 lpfc_nvme_cmd_template(); 13450 lpfc_nvmet_cmd_template(); 13451 13452 /* Initialize in case vector mapping is needed */ 13453 lpfc_present_cpu = num_present_cpus(); 13454 13455 error = pci_register_driver(&lpfc_driver); 13456 if (error) { 13457 fc_release_transport(lpfc_transport_template); 13458 fc_release_transport(lpfc_vport_transport_template); 13459 } 13460 13461 return error; 13462 } 13463 13464 /** 13465 * lpfc_exit - lpfc module removal routine 13466 * 13467 * This routine is invoked when the lpfc module is removed from the kernel. 13468 * The special kernel macro module_exit() is used to indicate the role of 13469 * this routine to the kernel as lpfc module exit point. 
13470 */ 13471 static void __exit 13472 lpfc_exit(void) 13473 { 13474 misc_deregister(&lpfc_mgmt_dev); 13475 pci_unregister_driver(&lpfc_driver); 13476 fc_release_transport(lpfc_transport_template); 13477 fc_release_transport(lpfc_vport_transport_template); 13478 idr_destroy(&lpfc_hba_index); 13479 } 13480 13481 module_init(lpfc_init); 13482 module_exit(lpfc_exit); 13483 MODULE_LICENSE("GPL"); 13484 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 13485 MODULE_AUTHOR("Broadcom"); 13486 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 13487