1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 41 #include <scsi/scsi.h> 42 #include <scsi/scsi_device.h> 43 #include <scsi/scsi_host.h> 44 #include <scsi/scsi_transport_fc.h> 45 #include <scsi/scsi_tcq.h> 46 #include <scsi/fc/fc_fs.h> 47 48 #include <linux/nvme-fc-driver.h> 49 50 #include "lpfc_hw4.h" 51 #include "lpfc_hw.h" 52 #include "lpfc_sli.h" 53 #include "lpfc_sli4.h" 54 #include "lpfc_nl.h" 55 #include "lpfc_disc.h" 56 #include "lpfc.h" 57 #include "lpfc_scsi.h" 58 #include "lpfc_nvme.h" 59 #include "lpfc_nvmet.h" 60 #include "lpfc_logmsg.h" 61 #include "lpfc_crtn.h" 62 #include "lpfc_vport.h" 63 #include "lpfc_version.h" 64 #include "lpfc_ids.h" 65 66 char *_dump_buf_data; 67 unsigned long _dump_buf_data_order; 68 char *_dump_buf_dif; 69 unsigned long _dump_buf_dif_order; 70 spinlock_t _dump_buf_lock; 71 72 /* Used when mapping IRQ vectors in a driver centric manner */ 73 uint16_t *lpfc_used_cpu; 74 uint32_t lpfc_present_cpu; 75 76 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 77 static int lpfc_post_rcv_buf(struct lpfc_hba *); 78 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 79 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 80 static int lpfc_setup_endian_order(struct lpfc_hba *); 81 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 82 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 83 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 84 static void lpfc_init_sgl_list(struct lpfc_hba *); 85 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 86 static void lpfc_free_active_sgl(struct lpfc_hba *); 87 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 88 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 89 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 90 static void 
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 91 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 92 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 93 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 94 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 95 96 static struct scsi_transport_template *lpfc_transport_template = NULL; 97 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 98 static DEFINE_IDR(lpfc_hba_index); 99 #define LPFC_NVMET_BUF_POST 254 100 101 /** 102 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 103 * @phba: pointer to lpfc hba data structure. 104 * 105 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 106 * mailbox command. It retrieves the revision information from the HBA and 107 * collects the Vital Product Data (VPD) about the HBA for preparing the 108 * configuration of the HBA. 109 * 110 * Return codes: 111 * 0 - success. 112 * -ERESTART - requests the SLI layer to reset the HBA and try again. 113 * Any other value - indicates an error. 114 **/ 115 int 116 lpfc_config_port_prep(struct lpfc_hba *phba) 117 { 118 lpfc_vpd_t *vp = &phba->vpd; 119 int i = 0, rc; 120 LPFC_MBOXQ_t *pmb; 121 MAILBOX_t *mb; 122 char *lpfc_vpd_data = NULL; 123 uint16_t offset = 0; 124 static char licensed[56] = 125 "key unlock for use with gnu public licensed code only\0"; 126 static int init_key = 1; 127 128 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 129 if (!pmb) { 130 phba->link_state = LPFC_HBA_ERROR; 131 return -ENOMEM; 132 } 133 134 mb = &pmb->u.mb; 135 phba->link_state = LPFC_INIT_MBX_CMDS; 136 137 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 138 if (init_key) { 139 uint32_t *ptext = (uint32_t *) licensed; 140 141 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 142 *ptext = cpu_to_be32(*ptext); 143 init_key = 0; 144 } 145 146 lpfc_read_nv(phba, pmb); 147 memset((char*)mb->un.varRDnvp.rsvd3, 0, 148 sizeof (mb->un.varRDnvp.rsvd3)); 149 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 150 sizeof (licensed)); 151 152 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 153 154 if (rc != MBX_SUCCESS) { 155 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 156 "0324 Config Port initialization " 157 "error, mbxCmd x%x READ_NVPARM, " 158 "mbxStatus x%x\n", 159 mb->mbxCommand, mb->mbxStatus); 160 mempool_free(pmb, phba->mbox_mem_pool); 161 return -ERESTART; 162 } 163 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 164 sizeof(phba->wwnn)); 165 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 166 sizeof(phba->wwpn)); 167 } 168 169 phba->sli3_options = 0x0; 170 171 /* Setup and issue mailbox READ REV command */ 172 lpfc_read_rev(phba, pmb); 173 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 174 if (rc != MBX_SUCCESS) { 175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 176 "0439 Adapter failed to init, mbxCmd x%x " 177 "READ_REV, mbxStatus x%x\n", 178 mb->mbxCommand, mb->mbxStatus); 179 mempool_free( pmb, phba->mbox_mem_pool); 180 return -ERESTART; 181 } 182 183 184 /* 185 * The value of rr must be 1 since the driver set the cv field to 1. 186 * This setting requires the FW to set all revision fields. 
187 */ 188 if (mb->un.varRdRev.rr == 0) { 189 vp->rev.rBit = 0; 190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 191 "0440 Adapter failed to init, READ_REV has " 192 "missing revision information.\n"); 193 mempool_free(pmb, phba->mbox_mem_pool); 194 return -ERESTART; 195 } 196 197 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 198 mempool_free(pmb, phba->mbox_mem_pool); 199 return -EINVAL; 200 } 201 202 /* Save information as VPD data */ 203 vp->rev.rBit = 1; 204 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 205 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 206 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 207 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 208 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 209 vp->rev.biuRev = mb->un.varRdRev.biuRev; 210 vp->rev.smRev = mb->un.varRdRev.smRev; 211 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 212 vp->rev.endecRev = mb->un.varRdRev.endecRev; 213 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 214 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 215 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 216 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 217 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 218 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 219 220 /* If the sli feature level is less then 9, we must 221 * tear down all RPIs and VPIs on link down if NPIV 222 * is enabled. 223 */ 224 if (vp->rev.feaLevelHigh < 9) 225 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 226 227 if (lpfc_is_LC_HBA(phba->pcidev->device)) 228 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 229 sizeof (phba->RandomData)); 230 231 /* Get adapter VPD information */ 232 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 233 if (!lpfc_vpd_data) 234 goto out_free_mbox; 235 do { 236 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 237 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 238 239 if (rc != MBX_SUCCESS) { 240 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 241 "0441 VPD not present on adapter, " 242 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 243 mb->mbxCommand, mb->mbxStatus); 244 mb->un.varDmp.word_cnt = 0; 245 } 246 /* dump mem may return a zero when finished or we got a 247 * mailbox error, either way we are done. 248 */ 249 if (mb->un.varDmp.word_cnt == 0) 250 break; 251 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 252 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 253 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 254 lpfc_vpd_data + offset, 255 mb->un.varDmp.word_cnt); 256 offset += mb->un.varDmp.word_cnt; 257 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 258 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 259 260 kfree(lpfc_vpd_data); 261 out_free_mbox: 262 mempool_free(pmb, phba->mbox_mem_pool); 263 return 0; 264 } 265 266 /** 267 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 268 * @phba: pointer to lpfc hba data structure. 269 * @pmboxq: pointer to the driver internal queue element for mailbox command. 270 * 271 * This is the completion handler for driver's configuring asynchronous event 272 * mailbox command to the device. If the mailbox command returns successfully, 273 * it will set internal async event support flag to 1; otherwise, it will 274 * set internal async event support flag to 0. 
275 **/ 276 static void 277 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 278 { 279 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 280 phba->temp_sensor_support = 1; 281 else 282 phba->temp_sensor_support = 0; 283 mempool_free(pmboxq, phba->mbox_mem_pool); 284 return; 285 } 286 287 /** 288 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 289 * @phba: pointer to lpfc hba data structure. 290 * @pmboxq: pointer to the driver internal queue element for mailbox command. 291 * 292 * This is the completion handler for dump mailbox command for getting 293 * wake up parameters. When this command complete, the response contain 294 * Option rom version of the HBA. This function translate the version number 295 * into a human readable string and store it in OptionROMVersion. 296 **/ 297 static void 298 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 299 { 300 struct prog_id *prg; 301 uint32_t prog_id_word; 302 char dist = ' '; 303 /* character array used for decoding dist type. */ 304 char dist_char[] = "nabx"; 305 306 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 307 mempool_free(pmboxq, phba->mbox_mem_pool); 308 return; 309 } 310 311 prg = (struct prog_id *) &prog_id_word; 312 313 /* word 7 contain option rom version */ 314 prog_id_word = pmboxq->u.mb.un.varWords[7]; 315 316 /* Decode the Option rom version word to a readable string */ 317 if (prg->dist < 4) 318 dist = dist_char[prg->dist]; 319 320 if ((prg->dist == 3) && (prg->num == 0)) 321 snprintf(phba->OptionROMVersion, 32, "%d.%d%d", 322 prg->ver, prg->rev, prg->lev); 323 else 324 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", 325 prg->ver, prg->rev, prg->lev, 326 dist, prg->num); 327 mempool_free(pmboxq, phba->mbox_mem_pool); 328 return; 329 } 330 331 /** 332 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, 333 * cfg_soft_wwnn, cfg_soft_wwpn 334 * @vport: pointer to lpfc vport data structure. 335 * 336 * 337 * Return codes 338 * None. 
339 **/ 340 void 341 lpfc_update_vport_wwn(struct lpfc_vport *vport) 342 { 343 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 344 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 345 346 /* If the soft name exists then update it using the service params */ 347 if (vport->phba->cfg_soft_wwnn) 348 u64_to_wwn(vport->phba->cfg_soft_wwnn, 349 vport->fc_sparam.nodeName.u.wwn); 350 if (vport->phba->cfg_soft_wwpn) 351 u64_to_wwn(vport->phba->cfg_soft_wwpn, 352 vport->fc_sparam.portName.u.wwn); 353 354 /* 355 * If the name is empty or there exists a soft name 356 * then copy the service params name, otherwise use the fc name 357 */ 358 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 359 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 360 sizeof(struct lpfc_name)); 361 else 362 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 363 sizeof(struct lpfc_name)); 364 365 /* 366 * If the port name has changed, then set the Param changes flag 367 * to unreg the login 368 */ 369 if (vport->fc_portname.u.wwn[0] != 0 && 370 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 371 sizeof(struct lpfc_name))) 372 vport->vport_flag |= FAWWPN_PARAM_CHG; 373 374 if (vport->fc_portname.u.wwn[0] == 0 || 375 vport->phba->cfg_soft_wwpn || 376 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 377 vport->vport_flag & FAWWPN_SET) { 378 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 379 sizeof(struct lpfc_name)); 380 vport->vport_flag &= ~FAWWPN_SET; 381 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 382 vport->vport_flag |= FAWWPN_SET; 383 } 384 else 385 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 386 sizeof(struct lpfc_name)); 387 } 388 389 /** 390 * lpfc_config_port_post - Perform lpfc initialization after config port 391 * @phba: pointer to lpfc hba data structure. 392 * 393 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 394 * command call. It performs all internal resource and state setups on the 395 * port: post IOCB buffers, enable appropriate host interrupt attentions, 396 * ELS ring timers, etc. 397 * 398 * Return codes 399 * 0 - success. 400 * Any other value - error. 401 **/ 402 int 403 lpfc_config_port_post(struct lpfc_hba *phba) 404 { 405 struct lpfc_vport *vport = phba->pport; 406 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 407 LPFC_MBOXQ_t *pmb; 408 MAILBOX_t *mb; 409 struct lpfc_dmabuf *mp; 410 struct lpfc_sli *psli = &phba->sli; 411 uint32_t status, timeout; 412 int i, j; 413 int rc; 414 415 spin_lock_irq(&phba->hbalock); 416 /* 417 * If the Config port completed correctly the HBA is not 418 * over heated any more. 419 */ 420 if (phba->over_temp_state == HBA_OVER_TEMP) 421 phba->over_temp_state = HBA_NORMAL_TEMP; 422 spin_unlock_irq(&phba->hbalock); 423 424 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 425 if (!pmb) { 426 phba->link_state = LPFC_HBA_ERROR; 427 return -ENOMEM; 428 } 429 mb = &pmb->u.mb; 430 431 /* Get login parameters for NID. 
*/ 432 rc = lpfc_read_sparam(phba, pmb, 0); 433 if (rc) { 434 mempool_free(pmb, phba->mbox_mem_pool); 435 return -ENOMEM; 436 } 437 438 pmb->vport = vport; 439 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 440 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 441 "0448 Adapter failed init, mbxCmd x%x " 442 "READ_SPARM mbxStatus x%x\n", 443 mb->mbxCommand, mb->mbxStatus); 444 phba->link_state = LPFC_HBA_ERROR; 445 mp = (struct lpfc_dmabuf *) pmb->context1; 446 mempool_free(pmb, phba->mbox_mem_pool); 447 lpfc_mbuf_free(phba, mp->virt, mp->phys); 448 kfree(mp); 449 return -EIO; 450 } 451 452 mp = (struct lpfc_dmabuf *) pmb->context1; 453 454 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 455 lpfc_mbuf_free(phba, mp->virt, mp->phys); 456 kfree(mp); 457 pmb->context1 = NULL; 458 lpfc_update_vport_wwn(vport); 459 460 /* Update the fc_host data structures with new wwn. */ 461 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 463 fc_host_max_npiv_vports(shost) = phba->max_vpi; 464 465 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 466 /* This should be consolidated into parse_vpd ? - mr */ 467 if (phba->SerialNumber[0] == 0) { 468 uint8_t *outptr; 469 470 outptr = &vport->fc_nodename.u.s.IEEE[0]; 471 for (i = 0; i < 12; i++) { 472 status = *outptr++; 473 j = ((status & 0xf0) >> 4); 474 if (j <= 9) 475 phba->SerialNumber[i] = 476 (char)((uint8_t) 0x30 + (uint8_t) j); 477 else 478 phba->SerialNumber[i] = 479 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 480 i++; 481 j = (status & 0xf); 482 if (j <= 9) 483 phba->SerialNumber[i] = 484 (char)((uint8_t) 0x30 + (uint8_t) j); 485 else 486 phba->SerialNumber[i] = 487 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 488 } 489 } 490 491 lpfc_read_config(phba, pmb); 492 pmb->vport = vport; 493 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 495 "0453 Adapter failed to init, mbxCmd x%x " 496 "READ_CONFIG, mbxStatus x%x\n", 497 mb->mbxCommand, mb->mbxStatus); 498 phba->link_state = LPFC_HBA_ERROR; 499 mempool_free( pmb, phba->mbox_mem_pool); 500 return -EIO; 501 } 502 503 /* Check if the port is disabled */ 504 lpfc_sli_read_link_ste(phba); 505 506 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 507 i = (mb->un.varRdConfig.max_xri + 1); 508 if (phba->cfg_hba_queue_depth > i) { 509 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 510 "3359 HBA queue depth changed from %d to %d\n", 511 phba->cfg_hba_queue_depth, i); 512 phba->cfg_hba_queue_depth = i; 513 } 514 515 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 516 i = (mb->un.varRdConfig.max_xri >> 3); 517 if (phba->pport->cfg_lun_queue_depth > i) { 518 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 519 "3360 LUN queue depth changed from %d to %d\n", 520 phba->pport->cfg_lun_queue_depth, i); 521 phba->pport->cfg_lun_queue_depth = i; 522 } 523 524 phba->lmt = mb->un.varRdConfig.lmt; 525 526 /* Get the default values for Model Name and Description */ 527 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 528 529 phba->link_state = LPFC_LINK_DOWN; 530 531 /* Only process IOCBs on ELS ring till hba_state is READY */ 532 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 533 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 534 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 535 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 536 537 /* Post receive buffers for desired rings */ 538 if 
(phba->sli_rev != 3) 539 lpfc_post_rcv_buf(phba); 540 541 /* 542 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 543 */ 544 if (phba->intr_type == MSIX) { 545 rc = lpfc_config_msi(phba, pmb); 546 if (rc) { 547 mempool_free(pmb, phba->mbox_mem_pool); 548 return -EIO; 549 } 550 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 551 if (rc != MBX_SUCCESS) { 552 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 553 "0352 Config MSI mailbox command " 554 "failed, mbxCmd x%x, mbxStatus x%x\n", 555 pmb->u.mb.mbxCommand, 556 pmb->u.mb.mbxStatus); 557 mempool_free(pmb, phba->mbox_mem_pool); 558 return -EIO; 559 } 560 } 561 562 spin_lock_irq(&phba->hbalock); 563 /* Initialize ERATT handling flag */ 564 phba->hba_flag &= ~HBA_ERATT_HANDLED; 565 566 /* Enable appropriate host interrupts */ 567 if (lpfc_readl(phba->HCregaddr, &status)) { 568 spin_unlock_irq(&phba->hbalock); 569 return -EIO; 570 } 571 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 572 if (psli->num_rings > 0) 573 status |= HC_R0INT_ENA; 574 if (psli->num_rings > 1) 575 status |= HC_R1INT_ENA; 576 if (psli->num_rings > 2) 577 status |= HC_R2INT_ENA; 578 if (psli->num_rings > 3) 579 status |= HC_R3INT_ENA; 580 581 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 582 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 583 status &= ~(HC_R0INT_ENA); 584 585 writel(status, phba->HCregaddr); 586 readl(phba->HCregaddr); /* flush */ 587 spin_unlock_irq(&phba->hbalock); 588 589 /* Set up ring-0 (ELS) timer */ 590 timeout = phba->fc_ratov * 2; 591 mod_timer(&vport->els_tmofunc, 592 jiffies + msecs_to_jiffies(1000 * timeout)); 593 /* Set up heart beat (HB) timer */ 594 mod_timer(&phba->hb_tmofunc, 595 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 596 phba->hb_outstanding = 0; 597 phba->last_completion_time = jiffies; 598 /* Set up error attention (ERATT) polling timer */ 599 mod_timer(&phba->eratt_poll, 600 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 601 602 if (phba->hba_flag & LINK_DISABLED) { 603 lpfc_printf_log(phba, 604 KERN_ERR, LOG_INIT, 605 "2598 Adapter Link is disabled.\n"); 606 lpfc_down_link(phba, pmb); 607 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 608 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 609 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 610 lpfc_printf_log(phba, 611 KERN_ERR, LOG_INIT, 612 "2599 Adapter failed to issue DOWN_LINK" 613 " mbox command rc 0x%x\n", rc); 614 615 mempool_free(pmb, phba->mbox_mem_pool); 616 return -EIO; 617 } 618 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 619 mempool_free(pmb, phba->mbox_mem_pool); 620 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 621 if (rc) 622 return rc; 623 } 624 /* MBOX buffer will be freed in mbox compl */ 625 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 626 if (!pmb) { 627 phba->link_state = LPFC_HBA_ERROR; 628 return -ENOMEM; 629 } 630 631 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 632 pmb->mbox_cmpl = lpfc_config_async_cmpl; 633 pmb->vport = phba->pport; 634 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 635 636 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 637 lpfc_printf_log(phba, 638 KERN_ERR, 639 LOG_INIT, 640 "0456 Adapter failed to issue " 641 "ASYNCEVT_ENABLE mbox status x%x\n", 642 rc); 643 mempool_free(pmb, phba->mbox_mem_pool); 644 } 645 646 /* Get Option rom version */ 647 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 648 if (!pmb) { 649 phba->link_state = LPFC_HBA_ERROR; 650 return -ENOMEM; 651 } 652 653 lpfc_dump_wakeup_param(phba, pmb); 654 pmb->mbox_cmpl = 
lpfc_dump_wakeup_param_cmpl; 655 pmb->vport = phba->pport; 656 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 657 658 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 660 "to get Option ROM version status x%x\n", rc); 661 mempool_free(pmb, phba->mbox_mem_pool); 662 } 663 664 return 0; 665 } 666 667 /** 668 * lpfc_hba_init_link - Initialize the FC link 669 * @phba: pointer to lpfc hba data structure. 670 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 671 * 672 * This routine will issue the INIT_LINK mailbox command call. 673 * It is available to other drivers through the lpfc_hba data 674 * structure for use as a delayed link up mechanism with the 675 * module parameter lpfc_suppress_link_up. 676 * 677 * Return code 678 * 0 - success 679 * Any other value - error 680 **/ 681 static int 682 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 683 { 684 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 685 } 686 687 /** 688 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 689 * @phba: pointer to lpfc hba data structure. 690 * @fc_topology: desired fc topology. 691 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 692 * 693 * This routine will issue the INIT_LINK mailbox command call. 694 * It is available to other drivers through the lpfc_hba data 695 * structure for use as a delayed link up mechanism with the 696 * module parameter lpfc_suppress_link_up. 697 * 698 * Return code 699 * 0 - success 700 * Any other value - error 701 **/ 702 int 703 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 704 uint32_t flag) 705 { 706 struct lpfc_vport *vport = phba->pport; 707 LPFC_MBOXQ_t *pmb; 708 MAILBOX_t *mb; 709 int rc; 710 711 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 712 if (!pmb) { 713 phba->link_state = LPFC_HBA_ERROR; 714 return -ENOMEM; 715 } 716 mb = &pmb->u.mb; 717 pmb->vport = vport; 718 719 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 720 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 721 !(phba->lmt & LMT_1Gb)) || 722 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 723 !(phba->lmt & LMT_2Gb)) || 724 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 725 !(phba->lmt & LMT_4Gb)) || 726 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 727 !(phba->lmt & LMT_8Gb)) || 728 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 729 !(phba->lmt & LMT_10Gb)) || 730 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 731 !(phba->lmt & LMT_16Gb)) || 732 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 733 !(phba->lmt & LMT_32Gb))) { 734 /* Reset link speed to auto */ 735 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 736 "1302 Invalid speed for this board:%d " 737 "Reset link speed to auto.\n", 738 phba->cfg_link_speed); 739 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 740 } 741 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 742 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 743 if (phba->sli_rev < LPFC_SLI_REV4) 744 lpfc_set_loopback_flag(phba); 745 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 746 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 748 "0498 Adapter failed to init, mbxCmd x%x " 749 "INIT_LINK, mbxStatus x%x\n", 750 mb->mbxCommand, mb->mbxStatus); 751 if (phba->sli_rev <= LPFC_SLI_REV3) { 752 /* Clear all interrupt enable conditions */ 753 writel(0, phba->HCregaddr); 754 
readl(phba->HCregaddr); /* flush */ 755 /* Clear all pending interrupts */ 756 writel(0xffffffff, phba->HAregaddr); 757 readl(phba->HAregaddr); /* flush */ 758 } 759 phba->link_state = LPFC_HBA_ERROR; 760 if (rc != MBX_BUSY || flag == MBX_POLL) 761 mempool_free(pmb, phba->mbox_mem_pool); 762 return -EIO; 763 } 764 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 765 if (flag == MBX_POLL) 766 mempool_free(pmb, phba->mbox_mem_pool); 767 768 return 0; 769 } 770 771 /** 772 * lpfc_hba_down_link - this routine downs the FC link 773 * @phba: pointer to lpfc hba data structure. 774 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 775 * 776 * This routine will issue the DOWN_LINK mailbox command call. 777 * It is available to other drivers through the lpfc_hba data 778 * structure for use to stop the link. 779 * 780 * Return code 781 * 0 - success 782 * Any other value - error 783 **/ 784 static int 785 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 786 { 787 LPFC_MBOXQ_t *pmb; 788 int rc; 789 790 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 791 if (!pmb) { 792 phba->link_state = LPFC_HBA_ERROR; 793 return -ENOMEM; 794 } 795 796 lpfc_printf_log(phba, 797 KERN_ERR, LOG_INIT, 798 "0491 Adapter Link is disabled.\n"); 799 lpfc_down_link(phba, pmb); 800 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 801 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 802 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 803 lpfc_printf_log(phba, 804 KERN_ERR, LOG_INIT, 805 "2522 Adapter failed to issue DOWN_LINK" 806 " mbox command rc 0x%x\n", rc); 807 808 mempool_free(pmb, phba->mbox_mem_pool); 809 return -EIO; 810 } 811 if (flag == MBX_POLL) 812 mempool_free(pmb, phba->mbox_mem_pool); 813 814 return 0; 815 } 816 817 /** 818 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 819 * @phba: pointer to lpfc HBA data structure. 820 * 821 * This routine will do LPFC uninitialization before the HBA is reset when 822 * bringing down the SLI Layer. 823 * 824 * Return codes 825 * 0 - success. 826 * Any other value - error. 827 **/ 828 int 829 lpfc_hba_down_prep(struct lpfc_hba *phba) 830 { 831 struct lpfc_vport **vports; 832 int i; 833 834 if (phba->sli_rev <= LPFC_SLI_REV3) { 835 /* Disable interrupts */ 836 writel(0, phba->HCregaddr); 837 readl(phba->HCregaddr); /* flush */ 838 } 839 840 if (phba->pport->load_flag & FC_UNLOADING) 841 lpfc_cleanup_discovery_resources(phba->pport); 842 else { 843 vports = lpfc_create_vport_work_array(phba); 844 if (vports != NULL) 845 for (i = 0; i <= phba->max_vports && 846 vports[i] != NULL; i++) 847 lpfc_cleanup_discovery_resources(vports[i]); 848 lpfc_destroy_vport_work_array(phba, vports); 849 } 850 return 0; 851 } 852 853 /** 854 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 855 * rspiocb which got deferred 856 * 857 * @phba: pointer to lpfc HBA data structure. 858 * 859 * This routine will cleanup completed slow path events after HBA is reset 860 * when bringing down the SLI Layer. 861 * 862 * 863 * Return codes 864 * void. 
865 **/ 866 static void 867 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 868 { 869 struct lpfc_iocbq *rspiocbq; 870 struct hbq_dmabuf *dmabuf; 871 struct lpfc_cq_event *cq_event; 872 873 spin_lock_irq(&phba->hbalock); 874 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 875 spin_unlock_irq(&phba->hbalock); 876 877 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 878 /* Get the response iocb from the head of work queue */ 879 spin_lock_irq(&phba->hbalock); 880 list_remove_head(&phba->sli4_hba.sp_queue_event, 881 cq_event, struct lpfc_cq_event, list); 882 spin_unlock_irq(&phba->hbalock); 883 884 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 885 case CQE_CODE_COMPL_WQE: 886 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 887 cq_event); 888 lpfc_sli_release_iocbq(phba, rspiocbq); 889 break; 890 case CQE_CODE_RECEIVE: 891 case CQE_CODE_RECEIVE_V1: 892 dmabuf = container_of(cq_event, struct hbq_dmabuf, 893 cq_event); 894 lpfc_in_buf_free(phba, &dmabuf->dbuf); 895 } 896 } 897 } 898 899 /** 900 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 901 * @phba: pointer to lpfc HBA data structure. 902 * 903 * This routine will cleanup posted ELS buffers after the HBA is reset 904 * when bringing down the SLI Layer. 905 * 906 * 907 * Return codes 908 * void. 909 **/ 910 static void 911 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 912 { 913 struct lpfc_sli *psli = &phba->sli; 914 struct lpfc_sli_ring *pring; 915 struct lpfc_dmabuf *mp, *next_mp; 916 LIST_HEAD(buflist); 917 int count; 918 919 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 920 lpfc_sli_hbqbuf_free_all(phba); 921 else { 922 /* Cleanup preposted buffers on the ELS ring */ 923 pring = &psli->sli3_ring[LPFC_ELS_RING]; 924 spin_lock_irq(&phba->hbalock); 925 list_splice_init(&pring->postbufq, &buflist); 926 spin_unlock_irq(&phba->hbalock); 927 928 count = 0; 929 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 930 list_del(&mp->list); 931 count++; 932 lpfc_mbuf_free(phba, mp->virt, mp->phys); 933 kfree(mp); 934 } 935 936 spin_lock_irq(&phba->hbalock); 937 pring->postbufq_cnt -= count; 938 spin_unlock_irq(&phba->hbalock); 939 } 940 } 941 942 /** 943 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 944 * @phba: pointer to lpfc HBA data structure. 945 * 946 * This routine will cleanup the txcmplq after the HBA is reset when bringing 947 * down the SLI Layer. 948 * 949 * Return codes 950 * void 951 **/ 952 static void 953 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 954 { 955 struct lpfc_sli *psli = &phba->sli; 956 struct lpfc_queue *qp = NULL; 957 struct lpfc_sli_ring *pring; 958 LIST_HEAD(completions); 959 int i; 960 961 if (phba->sli_rev != LPFC_SLI_REV4) { 962 for (i = 0; i < psli->num_rings; i++) { 963 pring = &psli->sli3_ring[i]; 964 spin_lock_irq(&phba->hbalock); 965 /* At this point in time the HBA is either reset or DOA 966 * Nothing should be on txcmplq as it will 967 * NEVER complete. 
968 */ 969 list_splice_init(&pring->txcmplq, &completions); 970 pring->txcmplq_cnt = 0; 971 spin_unlock_irq(&phba->hbalock); 972 973 lpfc_sli_abort_iocb_ring(phba, pring); 974 } 975 /* Cancel all the IOCBs from the completions list */ 976 lpfc_sli_cancel_iocbs(phba, &completions, 977 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 978 return; 979 } 980 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 981 pring = qp->pring; 982 if (!pring) 983 continue; 984 spin_lock_irq(&pring->ring_lock); 985 list_splice_init(&pring->txcmplq, &completions); 986 pring->txcmplq_cnt = 0; 987 spin_unlock_irq(&pring->ring_lock); 988 lpfc_sli_abort_iocb_ring(phba, pring); 989 } 990 /* Cancel all the IOCBs from the completions list */ 991 lpfc_sli_cancel_iocbs(phba, &completions, 992 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 993 } 994 995 /** 996 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 997 int i; 998 * @phba: pointer to lpfc HBA data structure. 999 * 1000 * This routine will do uninitialization after the HBA is reset when bring 1001 * down the SLI Layer. 1002 * 1003 * Return codes 1004 * 0 - success. 1005 * Any other value - error. 1006 **/ 1007 static int 1008 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 1009 { 1010 lpfc_hba_free_post_buf(phba); 1011 lpfc_hba_clean_txcmplq(phba); 1012 return 0; 1013 } 1014 1015 /** 1016 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 1017 * @phba: pointer to lpfc HBA data structure. 1018 * 1019 * This routine will do uninitialization after the HBA is reset when bring 1020 * down the SLI Layer. 1021 * 1022 * Return codes 1023 * 0 - success. 1024 * Any other value - error. 1025 **/ 1026 static int 1027 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1028 { 1029 struct lpfc_scsi_buf *psb, *psb_next; 1030 struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next; 1031 LIST_HEAD(aborts); 1032 LIST_HEAD(nvme_aborts); 1033 LIST_HEAD(nvmet_aborts); 1034 unsigned long iflag = 0; 1035 struct lpfc_sglq *sglq_entry = NULL; 1036 1037 1038 lpfc_sli_hbqbuf_free_all(phba); 1039 lpfc_hba_clean_txcmplq(phba); 1040 1041 /* At this point in time the HBA is either reset or DOA. Either 1042 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1043 * on the lpfc_els_sgl_list so that it can either be freed if the 1044 * driver is unloading or reposted if the driver is restarting 1045 * the port. 1046 */ 1047 spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */ 1048 /* scsl_buf_list */ 1049 /* sgl_list_lock required because worker thread uses this 1050 * list. 1051 */ 1052 spin_lock(&phba->sli4_hba.sgl_list_lock); 1053 list_for_each_entry(sglq_entry, 1054 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1055 sglq_entry->state = SGL_FREED; 1056 1057 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1058 &phba->sli4_hba.lpfc_els_sgl_list); 1059 1060 1061 spin_unlock(&phba->sli4_hba.sgl_list_lock); 1062 /* abts_scsi_buf_list_lock required because worker thread uses this 1063 * list. 
1064 */ 1065 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 1066 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 1067 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, 1068 &aborts); 1069 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 1070 } 1071 1072 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1073 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1074 list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list, 1075 &nvme_aborts); 1076 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1077 &nvmet_aborts); 1078 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1079 } 1080 1081 spin_unlock_irq(&phba->hbalock); 1082 1083 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1084 psb->pCmd = NULL; 1085 psb->status = IOSTAT_SUCCESS; 1086 } 1087 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 1088 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); 1089 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 1090 1091 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1092 list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) { 1093 psb->pCmd = NULL; 1094 psb->status = IOSTAT_SUCCESS; 1095 } 1096 spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); 1097 list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put); 1098 spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); 1099 1100 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1101 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); 1102 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1103 } 1104 } 1105 1106 lpfc_sli4_free_sp_events(phba); 1107 return 0; 1108 } 1109 1110 /** 1111 * lpfc_hba_down_post - Wrapper func for hba down post routine 1112 * @phba: pointer to lpfc HBA data structure. 1113 * 1114 * This routine wraps the actual SLI3 or SLI4 routine for performing 1115 * uninitialization after the HBA is reset when bring down the SLI Layer. 1116 * 1117 * Return codes 1118 * 0 - success. 1119 * Any other value - error. 1120 **/ 1121 int 1122 lpfc_hba_down_post(struct lpfc_hba *phba) 1123 { 1124 return (*phba->lpfc_hba_down_post)(phba); 1125 } 1126 1127 /** 1128 * lpfc_hb_timeout - The HBA-timer timeout handler 1129 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1130 * 1131 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1132 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1133 * work-port-events bitmap and the worker thread is notified. This timeout 1134 * event will be used by the worker thread to invoke the actual timeout 1135 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1136 * be performed in the timeout handler and the HBA timeout event bit shall 1137 * be cleared by the worker thread after it has taken the event bitmap out. 
1138 **/ 1139 static void 1140 lpfc_hb_timeout(unsigned long ptr) 1141 { 1142 struct lpfc_hba *phba; 1143 uint32_t tmo_posted; 1144 unsigned long iflag; 1145 1146 phba = (struct lpfc_hba *)ptr; 1147 1148 /* Check for heart beat timeout conditions */ 1149 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1150 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1151 if (!tmo_posted) 1152 phba->pport->work_port_events |= WORKER_HB_TMO; 1153 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1154 1155 /* Tell the worker thread there is work to do */ 1156 if (!tmo_posted) 1157 lpfc_worker_wake_up(phba); 1158 return; 1159 } 1160 1161 /** 1162 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1163 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1164 * 1165 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1166 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1167 * work-port-events bitmap and the worker thread is notified. This timeout 1168 * event will be used by the worker thread to invoke the actual timeout 1169 * handler routine, lpfc_rrq_handler. Any periodical operations will 1170 * be performed in the timeout handler and the RRQ timeout event bit shall 1171 * be cleared by the worker thread after it has taken the event bitmap out. 1172 **/ 1173 static void 1174 lpfc_rrq_timeout(unsigned long ptr) 1175 { 1176 struct lpfc_hba *phba; 1177 unsigned long iflag; 1178 1179 phba = (struct lpfc_hba *)ptr; 1180 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1181 if (!(phba->pport->load_flag & FC_UNLOADING)) 1182 phba->hba_flag |= HBA_RRQ_ACTIVE; 1183 else 1184 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1185 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1186 1187 if (!(phba->pport->load_flag & FC_UNLOADING)) 1188 lpfc_worker_wake_up(phba); 1189 } 1190 1191 /** 1192 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1193 * @phba: pointer to lpfc hba data structure. 1194 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1195 * 1196 * This is the callback function to the lpfc heart-beat mailbox command. 1197 * If configured, the lpfc driver issues the heart-beat mailbox command to 1198 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1199 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1200 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1201 * heart-beat outstanding state. Once the mailbox command comes back and 1202 * no error conditions detected, the heart-beat mailbox command timer is 1203 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1204 * state is cleared for the next heart-beat. If the timer expired with the 1205 * heart-beat outstanding state set, the driver will put the HBA offline. 
1206 **/ 1207 static void 1208 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1209 { 1210 unsigned long drvr_flag; 1211 1212 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1213 phba->hb_outstanding = 0; 1214 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1215 1216 /* Check and reset heart-beat timer is necessary */ 1217 mempool_free(pmboxq, phba->mbox_mem_pool); 1218 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1219 !(phba->link_state == LPFC_HBA_ERROR) && 1220 !(phba->pport->load_flag & FC_UNLOADING)) 1221 mod_timer(&phba->hb_tmofunc, 1222 jiffies + 1223 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1224 return; 1225 } 1226 1227 /** 1228 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1229 * @phba: pointer to lpfc hba data structure. 1230 * 1231 * This is the actual HBA-timer timeout handler to be invoked by the worker 1232 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1233 * handler performs any periodic operations needed for the device. If such 1234 * periodic event has already been attended to either in the interrupt handler 1235 * or by processing slow-ring or fast-ring events within the HBA-timer 1236 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1237 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1238 * is configured and there is no heart-beat mailbox command outstanding, a 1239 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1240 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1241 * to offline. 1242 **/ 1243 void 1244 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1245 { 1246 struct lpfc_vport **vports; 1247 LPFC_MBOXQ_t *pmboxq; 1248 struct lpfc_dmabuf *buf_ptr; 1249 int retval, i; 1250 struct lpfc_sli *psli = &phba->sli; 1251 LIST_HEAD(completions); 1252 1253 vports = lpfc_create_vport_work_array(phba); 1254 if (vports != NULL) 1255 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1256 lpfc_rcv_seq_check_edtov(vports[i]); 1257 lpfc_fdmi_num_disc_check(vports[i]); 1258 } 1259 lpfc_destroy_vport_work_array(phba, vports); 1260 1261 if ((phba->link_state == LPFC_HBA_ERROR) || 1262 (phba->pport->load_flag & FC_UNLOADING) || 1263 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1264 return; 1265 1266 spin_lock_irq(&phba->pport->work_port_lock); 1267 1268 if (time_after(phba->last_completion_time + 1269 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1270 jiffies)) { 1271 spin_unlock_irq(&phba->pport->work_port_lock); 1272 if (!phba->hb_outstanding) 1273 mod_timer(&phba->hb_tmofunc, 1274 jiffies + 1275 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1276 else 1277 mod_timer(&phba->hb_tmofunc, 1278 jiffies + 1279 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1280 return; 1281 } 1282 spin_unlock_irq(&phba->pport->work_port_lock); 1283 1284 if (phba->elsbuf_cnt && 1285 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1286 spin_lock_irq(&phba->hbalock); 1287 list_splice_init(&phba->elsbuf, &completions); 1288 phba->elsbuf_cnt = 0; 1289 phba->elsbuf_prev_cnt = 0; 1290 spin_unlock_irq(&phba->hbalock); 1291 1292 while (!list_empty(&completions)) { 1293 list_remove_head(&completions, buf_ptr, 1294 struct lpfc_dmabuf, list); 1295 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1296 kfree(buf_ptr); 1297 } 1298 } 1299 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1300 1301 /* If there is no heart beat outstanding, issue a heartbeat command */ 1302 if (phba->cfg_enable_hba_heartbeat) { 1303 if (!phba->hb_outstanding) 
{ 1304 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1305 (list_empty(&psli->mboxq))) { 1306 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1307 GFP_KERNEL); 1308 if (!pmboxq) { 1309 mod_timer(&phba->hb_tmofunc, 1310 jiffies + 1311 msecs_to_jiffies(1000 * 1312 LPFC_HB_MBOX_INTERVAL)); 1313 return; 1314 } 1315 1316 lpfc_heart_beat(phba, pmboxq); 1317 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1318 pmboxq->vport = phba->pport; 1319 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1320 MBX_NOWAIT); 1321 1322 if (retval != MBX_BUSY && 1323 retval != MBX_SUCCESS) { 1324 mempool_free(pmboxq, 1325 phba->mbox_mem_pool); 1326 mod_timer(&phba->hb_tmofunc, 1327 jiffies + 1328 msecs_to_jiffies(1000 * 1329 LPFC_HB_MBOX_INTERVAL)); 1330 return; 1331 } 1332 phba->skipped_hb = 0; 1333 phba->hb_outstanding = 1; 1334 } else if (time_before_eq(phba->last_completion_time, 1335 phba->skipped_hb)) { 1336 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1337 "2857 Last completion time not " 1338 " updated in %d ms\n", 1339 jiffies_to_msecs(jiffies 1340 - phba->last_completion_time)); 1341 } else 1342 phba->skipped_hb = jiffies; 1343 1344 mod_timer(&phba->hb_tmofunc, 1345 jiffies + 1346 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1347 return; 1348 } else { 1349 /* 1350 * If heart beat timeout called with hb_outstanding set 1351 * we need to give the hb mailbox cmd a chance to 1352 * complete or TMO. 1353 */ 1354 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1355 "0459 Adapter heartbeat still out" 1356 "standing:last compl time was %d ms.\n", 1357 jiffies_to_msecs(jiffies 1358 - phba->last_completion_time)); 1359 mod_timer(&phba->hb_tmofunc, 1360 jiffies + 1361 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1362 } 1363 } else { 1364 mod_timer(&phba->hb_tmofunc, 1365 jiffies + 1366 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1367 } 1368 } 1369 1370 /** 1371 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1372 * @phba: pointer to lpfc hba data structure. 1373 * 1374 * This routine is called to bring the HBA offline when HBA hardware error 1375 * other than Port Error 6 has been detected. 1376 **/ 1377 static void 1378 lpfc_offline_eratt(struct lpfc_hba *phba) 1379 { 1380 struct lpfc_sli *psli = &phba->sli; 1381 1382 spin_lock_irq(&phba->hbalock); 1383 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1384 spin_unlock_irq(&phba->hbalock); 1385 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1386 1387 lpfc_offline(phba); 1388 lpfc_reset_barrier(phba); 1389 spin_lock_irq(&phba->hbalock); 1390 lpfc_sli_brdreset(phba); 1391 spin_unlock_irq(&phba->hbalock); 1392 lpfc_hba_down_post(phba); 1393 lpfc_sli_brdready(phba, HS_MBRDY); 1394 lpfc_unblock_mgmt_io(phba); 1395 phba->link_state = LPFC_HBA_ERROR; 1396 return; 1397 } 1398 1399 /** 1400 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1401 * @phba: pointer to lpfc hba data structure. 1402 * 1403 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1404 * other than Port Error 6 has been detected. 1405 **/ 1406 void 1407 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1408 { 1409 spin_lock_irq(&phba->hbalock); 1410 phba->link_state = LPFC_HBA_ERROR; 1411 spin_unlock_irq(&phba->hbalock); 1412 1413 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1414 lpfc_offline(phba); 1415 lpfc_hba_down_post(phba); 1416 lpfc_unblock_mgmt_io(phba); 1417 } 1418 1419 /** 1420 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1421 * @phba: pointer to lpfc hba data structure. 
1422 * 1423 * This routine is invoked to handle the deferred HBA hardware error 1424 * conditions. This type of error is indicated by HBA by setting ER1 1425 * and another ER bit in the host status register. The driver will 1426 * wait until the ER1 bit clears before handling the error condition. 1427 **/ 1428 static void 1429 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1430 { 1431 uint32_t old_host_status = phba->work_hs; 1432 struct lpfc_sli *psli = &phba->sli; 1433 1434 /* If the pci channel is offline, ignore possible errors, 1435 * since we cannot communicate with the pci card anyway. 1436 */ 1437 if (pci_channel_offline(phba->pcidev)) { 1438 spin_lock_irq(&phba->hbalock); 1439 phba->hba_flag &= ~DEFER_ERATT; 1440 spin_unlock_irq(&phba->hbalock); 1441 return; 1442 } 1443 1444 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1445 "0479 Deferred Adapter Hardware Error " 1446 "Data: x%x x%x x%x\n", 1447 phba->work_hs, 1448 phba->work_status[0], phba->work_status[1]); 1449 1450 spin_lock_irq(&phba->hbalock); 1451 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1452 spin_unlock_irq(&phba->hbalock); 1453 1454 1455 /* 1456 * Firmware stops when it triggred erratt. That could cause the I/Os 1457 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1458 * SCSI layer retry it after re-establishing link. 1459 */ 1460 lpfc_sli_abort_fcp_rings(phba); 1461 1462 /* 1463 * There was a firmware error. Take the hba offline and then 1464 * attempt to restart it. 1465 */ 1466 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 1467 lpfc_offline(phba); 1468 1469 /* Wait for the ER1 bit to clear.*/ 1470 while (phba->work_hs & HS_FFER1) { 1471 msleep(100); 1472 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1473 phba->work_hs = UNPLUG_ERR ; 1474 break; 1475 } 1476 /* If driver is unloading let the worker thread continue */ 1477 if (phba->pport->load_flag & FC_UNLOADING) { 1478 phba->work_hs = 0; 1479 break; 1480 } 1481 } 1482 1483 /* 1484 * This is to ptrotect against a race condition in which 1485 * first write to the host attention register clear the 1486 * host status register. 1487 */ 1488 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1489 phba->work_hs = old_host_status & ~HS_FFER1; 1490 1491 spin_lock_irq(&phba->hbalock); 1492 phba->hba_flag &= ~DEFER_ERATT; 1493 spin_unlock_irq(&phba->hbalock); 1494 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1495 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1496 } 1497 1498 static void 1499 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1500 { 1501 struct lpfc_board_event_header board_event; 1502 struct Scsi_Host *shost; 1503 1504 board_event.event_type = FC_REG_BOARD_EVENT; 1505 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1506 shost = lpfc_shost_from_vport(phba->pport); 1507 fc_host_post_vendor_event(shost, fc_get_event_number(), 1508 sizeof(board_event), 1509 (char *) &board_event, 1510 LPFC_NL_VENDOR_ID); 1511 } 1512 1513 /** 1514 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1515 * @phba: pointer to lpfc hba data structure. 
1516 * 1517 * This routine is invoked to handle the following HBA hardware error 1518 * conditions: 1519 * 1 - HBA error attention interrupt 1520 * 2 - DMA ring index out of range 1521 * 3 - Mailbox command came back as unknown 1522 **/ 1523 static void 1524 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1525 { 1526 struct lpfc_vport *vport = phba->pport; 1527 struct lpfc_sli *psli = &phba->sli; 1528 uint32_t event_data; 1529 unsigned long temperature; 1530 struct temp_event temp_event_data; 1531 struct Scsi_Host *shost; 1532 1533 /* If the pci channel is offline, ignore possible errors, 1534 * since we cannot communicate with the pci card anyway. 1535 */ 1536 if (pci_channel_offline(phba->pcidev)) { 1537 spin_lock_irq(&phba->hbalock); 1538 phba->hba_flag &= ~DEFER_ERATT; 1539 spin_unlock_irq(&phba->hbalock); 1540 return; 1541 } 1542 1543 /* If resets are disabled then leave the HBA alone and return */ 1544 if (!phba->cfg_enable_hba_reset) 1545 return; 1546 1547 /* Send an internal error event to mgmt application */ 1548 lpfc_board_errevt_to_mgmt(phba); 1549 1550 if (phba->hba_flag & DEFER_ERATT) 1551 lpfc_handle_deferred_eratt(phba); 1552 1553 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1554 if (phba->work_hs & HS_FFER6) 1555 /* Re-establishing Link */ 1556 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1557 "1301 Re-establishing Link " 1558 "Data: x%x x%x x%x\n", 1559 phba->work_hs, phba->work_status[0], 1560 phba->work_status[1]); 1561 if (phba->work_hs & HS_FFER8) 1562 /* Device Zeroization */ 1563 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1564 "2861 Host Authentication device " 1565 "zeroization Data:x%x x%x x%x\n", 1566 phba->work_hs, phba->work_status[0], 1567 phba->work_status[1]); 1568 1569 spin_lock_irq(&phba->hbalock); 1570 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1571 spin_unlock_irq(&phba->hbalock); 1572 1573 /* 1574 * Firmware stops when it triggled erratt with HS_FFER6. 1575 * That could cause the I/Os dropped by the firmware. 1576 * Error iocb (I/O) on txcmplq and let the SCSI layer 1577 * retry it after re-establishing link. 1578 */ 1579 lpfc_sli_abort_fcp_rings(phba); 1580 1581 /* 1582 * There was a firmware error. Take the hba offline and then 1583 * attempt to restart it. 1584 */ 1585 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1586 lpfc_offline(phba); 1587 lpfc_sli_brdrestart(phba); 1588 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1589 lpfc_unblock_mgmt_io(phba); 1590 return; 1591 } 1592 lpfc_unblock_mgmt_io(phba); 1593 } else if (phba->work_hs & HS_CRIT_TEMP) { 1594 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1595 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1596 temp_event_data.event_code = LPFC_CRIT_TEMP; 1597 temp_event_data.data = (uint32_t)temperature; 1598 1599 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1600 "0406 Adapter maximum temperature exceeded " 1601 "(%ld), taking this port offline " 1602 "Data: x%x x%x x%x\n", 1603 temperature, phba->work_hs, 1604 phba->work_status[0], phba->work_status[1]); 1605 1606 shost = lpfc_shost_from_vport(phba->pport); 1607 fc_host_post_vendor_event(shost, fc_get_event_number(), 1608 sizeof(temp_event_data), 1609 (char *) &temp_event_data, 1610 SCSI_NL_VID_TYPE_PCI 1611 | PCI_VENDOR_ID_EMULEX); 1612 1613 spin_lock_irq(&phba->hbalock); 1614 phba->over_temp_state = HBA_OVER_TEMP; 1615 spin_unlock_irq(&phba->hbalock); 1616 lpfc_offline_eratt(phba); 1617 1618 } else { 1619 /* The if clause above forces this code path when the status 1620 * failure is a value other than FFER6. 
Do not call the offline 1621 * twice. This is the adapter hardware error path. 1622 */ 1623 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1624 "0457 Adapter Hardware Error " 1625 "Data: x%x x%x x%x\n", 1626 phba->work_hs, 1627 phba->work_status[0], phba->work_status[1]); 1628 1629 event_data = FC_REG_DUMP_EVENT; 1630 shost = lpfc_shost_from_vport(vport); 1631 fc_host_post_vendor_event(shost, fc_get_event_number(), 1632 sizeof(event_data), (char *) &event_data, 1633 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1634 1635 lpfc_offline_eratt(phba); 1636 } 1637 return; 1638 } 1639 1640 /** 1641 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1642 * @phba: pointer to lpfc hba data structure. 1643 * @mbx_action: flag for mailbox shutdown action. 1644 * 1645 * This routine is invoked to perform an SLI4 port PCI function reset in 1646 * response to port status register polling attention. It waits for port 1647 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1648 * During this process, interrupt vectors are freed and later requested 1649 * for handling possible port resource change. 1650 **/ 1651 static int 1652 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1653 bool en_rn_msg) 1654 { 1655 int rc; 1656 uint32_t intr_mode; 1657 1658 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1659 LPFC_SLI_INTF_IF_TYPE_2) { 1660 /* 1661 * On error status condition, driver need to wait for port 1662 * ready before performing reset. 1663 */ 1664 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1665 if (rc) 1666 return rc; 1667 } 1668 1669 /* need reset: attempt for port recovery */ 1670 if (en_rn_msg) 1671 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1672 "2887 Reset Needed: Attempting Port " 1673 "Recovery...\n"); 1674 lpfc_offline_prep(phba, mbx_action); 1675 lpfc_offline(phba); 1676 /* release interrupt for possible resource change */ 1677 lpfc_sli4_disable_intr(phba); 1678 lpfc_sli_brdrestart(phba); 1679 /* request and enable interrupt */ 1680 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1681 if (intr_mode == LPFC_INTR_ERROR) { 1682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1683 "3175 Failed to enable interrupt\n"); 1684 return -EIO; 1685 } 1686 phba->intr_mode = intr_mode; 1687 rc = lpfc_online(phba); 1688 if (rc == 0) 1689 lpfc_unblock_mgmt_io(phba); 1690 1691 return rc; 1692 } 1693 1694 /** 1695 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1696 * @phba: pointer to lpfc hba data structure. 1697 * 1698 * This routine is invoked to handle the SLI4 HBA hardware error attention 1699 * conditions. 1700 **/ 1701 static void 1702 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1703 { 1704 struct lpfc_vport *vport = phba->pport; 1705 uint32_t event_data; 1706 struct Scsi_Host *shost; 1707 uint32_t if_type; 1708 struct lpfc_register portstat_reg = {0}; 1709 uint32_t reg_err1, reg_err2; 1710 uint32_t uerrlo_reg, uemasklo_reg; 1711 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1712 bool en_rn_msg = true; 1713 struct temp_event temp_event_data; 1714 struct lpfc_register portsmphr_reg; 1715 int rc, i; 1716 1717 /* If the pci channel is offline, ignore possible errors, since 1718 * we cannot communicate with the pci card anyway. 
1719 */ 1720 if (pci_channel_offline(phba->pcidev)) 1721 return; 1722 1723 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1724 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1725 switch (if_type) { 1726 case LPFC_SLI_INTF_IF_TYPE_0: 1727 pci_rd_rc1 = lpfc_readl( 1728 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1729 &uerrlo_reg); 1730 pci_rd_rc2 = lpfc_readl( 1731 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1732 &uemasklo_reg); 1733 /* consider PCI bus read error as pci_channel_offline */ 1734 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1735 return; 1736 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1737 lpfc_sli4_offline_eratt(phba); 1738 return; 1739 } 1740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1741 "7623 Checking UE recoverable"); 1742 1743 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1744 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1745 &portsmphr_reg.word0)) 1746 continue; 1747 1748 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1749 &portsmphr_reg); 1750 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1751 LPFC_PORT_SEM_UE_RECOVERABLE) 1752 break; 1753 /*Sleep for 1Sec, before checking SEMAPHORE */ 1754 msleep(1000); 1755 } 1756 1757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1758 "4827 smphr_port_status x%x : Waited %dSec", 1759 smphr_port_status, i); 1760 1761 /* Recoverable UE, reset the HBA device */ 1762 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1763 LPFC_PORT_SEM_UE_RECOVERABLE) { 1764 for (i = 0; i < 20; i++) { 1765 msleep(1000); 1766 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1767 &portsmphr_reg.word0) && 1768 (LPFC_POST_STAGE_PORT_READY == 1769 bf_get(lpfc_port_smphr_port_status, 1770 &portsmphr_reg))) { 1771 rc = lpfc_sli4_port_sta_fn_reset(phba, 1772 LPFC_MBX_NO_WAIT, en_rn_msg); 1773 if (rc == 0) 1774 return; 1775 lpfc_printf_log(phba, 1776 KERN_ERR, LOG_INIT, 1777 "4215 Failed to recover UE"); 1778 break; 1779 } 1780 } 1781 } 1782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1783 "7624 Firmware not ready: Failing UE recovery," 1784 " waited %dSec", i); 1785 lpfc_sli4_offline_eratt(phba); 1786 break; 1787 1788 case LPFC_SLI_INTF_IF_TYPE_2: 1789 pci_rd_rc1 = lpfc_readl( 1790 phba->sli4_hba.u.if_type2.STATUSregaddr, 1791 &portstat_reg.word0); 1792 /* consider PCI bus read error as pci_channel_offline */ 1793 if (pci_rd_rc1 == -EIO) { 1794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1795 "3151 PCI bus read access failure: x%x\n", 1796 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1797 return; 1798 } 1799 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1800 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1801 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1803 "2889 Port Overtemperature event, " 1804 "taking port offline Data: x%x x%x\n", 1805 reg_err1, reg_err2); 1806 1807 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1808 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1809 temp_event_data.event_code = LPFC_CRIT_TEMP; 1810 temp_event_data.data = 0xFFFFFFFF; 1811 1812 shost = lpfc_shost_from_vport(phba->pport); 1813 fc_host_post_vendor_event(shost, fc_get_event_number(), 1814 sizeof(temp_event_data), 1815 (char *)&temp_event_data, 1816 SCSI_NL_VID_TYPE_PCI 1817 | PCI_VENDOR_ID_EMULEX); 1818 1819 spin_lock_irq(&phba->hbalock); 1820 phba->over_temp_state = HBA_OVER_TEMP; 1821 spin_unlock_irq(&phba->hbalock); 1822 lpfc_sli4_offline_eratt(phba); 1823 return; 1824 } 1825 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1826 reg_err2 == 
SLIPORT_ERR2_REG_FW_RESTART) { 1827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1828 "3143 Port Down: Firmware Update " 1829 "Detected\n"); 1830 en_rn_msg = false; 1831 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1832 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1833 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1834 "3144 Port Down: Debug Dump\n"); 1835 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1836 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1838 "3145 Port Down: Provisioning\n"); 1839 1840 /* If resets are disabled then leave the HBA alone and return */ 1841 if (!phba->cfg_enable_hba_reset) 1842 return; 1843 1844 /* Check port status register for function reset */ 1845 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1846 en_rn_msg); 1847 if (rc == 0) { 1848 /* don't report event on forced debug dump */ 1849 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1850 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1851 return; 1852 else 1853 break; 1854 } 1855 /* fall through for not able to recover */ 1856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1857 "3152 Unrecoverable error, bring the port " 1858 "offline\n"); 1859 lpfc_sli4_offline_eratt(phba); 1860 break; 1861 case LPFC_SLI_INTF_IF_TYPE_1: 1862 default: 1863 break; 1864 } 1865 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1866 "3123 Report dump event to upper layer\n"); 1867 /* Send an internal error event to mgmt application */ 1868 lpfc_board_errevt_to_mgmt(phba); 1869 1870 event_data = FC_REG_DUMP_EVENT; 1871 shost = lpfc_shost_from_vport(vport); 1872 fc_host_post_vendor_event(shost, fc_get_event_number(), 1873 sizeof(event_data), (char *) &event_data, 1874 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1875 } 1876 1877 /** 1878 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1879 * @phba: pointer to lpfc HBA data structure. 1880 * 1881 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1882 * routine from the API jump table function pointer from the lpfc_hba struct. 1883 * 1884 * Return codes 1885 * 0 - success. 1886 * Any other value - error. 1887 **/ 1888 void 1889 lpfc_handle_eratt(struct lpfc_hba *phba) 1890 { 1891 (*phba->lpfc_handle_eratt)(phba); 1892 } 1893 1894 /** 1895 * lpfc_handle_latt - The HBA link event handler 1896 * @phba: pointer to lpfc hba data structure. 1897 * 1898 * This routine is invoked from the worker thread to handle a HBA host 1899 * attention link event. SLI3 only. 
1900 **/ 1901 void 1902 lpfc_handle_latt(struct lpfc_hba *phba) 1903 { 1904 struct lpfc_vport *vport = phba->pport; 1905 struct lpfc_sli *psli = &phba->sli; 1906 LPFC_MBOXQ_t *pmb; 1907 volatile uint32_t control; 1908 struct lpfc_dmabuf *mp; 1909 int rc = 0; 1910 1911 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1912 if (!pmb) { 1913 rc = 1; 1914 goto lpfc_handle_latt_err_exit; 1915 } 1916 1917 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1918 if (!mp) { 1919 rc = 2; 1920 goto lpfc_handle_latt_free_pmb; 1921 } 1922 1923 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1924 if (!mp->virt) { 1925 rc = 3; 1926 goto lpfc_handle_latt_free_mp; 1927 } 1928 1929 /* Cleanup any outstanding ELS commands */ 1930 lpfc_els_flush_all_cmd(phba); 1931 1932 psli->slistat.link_event++; 1933 lpfc_read_topology(phba, pmb, mp); 1934 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 1935 pmb->vport = vport; 1936 /* Block ELS IOCBs until we have processed this mbox command */ 1937 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1938 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1939 if (rc == MBX_NOT_FINISHED) { 1940 rc = 4; 1941 goto lpfc_handle_latt_free_mbuf; 1942 } 1943 1944 /* Clear Link Attention in HA REG */ 1945 spin_lock_irq(&phba->hbalock); 1946 writel(HA_LATT, phba->HAregaddr); 1947 readl(phba->HAregaddr); /* flush */ 1948 spin_unlock_irq(&phba->hbalock); 1949 1950 return; 1951 1952 lpfc_handle_latt_free_mbuf: 1953 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1954 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1955 lpfc_handle_latt_free_mp: 1956 kfree(mp); 1957 lpfc_handle_latt_free_pmb: 1958 mempool_free(pmb, phba->mbox_mem_pool); 1959 lpfc_handle_latt_err_exit: 1960 /* Enable Link attention interrupts */ 1961 spin_lock_irq(&phba->hbalock); 1962 psli->sli_flag |= LPFC_PROCESS_LA; 1963 control = readl(phba->HCregaddr); 1964 control |= HC_LAINT_ENA; 1965 writel(control, phba->HCregaddr); 1966 readl(phba->HCregaddr); /* flush */ 1967 1968 /* Clear Link Attention in HA REG */ 1969 writel(HA_LATT, phba->HAregaddr); 1970 readl(phba->HAregaddr); /* flush */ 1971 spin_unlock_irq(&phba->hbalock); 1972 lpfc_linkdown(phba); 1973 phba->link_state = LPFC_HBA_ERROR; 1974 1975 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1976 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1977 1978 return; 1979 } 1980 1981 /** 1982 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1983 * @phba: pointer to lpfc hba data structure. 1984 * @vpd: pointer to the vital product data. 1985 * @len: length of the vital product data in bytes. 1986 * 1987 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1988 * an array of characters. In this routine, the ModelName, ProgramType, and 1989 * ModelDesc, etc. fields of the phba data structure will be populated. 
1990 * 1991 * Return codes 1992 * 0 - pointer to the VPD passed in is NULL 1993 * 1 - success 1994 **/ 1995 int 1996 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1997 { 1998 uint8_t lenlo, lenhi; 1999 int Length; 2000 int i, j; 2001 int finished = 0; 2002 int index = 0; 2003 2004 if (!vpd) 2005 return 0; 2006 2007 /* Vital Product */ 2008 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2009 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2010 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2011 (uint32_t) vpd[3]); 2012 while (!finished && (index < (len - 4))) { 2013 switch (vpd[index]) { 2014 case 0x82: 2015 case 0x91: 2016 index += 1; 2017 lenlo = vpd[index]; 2018 index += 1; 2019 lenhi = vpd[index]; 2020 index += 1; 2021 i = ((((unsigned short)lenhi) << 8) + lenlo); 2022 index += i; 2023 break; 2024 case 0x90: 2025 index += 1; 2026 lenlo = vpd[index]; 2027 index += 1; 2028 lenhi = vpd[index]; 2029 index += 1; 2030 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2031 if (Length > len - index) 2032 Length = len - index; 2033 while (Length > 0) { 2034 /* Look for Serial Number */ 2035 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2036 index += 2; 2037 i = vpd[index]; 2038 index += 1; 2039 j = 0; 2040 Length -= (3+i); 2041 while(i--) { 2042 phba->SerialNumber[j++] = vpd[index++]; 2043 if (j == 31) 2044 break; 2045 } 2046 phba->SerialNumber[j] = 0; 2047 continue; 2048 } 2049 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2050 phba->vpd_flag |= VPD_MODEL_DESC; 2051 index += 2; 2052 i = vpd[index]; 2053 index += 1; 2054 j = 0; 2055 Length -= (3+i); 2056 while(i--) { 2057 phba->ModelDesc[j++] = vpd[index++]; 2058 if (j == 255) 2059 break; 2060 } 2061 phba->ModelDesc[j] = 0; 2062 continue; 2063 } 2064 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2065 phba->vpd_flag |= VPD_MODEL_NAME; 2066 index += 2; 2067 i = vpd[index]; 2068 index += 1; 2069 j = 0; 2070 Length -= (3+i); 2071 while(i--) { 2072 phba->ModelName[j++] = vpd[index++]; 2073 if (j == 79) 2074 break; 2075 } 2076 phba->ModelName[j] = 0; 2077 continue; 2078 } 2079 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2080 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2081 index += 2; 2082 i = vpd[index]; 2083 index += 1; 2084 j = 0; 2085 Length -= (3+i); 2086 while(i--) { 2087 phba->ProgramType[j++] = vpd[index++]; 2088 if (j == 255) 2089 break; 2090 } 2091 phba->ProgramType[j] = 0; 2092 continue; 2093 } 2094 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2095 phba->vpd_flag |= VPD_PORT; 2096 index += 2; 2097 i = vpd[index]; 2098 index += 1; 2099 j = 0; 2100 Length -= (3+i); 2101 while(i--) { 2102 if ((phba->sli_rev == LPFC_SLI_REV4) && 2103 (phba->sli4_hba.pport_name_sta == 2104 LPFC_SLI4_PPNAME_GET)) { 2105 j++; 2106 index++; 2107 } else 2108 phba->Port[j++] = vpd[index++]; 2109 if (j == 19) 2110 break; 2111 } 2112 if ((phba->sli_rev != LPFC_SLI_REV4) || 2113 (phba->sli4_hba.pport_name_sta == 2114 LPFC_SLI4_PPNAME_NON)) 2115 phba->Port[j] = 0; 2116 continue; 2117 } 2118 else { 2119 index += 2; 2120 i = vpd[index]; 2121 index += 1; 2122 index += i; 2123 Length -= (3 + i); 2124 } 2125 } 2126 finished = 0; 2127 break; 2128 case 0x78: 2129 finished = 1; 2130 break; 2131 default: 2132 index ++; 2133 break; 2134 } 2135 } 2136 2137 return(1); 2138 } 2139 2140 /** 2141 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2142 * @phba: pointer to lpfc hba data structure. 2143 * @mdp: pointer to the data structure to hold the derived model name. 
2144 * @descp: pointer to the data structure to hold the derived description. 2145 * 2146 * This routine retrieves HBA's description based on its registered PCI device 2147 * ID. The @descp passed into this function points to an array of 256 chars. It 2148 * shall be returned with the model name, maximum speed, and the host bus type. 2149 * The @mdp passed into this function points to an array of 80 chars. When the 2150 * function returns, the @mdp will be filled with the model name. 2151 **/ 2152 static void 2153 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2154 { 2155 lpfc_vpd_t *vp; 2156 uint16_t dev_id = phba->pcidev->device; 2157 int max_speed; 2158 int GE = 0; 2159 int oneConnect = 0; /* default is not a oneConnect */ 2160 struct { 2161 char *name; 2162 char *bus; 2163 char *function; 2164 } m = {"<Unknown>", "", ""}; 2165 2166 if (mdp && mdp[0] != '\0' 2167 && descp && descp[0] != '\0') 2168 return; 2169 2170 if (phba->lmt & LMT_32Gb) 2171 max_speed = 32; 2172 else if (phba->lmt & LMT_16Gb) 2173 max_speed = 16; 2174 else if (phba->lmt & LMT_10Gb) 2175 max_speed = 10; 2176 else if (phba->lmt & LMT_8Gb) 2177 max_speed = 8; 2178 else if (phba->lmt & LMT_4Gb) 2179 max_speed = 4; 2180 else if (phba->lmt & LMT_2Gb) 2181 max_speed = 2; 2182 else if (phba->lmt & LMT_1Gb) 2183 max_speed = 1; 2184 else 2185 max_speed = 0; 2186 2187 vp = &phba->vpd; 2188 2189 switch (dev_id) { 2190 case PCI_DEVICE_ID_FIREFLY: 2191 m = (typeof(m)){"LP6000", "PCI", 2192 "Obsolete, Unsupported Fibre Channel Adapter"}; 2193 break; 2194 case PCI_DEVICE_ID_SUPERFLY: 2195 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2196 m = (typeof(m)){"LP7000", "PCI", ""}; 2197 else 2198 m = (typeof(m)){"LP7000E", "PCI", ""}; 2199 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2200 break; 2201 case PCI_DEVICE_ID_DRAGONFLY: 2202 m = (typeof(m)){"LP8000", "PCI", 2203 "Obsolete, Unsupported Fibre Channel Adapter"}; 2204 break; 2205 case PCI_DEVICE_ID_CENTAUR: 2206 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2207 m = (typeof(m)){"LP9002", "PCI", ""}; 2208 else 2209 m = (typeof(m)){"LP9000", "PCI", ""}; 2210 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2211 break; 2212 case PCI_DEVICE_ID_RFLY: 2213 m = (typeof(m)){"LP952", "PCI", 2214 "Obsolete, Unsupported Fibre Channel Adapter"}; 2215 break; 2216 case PCI_DEVICE_ID_PEGASUS: 2217 m = (typeof(m)){"LP9802", "PCI-X", 2218 "Obsolete, Unsupported Fibre Channel Adapter"}; 2219 break; 2220 case PCI_DEVICE_ID_THOR: 2221 m = (typeof(m)){"LP10000", "PCI-X", 2222 "Obsolete, Unsupported Fibre Channel Adapter"}; 2223 break; 2224 case PCI_DEVICE_ID_VIPER: 2225 m = (typeof(m)){"LPX1000", "PCI-X", 2226 "Obsolete, Unsupported Fibre Channel Adapter"}; 2227 break; 2228 case PCI_DEVICE_ID_PFLY: 2229 m = (typeof(m)){"LP982", "PCI-X", 2230 "Obsolete, Unsupported Fibre Channel Adapter"}; 2231 break; 2232 case PCI_DEVICE_ID_TFLY: 2233 m = (typeof(m)){"LP1050", "PCI-X", 2234 "Obsolete, Unsupported Fibre Channel Adapter"}; 2235 break; 2236 case PCI_DEVICE_ID_HELIOS: 2237 m = (typeof(m)){"LP11000", "PCI-X2", 2238 "Obsolete, Unsupported Fibre Channel Adapter"}; 2239 break; 2240 case PCI_DEVICE_ID_HELIOS_SCSP: 2241 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2242 "Obsolete, Unsupported Fibre Channel Adapter"}; 2243 break; 2244 case PCI_DEVICE_ID_HELIOS_DCSP: 2245 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2246 "Obsolete, Unsupported Fibre Channel Adapter"}; 2247 break; 2248 case PCI_DEVICE_ID_NEPTUNE: 2249 m = (typeof(m)){"LPe1000", "PCIe", 2250 
"Obsolete, Unsupported Fibre Channel Adapter"}; 2251 break; 2252 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2253 m = (typeof(m)){"LPe1000-SP", "PCIe", 2254 "Obsolete, Unsupported Fibre Channel Adapter"}; 2255 break; 2256 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2257 m = (typeof(m)){"LPe1002-SP", "PCIe", 2258 "Obsolete, Unsupported Fibre Channel Adapter"}; 2259 break; 2260 case PCI_DEVICE_ID_BMID: 2261 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2262 break; 2263 case PCI_DEVICE_ID_BSMB: 2264 m = (typeof(m)){"LP111", "PCI-X2", 2265 "Obsolete, Unsupported Fibre Channel Adapter"}; 2266 break; 2267 case PCI_DEVICE_ID_ZEPHYR: 2268 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2269 break; 2270 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2271 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2272 break; 2273 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2274 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2275 GE = 1; 2276 break; 2277 case PCI_DEVICE_ID_ZMID: 2278 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2279 break; 2280 case PCI_DEVICE_ID_ZSMB: 2281 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2282 break; 2283 case PCI_DEVICE_ID_LP101: 2284 m = (typeof(m)){"LP101", "PCI-X", 2285 "Obsolete, Unsupported Fibre Channel Adapter"}; 2286 break; 2287 case PCI_DEVICE_ID_LP10000S: 2288 m = (typeof(m)){"LP10000-S", "PCI", 2289 "Obsolete, Unsupported Fibre Channel Adapter"}; 2290 break; 2291 case PCI_DEVICE_ID_LP11000S: 2292 m = (typeof(m)){"LP11000-S", "PCI-X2", 2293 "Obsolete, Unsupported Fibre Channel Adapter"}; 2294 break; 2295 case PCI_DEVICE_ID_LPE11000S: 2296 m = (typeof(m)){"LPe11000-S", "PCIe", 2297 "Obsolete, Unsupported Fibre Channel Adapter"}; 2298 break; 2299 case PCI_DEVICE_ID_SAT: 2300 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2301 break; 2302 case PCI_DEVICE_ID_SAT_MID: 2303 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2304 break; 2305 case PCI_DEVICE_ID_SAT_SMB: 2306 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2307 break; 2308 case PCI_DEVICE_ID_SAT_DCSP: 2309 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2310 break; 2311 case PCI_DEVICE_ID_SAT_SCSP: 2312 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2313 break; 2314 case PCI_DEVICE_ID_SAT_S: 2315 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2316 break; 2317 case PCI_DEVICE_ID_HORNET: 2318 m = (typeof(m)){"LP21000", "PCIe", 2319 "Obsolete, Unsupported FCoE Adapter"}; 2320 GE = 1; 2321 break; 2322 case PCI_DEVICE_ID_PROTEUS_VF: 2323 m = (typeof(m)){"LPev12000", "PCIe IOV", 2324 "Obsolete, Unsupported Fibre Channel Adapter"}; 2325 break; 2326 case PCI_DEVICE_ID_PROTEUS_PF: 2327 m = (typeof(m)){"LPev12000", "PCIe IOV", 2328 "Obsolete, Unsupported Fibre Channel Adapter"}; 2329 break; 2330 case PCI_DEVICE_ID_PROTEUS_S: 2331 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2332 "Obsolete, Unsupported Fibre Channel Adapter"}; 2333 break; 2334 case PCI_DEVICE_ID_TIGERSHARK: 2335 oneConnect = 1; 2336 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2337 break; 2338 case PCI_DEVICE_ID_TOMCAT: 2339 oneConnect = 1; 2340 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2341 break; 2342 case PCI_DEVICE_ID_FALCON: 2343 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2344 "EmulexSecure Fibre"}; 2345 break; 2346 case PCI_DEVICE_ID_BALIUS: 2347 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2348 "Obsolete, Unsupported Fibre Channel Adapter"}; 2349 break; 2350 case PCI_DEVICE_ID_LANCER_FC: 2351 m = (typeof(m)){"LPe16000", "PCIe", 
"Fibre Channel Adapter"}; 2352 break; 2353 case PCI_DEVICE_ID_LANCER_FC_VF: 2354 m = (typeof(m)){"LPe16000", "PCIe", 2355 "Obsolete, Unsupported Fibre Channel Adapter"}; 2356 break; 2357 case PCI_DEVICE_ID_LANCER_FCOE: 2358 oneConnect = 1; 2359 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2360 break; 2361 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2362 oneConnect = 1; 2363 m = (typeof(m)){"OCe15100", "PCIe", 2364 "Obsolete, Unsupported FCoE"}; 2365 break; 2366 case PCI_DEVICE_ID_LANCER_G6_FC: 2367 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2368 break; 2369 case PCI_DEVICE_ID_SKYHAWK: 2370 case PCI_DEVICE_ID_SKYHAWK_VF: 2371 oneConnect = 1; 2372 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2373 break; 2374 default: 2375 m = (typeof(m)){"Unknown", "", ""}; 2376 break; 2377 } 2378 2379 if (mdp && mdp[0] == '\0') 2380 snprintf(mdp, 79,"%s", m.name); 2381 /* 2382 * oneConnect hba requires special processing, they are all initiators 2383 * and we put the port number on the end 2384 */ 2385 if (descp && descp[0] == '\0') { 2386 if (oneConnect) 2387 snprintf(descp, 255, 2388 "Emulex OneConnect %s, %s Initiator %s", 2389 m.name, m.function, 2390 phba->Port); 2391 else if (max_speed == 0) 2392 snprintf(descp, 255, 2393 "Emulex %s %s %s", 2394 m.name, m.bus, m.function); 2395 else 2396 snprintf(descp, 255, 2397 "Emulex %s %d%s %s %s", 2398 m.name, max_speed, (GE) ? "GE" : "Gb", 2399 m.bus, m.function); 2400 } 2401 } 2402 2403 /** 2404 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2405 * @phba: pointer to lpfc hba data structure. 2406 * @pring: pointer to a IOCB ring. 2407 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2408 * 2409 * This routine posts a given number of IOCBs with the associated DMA buffer 2410 * descriptors specified by the cnt argument to the given IOCB ring. 2411 * 2412 * Return codes 2413 * The number of IOCBs NOT able to be posted to the IOCB ring. 
2414 **/ 2415 int 2416 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2417 { 2418 IOCB_t *icmd; 2419 struct lpfc_iocbq *iocb; 2420 struct lpfc_dmabuf *mp1, *mp2; 2421 2422 cnt += pring->missbufcnt; 2423 2424 /* While there are buffers to post */ 2425 while (cnt > 0) { 2426 /* Allocate buffer for command iocb */ 2427 iocb = lpfc_sli_get_iocbq(phba); 2428 if (iocb == NULL) { 2429 pring->missbufcnt = cnt; 2430 return cnt; 2431 } 2432 icmd = &iocb->iocb; 2433 2434 /* 2 buffers can be posted per command */ 2435 /* Allocate buffer to post */ 2436 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2437 if (mp1) 2438 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2439 if (!mp1 || !mp1->virt) { 2440 kfree(mp1); 2441 lpfc_sli_release_iocbq(phba, iocb); 2442 pring->missbufcnt = cnt; 2443 return cnt; 2444 } 2445 2446 INIT_LIST_HEAD(&mp1->list); 2447 /* Allocate buffer to post */ 2448 if (cnt > 1) { 2449 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2450 if (mp2) 2451 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2452 &mp2->phys); 2453 if (!mp2 || !mp2->virt) { 2454 kfree(mp2); 2455 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2456 kfree(mp1); 2457 lpfc_sli_release_iocbq(phba, iocb); 2458 pring->missbufcnt = cnt; 2459 return cnt; 2460 } 2461 2462 INIT_LIST_HEAD(&mp2->list); 2463 } else { 2464 mp2 = NULL; 2465 } 2466 2467 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2468 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2469 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2470 icmd->ulpBdeCount = 1; 2471 cnt--; 2472 if (mp2) { 2473 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2474 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2475 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2476 cnt--; 2477 icmd->ulpBdeCount = 2; 2478 } 2479 2480 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2481 icmd->ulpLe = 1; 2482 2483 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2484 IOCB_ERROR) { 2485 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2486 kfree(mp1); 2487 cnt++; 2488 if (mp2) { 2489 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2490 kfree(mp2); 2491 cnt++; 2492 } 2493 lpfc_sli_release_iocbq(phba, iocb); 2494 pring->missbufcnt = cnt; 2495 return cnt; 2496 } 2497 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2498 if (mp2) 2499 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2500 } 2501 pring->missbufcnt = 0; 2502 return 0; 2503 } 2504 2505 /** 2506 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2507 * @phba: pointer to lpfc hba data structure. 2508 * 2509 * This routine posts initial receive IOCB buffers to the ELS ring. The 2510 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2511 * set to 64 IOCBs. SLI3 only. 2512 * 2513 * Return codes 2514 * 0 - success (currently always success) 2515 **/ 2516 static int 2517 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2518 { 2519 struct lpfc_sli *psli = &phba->sli; 2520 2521 /* Ring 0, ELS / CT buffers */ 2522 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2523 /* Ring 2 - FCP no buffers needed */ 2524 2525 return 0; 2526 } 2527 2528 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2529 2530 /** 2531 * lpfc_sha_init - Set up initial array of hash table entries 2532 * @HashResultPointer: pointer to an array as hash table. 2533 * 2534 * This routine sets up the initial values to the array of hash table entries 2535 * for the LC HBAs. 
2536 **/ 2537 static void 2538 lpfc_sha_init(uint32_t * HashResultPointer) 2539 { 2540 HashResultPointer[0] = 0x67452301; 2541 HashResultPointer[1] = 0xEFCDAB89; 2542 HashResultPointer[2] = 0x98BADCFE; 2543 HashResultPointer[3] = 0x10325476; 2544 HashResultPointer[4] = 0xC3D2E1F0; 2545 } 2546 2547 /** 2548 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2549 * @HashResultPointer: pointer to an initial/result hash table. 2550 * @HashWorkingPointer: pointer to a working hash table. 2551 * 2552 * This routine iterates an initial hash table pointed to by @HashResultPointer 2553 * with the values from the working hash table pointed to by @HashWorkingPointer. 2554 * The results are put back into the initial hash table, returned through 2555 * the @HashResultPointer as the result hash table. 2556 **/ 2557 static void 2558 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2559 { 2560 int t; 2561 uint32_t TEMP; 2562 uint32_t A, B, C, D, E; 2563 t = 16; 2564 do { 2565 HashWorkingPointer[t] = 2566 S(1, 2567 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2568 8] ^ 2569 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2570 } while (++t <= 79); 2571 t = 0; 2572 A = HashResultPointer[0]; 2573 B = HashResultPointer[1]; 2574 C = HashResultPointer[2]; 2575 D = HashResultPointer[3]; 2576 E = HashResultPointer[4]; 2577 2578 do { 2579 if (t < 20) { 2580 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2581 } else if (t < 40) { 2582 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2583 } else if (t < 60) { 2584 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2585 } else { 2586 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2587 } 2588 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2589 E = D; 2590 D = C; 2591 C = S(30, B); 2592 B = A; 2593 A = TEMP; 2594 } while (++t <= 79); 2595 2596 HashResultPointer[0] += A; 2597 HashResultPointer[1] += B; 2598 HashResultPointer[2] += C; 2599 HashResultPointer[3] += D; 2600 HashResultPointer[4] += E; 2601 2602 } 2603 2604 /** 2605 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2606 * @RandomChallenge: pointer to the entry of host challenge random number array. 2607 * @HashWorking: pointer to the entry of the working hash array. 2608 * 2609 * This routine calculates the working hash array referred to by @HashWorking 2610 * from the challenge random numbers associated with the host, referred to by 2611 * @RandomChallenge. The result is put into the entry of the working hash 2612 * array and returned by reference through @HashWorking. 2613 **/ 2614 static void 2615 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2616 { 2617 *HashWorking = (*RandomChallenge ^ *HashWorking); 2618 } 2619 2620 /** 2621 * lpfc_hba_init - Perform special handling for LC HBA initialization 2622 * @phba: pointer to lpfc hba data structure. 2623 * @hbainit: pointer to an array of unsigned 32-bit integers. 2624 * 2625 * This routine performs the special handling for LC HBA initialization.
2626 **/ 2627 void 2628 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2629 { 2630 int t; 2631 uint32_t *HashWorking; 2632 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2633 2634 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2635 if (!HashWorking) 2636 return; 2637 2638 HashWorking[0] = HashWorking[78] = *pwwnn++; 2639 HashWorking[1] = HashWorking[79] = *pwwnn; 2640 2641 for (t = 0; t < 7; t++) 2642 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2643 2644 lpfc_sha_init(hbainit); 2645 lpfc_sha_iterate(hbainit, HashWorking); 2646 kfree(HashWorking); 2647 } 2648 2649 /** 2650 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2651 * @vport: pointer to a virtual N_Port data structure. 2652 * 2653 * This routine performs the necessary cleanups before deleting the @vport. 2654 * It invokes the discovery state machine to perform necessary state 2655 * transitions and to release the ndlps associated with the @vport. Note, 2656 * the physical port is treated as @vport 0. 2657 **/ 2658 void 2659 lpfc_cleanup(struct lpfc_vport *vport) 2660 { 2661 struct lpfc_hba *phba = vport->phba; 2662 struct lpfc_nodelist *ndlp, *next_ndlp; 2663 int i = 0; 2664 2665 if (phba->link_state > LPFC_LINK_DOWN) 2666 lpfc_port_link_failure(vport); 2667 2668 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2669 if (!NLP_CHK_NODE_ACT(ndlp)) { 2670 ndlp = lpfc_enable_node(vport, ndlp, 2671 NLP_STE_UNUSED_NODE); 2672 if (!ndlp) 2673 continue; 2674 spin_lock_irq(&phba->ndlp_lock); 2675 NLP_SET_FREE_REQ(ndlp); 2676 spin_unlock_irq(&phba->ndlp_lock); 2677 /* Trigger the release of the ndlp memory */ 2678 lpfc_nlp_put(ndlp); 2679 continue; 2680 } 2681 spin_lock_irq(&phba->ndlp_lock); 2682 if (NLP_CHK_FREE_REQ(ndlp)) { 2683 /* The ndlp should not be in memory free mode already */ 2684 spin_unlock_irq(&phba->ndlp_lock); 2685 continue; 2686 } else 2687 /* Indicate request for freeing ndlp memory */ 2688 NLP_SET_FREE_REQ(ndlp); 2689 spin_unlock_irq(&phba->ndlp_lock); 2690 2691 if (vport->port_type != LPFC_PHYSICAL_PORT && 2692 ndlp->nlp_DID == Fabric_DID) { 2693 /* Just free up ndlp with Fabric_DID for vports */ 2694 lpfc_nlp_put(ndlp); 2695 continue; 2696 } 2697 2698 /* take care of nodes in unused state before the state 2699 * machine taking action. 2700 */ 2701 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2702 lpfc_nlp_put(ndlp); 2703 continue; 2704 } 2705 2706 if (ndlp->nlp_type & NLP_FABRIC) 2707 lpfc_disc_state_machine(vport, ndlp, NULL, 2708 NLP_EVT_DEVICE_RECOVERY); 2709 2710 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { 2711 /* Remove the NVME transport reference now and 2712 * continue to remove the node. 2713 */ 2714 lpfc_nlp_put(ndlp); 2715 } 2716 2717 lpfc_disc_state_machine(vport, ndlp, NULL, 2718 NLP_EVT_DEVICE_RM); 2719 } 2720 2721 /* At this point, ALL ndlp's should be gone 2722 * because of the previous NLP_EVT_DEVICE_RM. 2723 * Lets wait for this to happen, if needed. 
2724 */ 2725 while (!list_empty(&vport->fc_nodes)) { 2726 if (i++ > 3000) { 2727 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2728 "0233 Nodelist not empty\n"); 2729 list_for_each_entry_safe(ndlp, next_ndlp, 2730 &vport->fc_nodes, nlp_listp) { 2731 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2732 LOG_NODE, 2733 "0282 did:x%x ndlp:x%p " 2734 "usgmap:x%x refcnt:%d\n", 2735 ndlp->nlp_DID, (void *)ndlp, 2736 ndlp->nlp_usg_map, 2737 kref_read(&ndlp->kref)); 2738 } 2739 break; 2740 } 2741 2742 /* Wait for any activity on ndlps to settle */ 2743 msleep(10); 2744 } 2745 lpfc_cleanup_vports_rrqs(vport, NULL); 2746 } 2747 2748 /** 2749 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2750 * @vport: pointer to a virtual N_Port data structure. 2751 * 2752 * This routine stops all the timers associated with a @vport. This function 2753 * is invoked before disabling or deleting a @vport. Note that the physical 2754 * port is treated as @vport 0. 2755 **/ 2756 void 2757 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2758 { 2759 del_timer_sync(&vport->els_tmofunc); 2760 del_timer_sync(&vport->delayed_disc_tmo); 2761 lpfc_can_disctmo(vport); 2762 return; 2763 } 2764 2765 /** 2766 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2767 * @phba: pointer to lpfc hba data structure. 2768 * 2769 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2770 * caller of this routine should already hold the host lock. 2771 **/ 2772 void 2773 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2774 { 2775 /* Clear pending FCF rediscovery wait flag */ 2776 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2777 2778 /* Now, try to stop the timer */ 2779 del_timer(&phba->fcf.redisc_wait); 2780 } 2781 2782 /** 2783 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2784 * @phba: pointer to lpfc hba data structure. 2785 * 2786 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2787 * checks whether the FCF rediscovery wait timer is pending with the host 2788 * lock held before proceeding with disabling the timer and clearing the 2789 * wait timer pending flag. 2790 **/ 2791 void 2792 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2793 { 2794 spin_lock_irq(&phba->hbalock); 2795 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2796 /* FCF rediscovery timer already fired or stopped */ 2797 spin_unlock_irq(&phba->hbalock); 2798 return; 2799 } 2800 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2801 /* Clear failover in progress flags */ 2802 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2803 spin_unlock_irq(&phba->hbalock); 2804 } 2805 2806 /** 2807 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2808 * @phba: pointer to lpfc hba data structure. 2809 * 2810 * This routine stops all the timers associated with a HBA. This function is 2811 * invoked before either putting a HBA offline or unloading the driver.
2812 **/ 2813 void 2814 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2815 { 2816 lpfc_stop_vport_timers(phba->pport); 2817 del_timer_sync(&phba->sli.mbox_tmo); 2818 del_timer_sync(&phba->fabric_block_timer); 2819 del_timer_sync(&phba->eratt_poll); 2820 del_timer_sync(&phba->hb_tmofunc); 2821 if (phba->sli_rev == LPFC_SLI_REV4) { 2822 del_timer_sync(&phba->rrq_tmr); 2823 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2824 } 2825 phba->hb_outstanding = 0; 2826 2827 switch (phba->pci_dev_grp) { 2828 case LPFC_PCI_DEV_LP: 2829 /* Stop any LightPulse device specific driver timers */ 2830 del_timer_sync(&phba->fcp_poll_timer); 2831 break; 2832 case LPFC_PCI_DEV_OC: 2833 /* Stop any OneConnect device specific driver timers */ 2834 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2835 break; 2836 default: 2837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2838 "0297 Invalid device group (x%x)\n", 2839 phba->pci_dev_grp); 2840 break; 2841 } 2842 return; 2843 } 2844 2845 /** 2846 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2847 * @phba: pointer to lpfc hba data structure. 2848 * 2849 * This routine marks a HBA's management interface as blocked. Once the HBA's 2850 * management interface is marked as blocked, all the user space access to 2851 * the HBA, whether they are from sysfs interface or libdfc interface will 2852 * all be blocked. The HBA is set to block the management interface when the 2853 * driver prepares the HBA interface for online or offline. 2854 **/ 2855 static void 2856 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2857 { 2858 unsigned long iflag; 2859 uint8_t actcmd = MBX_HEARTBEAT; 2860 unsigned long timeout; 2861 2862 spin_lock_irqsave(&phba->hbalock, iflag); 2863 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2864 spin_unlock_irqrestore(&phba->hbalock, iflag); 2865 if (mbx_action == LPFC_MBX_NO_WAIT) 2866 return; 2867 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2868 spin_lock_irqsave(&phba->hbalock, iflag); 2869 if (phba->sli.mbox_active) { 2870 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2871 /* Determine how long we might wait for the active mailbox 2872 * command to be gracefully completed by firmware. 2873 */ 2874 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2875 phba->sli.mbox_active) * 1000) + jiffies; 2876 } 2877 spin_unlock_irqrestore(&phba->hbalock, iflag); 2878 2879 /* Wait for the outstanding mailbox command to complete */ 2880 while (phba->sli.mbox_active) { 2881 /* Check active mailbox complete status every 2ms */ 2882 msleep(2); 2883 if (time_after(jiffies, timeout)) { 2884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2885 "2813 Mgmt IO is Blocked %x " 2886 "- mbox cmd %x still active\n", 2887 phba->sli.sli_flag, actcmd); 2888 break; 2889 } 2890 } 2891 } 2892 2893 /** 2894 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 2895 * @phba: pointer to lpfc hba data structure. 2896 * 2897 * Allocate RPIs for all active remote nodes. This is needed whenever 2898 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 2899 * is to fixup the temporary rpi assignments.
2900 **/ 2901 void 2902 lpfc_sli4_node_prep(struct lpfc_hba *phba) 2903 { 2904 struct lpfc_nodelist *ndlp, *next_ndlp; 2905 struct lpfc_vport **vports; 2906 int i, rpi; 2907 unsigned long flags; 2908 2909 if (phba->sli_rev != LPFC_SLI_REV4) 2910 return; 2911 2912 vports = lpfc_create_vport_work_array(phba); 2913 if (vports == NULL) 2914 return; 2915 2916 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2917 if (vports[i]->load_flag & FC_UNLOADING) 2918 continue; 2919 2920 list_for_each_entry_safe(ndlp, next_ndlp, 2921 &vports[i]->fc_nodes, 2922 nlp_listp) { 2923 if (!NLP_CHK_NODE_ACT(ndlp)) 2924 continue; 2925 rpi = lpfc_sli4_alloc_rpi(phba); 2926 if (rpi == LPFC_RPI_ALLOC_ERROR) { 2927 spin_lock_irqsave(&phba->ndlp_lock, flags); 2928 NLP_CLR_NODE_ACT(ndlp); 2929 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 2930 continue; 2931 } 2932 ndlp->nlp_rpi = rpi; 2933 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 2934 "0009 rpi:%x DID:%x " 2935 "flg:%x map:%x %p\n", ndlp->nlp_rpi, 2936 ndlp->nlp_DID, ndlp->nlp_flag, 2937 ndlp->nlp_usg_map, ndlp); 2938 } 2939 } 2940 lpfc_destroy_vport_work_array(phba, vports); 2941 } 2942 2943 /** 2944 * lpfc_online - Initialize and bring a HBA online 2945 * @phba: pointer to lpfc hba data structure. 2946 * 2947 * This routine initializes the HBA and brings a HBA online. During this 2948 * process, the management interface is blocked to prevent user space access 2949 * to the HBA interfering with the driver initialization. 2950 * 2951 * Return codes 2952 * 0 - successful 2953 * 1 - failed 2954 **/ 2955 int 2956 lpfc_online(struct lpfc_hba *phba) 2957 { 2958 struct lpfc_vport *vport; 2959 struct lpfc_vport **vports; 2960 int i; 2961 bool vpis_cleared = false; 2962 2963 if (!phba) 2964 return 0; 2965 vport = phba->pport; 2966 2967 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2968 return 0; 2969 2970 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2971 "0458 Bring Adapter online\n"); 2972 2973 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 2974 2975 if (phba->sli_rev == LPFC_SLI_REV4) { 2976 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2977 lpfc_unblock_mgmt_io(phba); 2978 return 1; 2979 } 2980 spin_lock_irq(&phba->hbalock); 2981 if (!phba->sli4_hba.max_cfg_param.vpi_used) 2982 vpis_cleared = true; 2983 spin_unlock_irq(&phba->hbalock); 2984 } else { 2985 lpfc_sli_queue_init(phba); 2986 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2987 lpfc_unblock_mgmt_io(phba); 2988 return 1; 2989 } 2990 } 2991 2992 vports = lpfc_create_vport_work_array(phba); 2993 if (vports != NULL) { 2994 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2995 struct Scsi_Host *shost; 2996 shost = lpfc_shost_from_vport(vports[i]); 2997 spin_lock_irq(shost->host_lock); 2998 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2999 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3000 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3001 if (phba->sli_rev == LPFC_SLI_REV4) { 3002 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3003 if ((vpis_cleared) && 3004 (vports[i]->port_type != 3005 LPFC_PHYSICAL_PORT)) 3006 vports[i]->vpi = 0; 3007 } 3008 spin_unlock_irq(shost->host_lock); 3009 } 3010 } 3011 lpfc_destroy_vport_work_array(phba, vports); 3012 3013 lpfc_unblock_mgmt_io(phba); 3014 return 0; 3015 } 3016 3017 /** 3018 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3019 * @phba: pointer to lpfc hba data structure. 3020 * 3021 * This routine marks a HBA's management interface as not blocked. 
Once the 3022 * HBA's management interface is marked as not blocked, all the user space 3023 * access to the HBA, whether they are from sysfs interface or libdfc 3024 * interface will be allowed. The HBA is set to block the management interface 3025 * when the driver prepares the HBA interface for online or offline and then 3026 * set to unblock the management interface afterwards. 3027 **/ 3028 void 3029 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3030 { 3031 unsigned long iflag; 3032 3033 spin_lock_irqsave(&phba->hbalock, iflag); 3034 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3035 spin_unlock_irqrestore(&phba->hbalock, iflag); 3036 } 3037 3038 /** 3039 * lpfc_offline_prep - Prepare a HBA to be brought offline 3040 * @phba: pointer to lpfc hba data structure. 3041 * 3042 * This routine is invoked to prepare a HBA to be brought offline. It performs 3043 * unregistration login to all the nodes on all vports and flushes the mailbox 3044 * queue to make it ready to be brought offline. 3045 **/ 3046 void 3047 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3048 { 3049 struct lpfc_vport *vport = phba->pport; 3050 struct lpfc_nodelist *ndlp, *next_ndlp; 3051 struct lpfc_vport **vports; 3052 struct Scsi_Host *shost; 3053 int i; 3054 3055 if (vport->fc_flag & FC_OFFLINE_MODE) 3056 return; 3057 3058 lpfc_block_mgmt_io(phba, mbx_action); 3059 3060 lpfc_linkdown(phba); 3061 3062 /* Issue an unreg_login to all nodes on all vports */ 3063 vports = lpfc_create_vport_work_array(phba); 3064 if (vports != NULL) { 3065 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3066 if (vports[i]->load_flag & FC_UNLOADING) 3067 continue; 3068 shost = lpfc_shost_from_vport(vports[i]); 3069 spin_lock_irq(shost->host_lock); 3070 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3071 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3072 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3073 spin_unlock_irq(shost->host_lock); 3074 3075 shost = lpfc_shost_from_vport(vports[i]); 3076 list_for_each_entry_safe(ndlp, next_ndlp, 3077 &vports[i]->fc_nodes, 3078 nlp_listp) { 3079 if (!NLP_CHK_NODE_ACT(ndlp)) 3080 continue; 3081 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3082 continue; 3083 if (ndlp->nlp_type & NLP_FABRIC) { 3084 lpfc_disc_state_machine(vports[i], ndlp, 3085 NULL, NLP_EVT_DEVICE_RECOVERY); 3086 lpfc_disc_state_machine(vports[i], ndlp, 3087 NULL, NLP_EVT_DEVICE_RM); 3088 } 3089 spin_lock_irq(shost->host_lock); 3090 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3091 spin_unlock_irq(shost->host_lock); 3092 /* 3093 * Whenever an SLI4 port goes offline, free the 3094 * RPI. Get a new RPI when the adapter port 3095 * comes back online. 3096 */ 3097 if (phba->sli_rev == LPFC_SLI_REV4) { 3098 lpfc_printf_vlog(ndlp->vport, 3099 KERN_INFO, LOG_NODE, 3100 "0011 lpfc_offline: " 3101 "ndlp:x%p did %x " 3102 "usgmap:x%x rpi:%x\n", 3103 ndlp, ndlp->nlp_DID, 3104 ndlp->nlp_usg_map, 3105 ndlp->nlp_rpi); 3106 3107 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3108 } 3109 lpfc_unreg_rpi(vports[i], ndlp); 3110 } 3111 } 3112 } 3113 lpfc_destroy_vport_work_array(phba, vports); 3114 3115 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3116 } 3117 3118 /** 3119 * lpfc_offline - Bring a HBA offline 3120 * @phba: pointer to lpfc hba data structure. 3121 * 3122 * This routine actually brings a HBA offline. It stops all the timers 3123 * associated with the HBA, brings down the SLI layer, and eventually 3124 * marks the HBA as in offline state for the upper layer protocol. 
3125 **/ 3126 void 3127 lpfc_offline(struct lpfc_hba *phba) 3128 { 3129 struct Scsi_Host *shost; 3130 struct lpfc_vport **vports; 3131 int i; 3132 3133 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3134 return; 3135 3136 /* stop port and all timers associated with this hba */ 3137 lpfc_stop_port(phba); 3138 vports = lpfc_create_vport_work_array(phba); 3139 if (vports != NULL) 3140 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3141 lpfc_stop_vport_timers(vports[i]); 3142 lpfc_destroy_vport_work_array(phba, vports); 3143 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3144 "0460 Bring Adapter offline\n"); 3145 /* Bring down the SLI Layer and cleanup. The HBA is offline 3146 now. */ 3147 lpfc_sli_hba_down(phba); 3148 spin_lock_irq(&phba->hbalock); 3149 phba->work_ha = 0; 3150 spin_unlock_irq(&phba->hbalock); 3151 vports = lpfc_create_vport_work_array(phba); 3152 if (vports != NULL) 3153 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3154 shost = lpfc_shost_from_vport(vports[i]); 3155 spin_lock_irq(shost->host_lock); 3156 vports[i]->work_port_events = 0; 3157 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3158 spin_unlock_irq(shost->host_lock); 3159 } 3160 lpfc_destroy_vport_work_array(phba, vports); 3161 } 3162 3163 /** 3164 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3165 * @phba: pointer to lpfc hba data structure. 3166 * 3167 * This routine is to free all the SCSI buffers and IOCBs from the driver 3168 * list back to kernel. It is called from lpfc_pci_remove_one to free 3169 * the internal resources before the device is removed from the system. 3170 **/ 3171 static void 3172 lpfc_scsi_free(struct lpfc_hba *phba) 3173 { 3174 struct lpfc_scsi_buf *sb, *sb_next; 3175 3176 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3177 return; 3178 3179 spin_lock_irq(&phba->hbalock); 3180 3181 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3182 3183 spin_lock(&phba->scsi_buf_list_put_lock); 3184 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3185 list) { 3186 list_del(&sb->list); 3187 pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3188 sb->dma_handle); 3189 kfree(sb); 3190 phba->total_scsi_bufs--; 3191 } 3192 spin_unlock(&phba->scsi_buf_list_put_lock); 3193 3194 spin_lock(&phba->scsi_buf_list_get_lock); 3195 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3196 list) { 3197 list_del(&sb->list); 3198 pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3199 sb->dma_handle); 3200 kfree(sb); 3201 phba->total_scsi_bufs--; 3202 } 3203 spin_unlock(&phba->scsi_buf_list_get_lock); 3204 spin_unlock_irq(&phba->hbalock); 3205 } 3206 /** 3207 * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists 3208 * @phba: pointer to lpfc hba data structure. 3209 * 3210 * This routine is to free all the NVME buffers and IOCBs from the driver 3211 * list back to kernel. It is called from lpfc_pci_remove_one to free 3212 * the internal resources before the device is removed from the system. 3213 **/ 3214 static void 3215 lpfc_nvme_free(struct lpfc_hba *phba) 3216 { 3217 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; 3218 3219 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 3220 return; 3221 3222 spin_lock_irq(&phba->hbalock); 3223 3224 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3225 spin_lock(&phba->nvme_buf_list_put_lock); 3226 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3227 &phba->lpfc_nvme_buf_list_put, list) { 3228 list_del(&lpfc_ncmd->list); 3229 pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, 3230 lpfc_ncmd->dma_handle); 3231 kfree(lpfc_ncmd); 3232 phba->total_nvme_bufs--; 3233 } 3234 spin_unlock(&phba->nvme_buf_list_put_lock); 3235 3236 spin_lock(&phba->nvme_buf_list_get_lock); 3237 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3238 &phba->lpfc_nvme_buf_list_get, list) { 3239 list_del(&lpfc_ncmd->list); 3240 pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, 3241 lpfc_ncmd->dma_handle); 3242 kfree(lpfc_ncmd); 3243 phba->total_nvme_bufs--; 3244 } 3245 spin_unlock(&phba->nvme_buf_list_get_lock); 3246 spin_unlock_irq(&phba->hbalock); 3247 } 3248 /** 3249 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3250 * @phba: pointer to lpfc hba data structure. 3251 * 3252 * This routine first calculates the sizes of the current els and allocated 3253 * scsi sgl lists, and then goes through all sgls to update the physical 3254 * XRIs assigned due to port function reset. During port initialization, the 3255 * current els and allocated scsi sgl lists are 0s. 3256 * 3257 * Return codes 3258 * 0 - successful (for now, it always returns 0) 3259 **/ 3260 int 3261 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3262 { 3263 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3264 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3265 LIST_HEAD(els_sgl_list); 3266 int rc; 3267 3268 /* 3269 * update on pci function's els xri-sgl list 3270 */ 3271 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3272 3273 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3274 /* els xri-sgl expanded */ 3275 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3276 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3277 "3157 ELS xri-sgl count increased from " 3278 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3279 els_xri_cnt); 3280 /* allocate the additional els sgls */ 3281 for (i = 0; i < xri_cnt; i++) { 3282 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3283 GFP_KERNEL); 3284 if (sglq_entry == NULL) { 3285 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3286 "2562 Failure to allocate an " 3287 "ELS sgl entry:%d\n", i); 3288 rc = -ENOMEM; 3289 goto out_free_mem; 3290 } 3291 sglq_entry->buff_type = GEN_BUFF_TYPE; 3292 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3293 &sglq_entry->phys); 3294 if (sglq_entry->virt == NULL) { 3295 kfree(sglq_entry); 3296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3297 "2563 Failure to allocate an " 3298 "ELS mbuf:%d\n", i); 3299 rc = -ENOMEM; 3300 goto out_free_mem; 3301 } 3302 sglq_entry->sgl = sglq_entry->virt; 3303 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3304 sglq_entry->state = SGL_FREED; 3305 list_add_tail(&sglq_entry->list, &els_sgl_list); 3306 } 3307 spin_lock_irq(&phba->hbalock); 3308 spin_lock(&phba->sli4_hba.sgl_list_lock); 3309 list_splice_init(&els_sgl_list, 3310 &phba->sli4_hba.lpfc_els_sgl_list); 3311 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3312 spin_unlock_irq(&phba->hbalock); 3313 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3314 /* els xri-sgl shrunk */ 3315 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3316 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3317 "3158 ELS xri-sgl count decreased from " 3318 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3319 els_xri_cnt); 3320 spin_lock_irq(&phba->hbalock); 3321 spin_lock(&phba->sli4_hba.sgl_list_lock); 3322
list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, 3323 &els_sgl_list); 3324 /* release extra els sgls from list */ 3325 for (i = 0; i < xri_cnt; i++) { 3326 list_remove_head(&els_sgl_list, 3327 sglq_entry, struct lpfc_sglq, list); 3328 if (sglq_entry) { 3329 __lpfc_mbuf_free(phba, sglq_entry->virt, 3330 sglq_entry->phys); 3331 kfree(sglq_entry); 3332 } 3333 } 3334 list_splice_init(&els_sgl_list, 3335 &phba->sli4_hba.lpfc_els_sgl_list); 3336 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3337 spin_unlock_irq(&phba->hbalock); 3338 } else 3339 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3340 "3163 ELS xri-sgl count unchanged: %d\n", 3341 els_xri_cnt); 3342 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 3343 3344 /* update xris to els sgls on the list */ 3345 sglq_entry = NULL; 3346 sglq_entry_next = NULL; 3347 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3348 &phba->sli4_hba.lpfc_els_sgl_list, list) { 3349 lxri = lpfc_sli4_next_xritag(phba); 3350 if (lxri == NO_XRI) { 3351 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3352 "2400 Failed to allocate xri for " 3353 "ELS sgl\n"); 3354 rc = -ENOMEM; 3355 goto out_free_mem; 3356 } 3357 sglq_entry->sli4_lxritag = lxri; 3358 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3359 } 3360 return 0; 3361 3362 out_free_mem: 3363 lpfc_free_els_sgl_list(phba); 3364 return rc; 3365 } 3366 3367 /** 3368 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping 3369 * @phba: pointer to lpfc hba data structure. 3370 * 3371 * This routine first calculates the sizes of the current els and allocated 3372 * scsi sgl lists, and then goes through all sgls to updates the physical 3373 * XRIs assigned due to port function reset. During port initialization, the 3374 * current els and allocated scsi sgl lists are 0s. 3375 * 3376 * Return codes 3377 * 0 - successful (for now, it always returns 0) 3378 **/ 3379 int 3380 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 3381 { 3382 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3383 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3384 uint16_t nvmet_xri_cnt; 3385 LIST_HEAD(nvmet_sgl_list); 3386 int rc; 3387 3388 /* 3389 * update on pci function's nvmet xri-sgl list 3390 */ 3391 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3392 3393 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 3394 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3395 3396 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3397 /* els xri-sgl expanded */ 3398 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 3399 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3400 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 3401 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 3402 /* allocate the additional nvmet sgls */ 3403 for (i = 0; i < xri_cnt; i++) { 3404 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3405 GFP_KERNEL); 3406 if (sglq_entry == NULL) { 3407 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3408 "6303 Failure to allocate an " 3409 "NVMET sgl entry:%d\n", i); 3410 rc = -ENOMEM; 3411 goto out_free_mem; 3412 } 3413 sglq_entry->buff_type = NVMET_BUFF_TYPE; 3414 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 3415 &sglq_entry->phys); 3416 if (sglq_entry->virt == NULL) { 3417 kfree(sglq_entry); 3418 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3419 "6304 Failure to allocate an " 3420 "NVMET buf:%d\n", i); 3421 rc = -ENOMEM; 3422 goto out_free_mem; 3423 } 3424 sglq_entry->sgl = sglq_entry->virt; 3425 memset(sglq_entry->sgl, 0, 3426 phba->cfg_sg_dma_buf_size); 3427 sglq_entry->state = SGL_FREED; 3428 
list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3429 } 3430 spin_lock_irq(&phba->hbalock); 3431 spin_lock(&phba->sli4_hba.sgl_list_lock); 3432 list_splice_init(&nvmet_sgl_list, 3433 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3434 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3435 spin_unlock_irq(&phba->hbalock); 3436 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3437 /* nvmet xri-sgl shrunk */ 3438 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3439 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3440 "6305 NVMET xri-sgl count decreased from " 3441 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3442 nvmet_xri_cnt); 3443 spin_lock_irq(&phba->hbalock); 3444 spin_lock(&phba->sli4_hba.sgl_list_lock); 3445 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3446 &nvmet_sgl_list); 3447 /* release extra nvmet sgls from list */ 3448 for (i = 0; i < xri_cnt; i++) { 3449 list_remove_head(&nvmet_sgl_list, 3450 sglq_entry, struct lpfc_sglq, list); 3451 if (sglq_entry) { 3452 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3453 sglq_entry->phys); 3454 kfree(sglq_entry); 3455 } 3456 } 3457 list_splice_init(&nvmet_sgl_list, 3458 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3459 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3460 spin_unlock_irq(&phba->hbalock); 3461 } else 3462 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3463 "6306 NVMET xri-sgl count unchanged: %d\n", 3464 nvmet_xri_cnt); 3465 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3466 3467 /* update xris to nvmet sgls on the list */ 3468 sglq_entry = NULL; 3469 sglq_entry_next = NULL; 3470 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3471 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3472 lxri = lpfc_sli4_next_xritag(phba); 3473 if (lxri == NO_XRI) { 3474 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3475 "6307 Failed to allocate xri for " 3476 "NVMET sgl\n"); 3477 rc = -ENOMEM; 3478 goto out_free_mem; 3479 } 3480 sglq_entry->sli4_lxritag = lxri; 3481 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3482 } 3483 return 0; 3484 3485 out_free_mem: 3486 lpfc_free_nvmet_sgl_list(phba); 3487 return rc; 3488 } 3489 3490 /** 3491 * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping 3492 * @phba: pointer to lpfc hba data structure. 3493 * 3494 * This routine first calculates the sizes of the current els and allocated 3495 * scsi sgl lists, and then goes through all sgls to updates the physical 3496 * XRIs assigned due to port function reset. During port initialization, the 3497 * current els and allocated scsi sgl lists are 0s. 
3498 * 3499 * Return codes 3500 * 0 - successful (for now, it always returns 0) 3501 **/ 3502 int 3503 lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba) 3504 { 3505 struct lpfc_scsi_buf *psb, *psb_next; 3506 uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt; 3507 LIST_HEAD(scsi_sgl_list); 3508 int rc; 3509 3510 /* 3511 * update on pci function's els xri-sgl list 3512 */ 3513 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3514 phba->total_scsi_bufs = 0; 3515 3516 /* 3517 * update on pci function's allocated scsi xri-sgl list 3518 */ 3519 /* maximum number of xris available for scsi buffers */ 3520 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - 3521 els_xri_cnt; 3522 3523 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3524 return 0; 3525 3526 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3527 phba->sli4_hba.scsi_xri_max = /* Split them up */ 3528 (phba->sli4_hba.scsi_xri_max * 3529 phba->cfg_xri_split) / 100; 3530 3531 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3532 spin_lock(&phba->scsi_buf_list_put_lock); 3533 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); 3534 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); 3535 spin_unlock(&phba->scsi_buf_list_put_lock); 3536 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3537 3538 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3539 "6060 Current allocated SCSI xri-sgl count:%d, " 3540 "maximum SCSI xri count:%d (split:%d)\n", 3541 phba->sli4_hba.scsi_xri_cnt, 3542 phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split); 3543 3544 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { 3545 /* max scsi xri shrinked below the allocated scsi buffers */ 3546 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - 3547 phba->sli4_hba.scsi_xri_max; 3548 /* release the extra allocated scsi buffers */ 3549 for (i = 0; i < scsi_xri_cnt; i++) { 3550 list_remove_head(&scsi_sgl_list, psb, 3551 struct lpfc_scsi_buf, list); 3552 if (psb) { 3553 pci_pool_free(phba->lpfc_sg_dma_buf_pool, 3554 psb->data, psb->dma_handle); 3555 kfree(psb); 3556 } 3557 } 3558 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3559 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; 3560 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3561 } 3562 3563 /* update xris associated to remaining allocated scsi buffers */ 3564 psb = NULL; 3565 psb_next = NULL; 3566 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { 3567 lxri = lpfc_sli4_next_xritag(phba); 3568 if (lxri == NO_XRI) { 3569 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3570 "2560 Failed to allocate xri for " 3571 "scsi buffer\n"); 3572 rc = -ENOMEM; 3573 goto out_free_mem; 3574 } 3575 psb->cur_iocbq.sli4_lxritag = lxri; 3576 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3577 } 3578 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3579 spin_lock(&phba->scsi_buf_list_put_lock); 3580 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); 3581 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 3582 spin_unlock(&phba->scsi_buf_list_put_lock); 3583 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3584 return 0; 3585 3586 out_free_mem: 3587 lpfc_scsi_free(phba); 3588 return rc; 3589 } 3590 3591 static uint64_t 3592 lpfc_get_wwpn(struct lpfc_hba *phba) 3593 { 3594 uint64_t wwn; 3595 int rc; 3596 LPFC_MBOXQ_t *mboxq; 3597 MAILBOX_t *mb; 3598 3599 if (phba->sli_rev < LPFC_SLI_REV4) { 3600 /* Reset the port first */ 3601 lpfc_sli_brdrestart(phba); 3602 rc = lpfc_sli_chipset_init(phba); 3603 if (rc) 3604 return (uint64_t)-1; 3605 } 3606 3607 mboxq = (LPFC_MBOXQ_t *) 
mempool_alloc(phba->mbox_mem_pool, 3608 GFP_KERNEL); 3609 if (!mboxq) 3610 return (uint64_t)-1; 3611 3612 /* First get WWN of HBA instance */ 3613 lpfc_read_nv(phba, mboxq); 3614 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 3615 if (rc != MBX_SUCCESS) { 3616 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3617 "6019 Mailbox failed, mbxCmd x%x " 3618 "READ_NV, mbxStatus x%x\n", 3619 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 3620 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 3621 mempool_free(mboxq, phba->mbox_mem_pool); 3622 return (uint64_t) -1; 3623 } 3624 mb = &mboxq->u.mb; 3625 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 3626 /* wwn is WWPN of HBA instance */ 3627 mempool_free(mboxq, phba->mbox_mem_pool); 3628 if (phba->sli_rev == LPFC_SLI_REV4) 3629 return be64_to_cpu(wwn); 3630 else 3631 return (((wwn & 0xffffffff00000000) >> 32) | 3632 ((wwn & 0x00000000ffffffff) << 32)); 3633 3634 } 3635 3636 /** 3637 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping 3638 * @phba: pointer to lpfc hba data structure. 3639 * 3640 * This routine first calculates the sizes of the current ELS and allocated 3641 * NVME sgl lists, and then goes through all sgls to update the physical 3642 * XRIs assigned due to port function reset. During port initialization, the 3643 * current ELS and allocated NVME sgl lists are empty. 3644 * 3645 * Return codes 3646 * 0 - successful (for now, it always returns 0) 3647 **/ 3648 int 3649 lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba) 3650 { 3651 struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 3652 uint16_t i, lxri, els_xri_cnt; 3653 uint16_t nvme_xri_cnt, nvme_xri_max; 3654 LIST_HEAD(nvme_sgl_list); 3655 int rc; 3656 3657 phba->total_nvme_bufs = 0; 3658 3659 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 3660 return 0; 3661 /* 3662 * update on pci function's allocated nvme xri-sgl list 3663 */ 3664 3665 /* maximum number of xris available for nvme buffers */ 3666 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3667 nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3668 phba->sli4_hba.nvme_xri_max = nvme_xri_max; 3669 phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max; 3670 3671 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3672 "6074 Current allocated NVME xri-sgl count:%d, " 3673 "maximum NVME xri count:%d\n", 3674 phba->sli4_hba.nvme_xri_cnt, 3675 phba->sli4_hba.nvme_xri_max); 3676 3677 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3678 spin_lock(&phba->nvme_buf_list_put_lock); 3679 list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list); 3680 list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list); 3681 spin_unlock(&phba->nvme_buf_list_put_lock); 3682 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3683 3684 if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) { 3685 /* max nvme xri shrunk below the allocated nvme buffers */ 3686 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3687 nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt - 3688 phba->sli4_hba.nvme_xri_max; 3689 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3690 /* release the extra allocated nvme buffers */ 3691 for (i = 0; i < nvme_xri_cnt; i++) { 3692 list_remove_head(&nvme_sgl_list, lpfc_ncmd, 3693 struct lpfc_nvme_buf, list); 3694 if (lpfc_ncmd) { 3695 pci_pool_free(phba->lpfc_sg_dma_buf_pool, 3696 lpfc_ncmd->data, 3697 lpfc_ncmd->dma_handle); 3698 kfree(lpfc_ncmd); 3699 } 3700 } 3701 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3702 phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt; 3703
spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3704 } 3705 3706 /* update xris associated to remaining allocated nvme buffers */ 3707 lpfc_ncmd = NULL; 3708 lpfc_ncmd_next = NULL; 3709 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3710 &nvme_sgl_list, list) { 3711 lxri = lpfc_sli4_next_xritag(phba); 3712 if (lxri == NO_XRI) { 3713 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3714 "6075 Failed to allocate xri for " 3715 "nvme buffer\n"); 3716 rc = -ENOMEM; 3717 goto out_free_mem; 3718 } 3719 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 3720 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3721 } 3722 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3723 spin_lock(&phba->nvme_buf_list_put_lock); 3724 list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get); 3725 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); 3726 spin_unlock(&phba->nvme_buf_list_put_lock); 3727 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3728 return 0; 3729 3730 out_free_mem: 3731 lpfc_nvme_free(phba); 3732 return rc; 3733 } 3734 3735 /** 3736 * lpfc_create_port - Create an FC port 3737 * @phba: pointer to lpfc hba data structure. 3738 * @instance: a unique integer ID to this FC port. 3739 * @dev: pointer to the device data structure. 3740 * 3741 * This routine creates a FC port for the upper layer protocol. The FC port 3742 * can be created on top of either a physical port or a virtual port provided 3743 * by the HBA. This routine also allocates a SCSI host data structure (shost) 3744 * and associates the FC port created before adding the shost into the SCSI 3745 * layer. 3746 * 3747 * Return codes 3748 * @vport - pointer to the virtual N_Port data structure. 3749 * NULL - port create failed. 3750 **/ 3751 struct lpfc_vport * 3752 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 3753 { 3754 struct lpfc_vport *vport; 3755 struct Scsi_Host *shost = NULL; 3756 int error = 0; 3757 int i; 3758 uint64_t wwn; 3759 bool use_no_reset_hba = false; 3760 3761 wwn = lpfc_get_wwpn(phba); 3762 3763 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 3764 if (wwn == lpfc_no_hba_reset[i]) { 3765 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3766 "6020 Setting use_no_reset port=%llx\n", 3767 wwn); 3768 use_no_reset_hba = true; 3769 break; 3770 } 3771 } 3772 3773 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 3774 if (dev != &phba->pcidev->dev) { 3775 shost = scsi_host_alloc(&lpfc_vport_template, 3776 sizeof(struct lpfc_vport)); 3777 } else { 3778 if (!use_no_reset_hba) 3779 shost = scsi_host_alloc(&lpfc_template, 3780 sizeof(struct lpfc_vport)); 3781 else 3782 shost = scsi_host_alloc(&lpfc_template_no_hr, 3783 sizeof(struct lpfc_vport)); 3784 } 3785 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 3786 shost = scsi_host_alloc(&lpfc_template_nvme, 3787 sizeof(struct lpfc_vport)); 3788 } 3789 if (!shost) 3790 goto out; 3791 3792 vport = (struct lpfc_vport *) shost->hostdata; 3793 vport->phba = phba; 3794 vport->load_flag |= FC_LOADING; 3795 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3796 vport->fc_rscn_flush = 0; 3797 lpfc_get_vport_cfgparam(vport); 3798 3799 shost->unique_id = instance; 3800 shost->max_id = LPFC_MAX_TARGET; 3801 shost->max_lun = vport->cfg_max_luns; 3802 shost->this_id = -1; 3803 shost->max_cmd_len = 16; 3804 shost->nr_hw_queues = phba->cfg_fcp_io_channel; 3805 if (phba->sli_rev == LPFC_SLI_REV4) { 3806 shost->dma_boundary = 3807 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 3808 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 3809 } 3810 3811 /* 3812 * Set initial can_queue 
value since 0 is no longer supported and 3813 * scsi_add_host will fail. This will be adjusted later based on the 3814 * max xri value determined in hba setup. 3815 */ 3816 shost->can_queue = phba->cfg_hba_queue_depth - 10; 3817 if (dev != &phba->pcidev->dev) { 3818 shost->transportt = lpfc_vport_transport_template; 3819 vport->port_type = LPFC_NPIV_PORT; 3820 } else { 3821 shost->transportt = lpfc_transport_template; 3822 vport->port_type = LPFC_PHYSICAL_PORT; 3823 } 3824 3825 /* Initialize all internally managed lists. */ 3826 INIT_LIST_HEAD(&vport->fc_nodes); 3827 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3828 spin_lock_init(&vport->work_port_lock); 3829 3830 setup_timer(&vport->fc_disctmo, lpfc_disc_timeout, 3831 (unsigned long)vport); 3832 3833 setup_timer(&vport->els_tmofunc, lpfc_els_timeout, 3834 (unsigned long)vport); 3835 3836 setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 3837 (unsigned long)vport); 3838 3839 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3840 if (error) 3841 goto out_put_shost; 3842 3843 spin_lock_irq(&phba->hbalock); 3844 list_add_tail(&vport->listentry, &phba->port_list); 3845 spin_unlock_irq(&phba->hbalock); 3846 return vport; 3847 3848 out_put_shost: 3849 scsi_host_put(shost); 3850 out: 3851 return NULL; 3852 } 3853 3854 /** 3855 * destroy_port - destroy an FC port 3856 * @vport: pointer to an lpfc virtual N_Port data structure. 3857 * 3858 * This routine destroys a FC port from the upper layer protocol. All the 3859 * resources associated with the port are released. 3860 **/ 3861 void 3862 destroy_port(struct lpfc_vport *vport) 3863 { 3864 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3865 struct lpfc_hba *phba = vport->phba; 3866 3867 lpfc_debugfs_terminate(vport); 3868 fc_remove_host(shost); 3869 scsi_remove_host(shost); 3870 3871 spin_lock_irq(&phba->hbalock); 3872 list_del_init(&vport->listentry); 3873 spin_unlock_irq(&phba->hbalock); 3874 3875 lpfc_cleanup(vport); 3876 return; 3877 } 3878 3879 /** 3880 * lpfc_get_instance - Get a unique integer ID 3881 * 3882 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 3883 * uses the kernel idr facility to perform the task. 3884 * 3885 * Return codes: 3886 * instance - a unique integer ID allocated as the new instance. 3887 * -1 - lpfc get instance failed. 3888 **/ 3889 int 3890 lpfc_get_instance(void) 3891 { 3892 int ret; 3893 3894 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 3895 return ret < 0 ? -1 : ret; 3896 } 3897 3898 /** 3899 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 3900 * @shost: pointer to SCSI host data structure. 3901 * @time: elapsed time of the scan in jiffies. 3902 * 3903 * This routine is called by the SCSI layer with a SCSI host to determine 3904 * whether the scan host is finished. 3905 * 3906 * Note: there is no scan_start function as adapter initialization will have 3907 * asynchronously kicked off the link initialization. 3908 * 3909 * Return codes 3910 * 0 - SCSI host scan is not over yet. 3911 * 1 - SCSI host scan is over. 
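 *
 * Note: the scan is also declared complete once it has been running for
 * roughly 30 seconds, or after about 15 seconds if the link is still down,
 * so that host initialization can continue.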
3912 **/ 3913 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 3914 { 3915 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3916 struct lpfc_hba *phba = vport->phba; 3917 int stat = 0; 3918 3919 spin_lock_irq(shost->host_lock); 3920 3921 if (vport->load_flag & FC_UNLOADING) { 3922 stat = 1; 3923 goto finished; 3924 } 3925 if (time >= msecs_to_jiffies(30 * 1000)) { 3926 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3927 "0461 Scanning longer than 30 " 3928 "seconds. Continuing initialization\n"); 3929 stat = 1; 3930 goto finished; 3931 } 3932 if (time >= msecs_to_jiffies(15 * 1000) && 3933 phba->link_state <= LPFC_LINK_DOWN) { 3934 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3935 "0465 Link down longer than 15 " 3936 "seconds. Continuing initialization\n"); 3937 stat = 1; 3938 goto finished; 3939 } 3940 3941 if (vport->port_state != LPFC_VPORT_READY) 3942 goto finished; 3943 if (vport->num_disc_nodes || vport->fc_prli_sent) 3944 goto finished; 3945 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 3946 goto finished; 3947 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 3948 goto finished; 3949 3950 stat = 1; 3951 3952 finished: 3953 spin_unlock_irq(shost->host_lock); 3954 return stat; 3955 } 3956 3957 /** 3958 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port 3959 * @shost: pointer to SCSI host data structure. 3960 * 3961 * This routine initializes a given SCSI host's attributes on an FC port. The 3962 * SCSI host can be either on top of a physical port or a virtual port. 3963 **/ 3964 void lpfc_host_attrib_init(struct Scsi_Host *shost) 3965 { 3966 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3967 struct lpfc_hba *phba = vport->phba; 3968 /* 3969 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
3970 */ 3971 3972 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 3973 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 3974 fc_host_supported_classes(shost) = FC_COS_CLASS3; 3975 3976 memset(fc_host_supported_fc4s(shost), 0, 3977 sizeof(fc_host_supported_fc4s(shost))); 3978 fc_host_supported_fc4s(shost)[2] = 1; 3979 fc_host_supported_fc4s(shost)[7] = 1; 3980 3981 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 3982 sizeof fc_host_symbolic_name(shost)); 3983 3984 fc_host_supported_speeds(shost) = 0; 3985 if (phba->lmt & LMT_32Gb) 3986 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 3987 if (phba->lmt & LMT_16Gb) 3988 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 3989 if (phba->lmt & LMT_10Gb) 3990 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 3991 if (phba->lmt & LMT_8Gb) 3992 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 3993 if (phba->lmt & LMT_4Gb) 3994 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 3995 if (phba->lmt & LMT_2Gb) 3996 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 3997 if (phba->lmt & LMT_1Gb) 3998 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 3999 4000 fc_host_maxframe_size(shost) = 4001 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4002 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4003 4004 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4005 4006 /* This value is also unchanging */ 4007 memset(fc_host_active_fc4s(shost), 0, 4008 sizeof(fc_host_active_fc4s(shost))); 4009 fc_host_active_fc4s(shost)[2] = 1; 4010 fc_host_active_fc4s(shost)[7] = 1; 4011 4012 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4013 spin_lock_irq(shost->host_lock); 4014 vport->load_flag &= ~FC_LOADING; 4015 spin_unlock_irq(shost->host_lock); 4016 } 4017 4018 /** 4019 * lpfc_stop_port_s3 - Stop SLI3 device port 4020 * @phba: pointer to lpfc hba data structure. 4021 * 4022 * This routine is invoked to stop an SLI3 device port, it stops the device 4023 * from generating interrupts and stops the device driver's timers for the 4024 * device. 4025 **/ 4026 static void 4027 lpfc_stop_port_s3(struct lpfc_hba *phba) 4028 { 4029 /* Clear all interrupt enable conditions */ 4030 writel(0, phba->HCregaddr); 4031 readl(phba->HCregaddr); /* flush */ 4032 /* Clear all pending interrupts */ 4033 writel(0xffffffff, phba->HAregaddr); 4034 readl(phba->HAregaddr); /* flush */ 4035 4036 /* Reset some HBA SLI setup states */ 4037 lpfc_stop_hba_timers(phba); 4038 phba->pport->work_port_events = 0; 4039 } 4040 4041 /** 4042 * lpfc_stop_port_s4 - Stop SLI4 device port 4043 * @phba: pointer to lpfc hba data structure. 4044 * 4045 * This routine is invoked to stop an SLI4 device port, it stops the device 4046 * from generating interrupts and stops the device driver's timers for the 4047 * device. 4048 **/ 4049 static void 4050 lpfc_stop_port_s4(struct lpfc_hba *phba) 4051 { 4052 /* Reset some HBA SLI4 setup states */ 4053 lpfc_stop_hba_timers(phba); 4054 phba->pport->work_port_events = 0; 4055 phba->sli4_hba.intr_enable = 0; 4056 } 4057 4058 /** 4059 * lpfc_stop_port - Wrapper function for stopping hba port 4060 * @phba: Pointer to HBA context object. 4061 * 4062 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4063 * the API jump table function pointer from the lpfc_hba struct. 
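 * For an SLI-3 HBA this jump table entry resolves to lpfc_stop_port_s3();
 * for an SLI-4 HBA it resolves to lpfc_stop_port_s4().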
4064 **/ 4065 void 4066 lpfc_stop_port(struct lpfc_hba *phba) 4067 { 4068 phba->lpfc_stop_port(phba); 4069 } 4070 4071 /** 4072 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4073 * @phba: Pointer to hba for which this call is being executed. 4074 * 4075 * This routine starts the timer waiting for the FCF rediscovery to complete. 4076 **/ 4077 void 4078 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 4079 { 4080 unsigned long fcf_redisc_wait_tmo = 4081 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 4082 /* Start fcf rediscovery wait period timer */ 4083 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 4084 spin_lock_irq(&phba->hbalock); 4085 /* Allow action to new fcf asynchronous event */ 4086 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 4087 /* Mark the FCF rediscovery pending state */ 4088 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 4089 spin_unlock_irq(&phba->hbalock); 4090 } 4091 4092 /** 4093 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 4094 * @ptr: Map to lpfc_hba data structure pointer. 4095 * 4096 * This routine is invoked when waiting for FCF table rediscover has been 4097 * timed out. If new FCF record(s) has (have) been discovered during the 4098 * wait period, a new FCF event shall be added to the FCOE async event 4099 * list, and then worker thread shall be waked up for processing from the 4100 * worker thread context. 4101 **/ 4102 static void 4103 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 4104 { 4105 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 4106 4107 /* Don't send FCF rediscovery event if timer cancelled */ 4108 spin_lock_irq(&phba->hbalock); 4109 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4110 spin_unlock_irq(&phba->hbalock); 4111 return; 4112 } 4113 /* Clear FCF rediscovery timer pending flag */ 4114 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4115 /* FCF rediscovery event to worker thread */ 4116 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4117 spin_unlock_irq(&phba->hbalock); 4118 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4119 "2776 FCF rediscover quiescent timer expired\n"); 4120 /* wake up worker thread */ 4121 lpfc_worker_wake_up(phba); 4122 } 4123 4124 /** 4125 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4126 * @phba: pointer to lpfc hba data structure. 4127 * @acqe_link: pointer to the async link completion queue entry. 4128 * 4129 * This routine is to parse the SLI4 link-attention link fault code and 4130 * translate it into the base driver's read link attention mailbox command 4131 * status. 4132 * 4133 * Return: Link-attention status in terms of base driver's coding. 4134 **/ 4135 static uint16_t 4136 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4137 struct lpfc_acqe_link *acqe_link) 4138 { 4139 uint16_t latt_fault; 4140 4141 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4142 case LPFC_ASYNC_LINK_FAULT_NONE: 4143 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4144 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4145 latt_fault = 0; 4146 break; 4147 default: 4148 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4149 "0398 Invalid link fault code: x%x\n", 4150 bf_get(lpfc_acqe_link_fault, acqe_link)); 4151 latt_fault = MBXERR_ERROR; 4152 break; 4153 } 4154 return latt_fault; 4155 } 4156 4157 /** 4158 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4159 * @phba: pointer to lpfc hba data structure. 4160 * @acqe_link: pointer to the async link completion queue entry. 
4161 * 4162 * This routine is to parse the SLI4 link attention type and translate it 4163 * into the base driver's link attention type coding. 4164 * 4165 * Return: Link attention type in terms of base driver's coding. 4166 **/ 4167 static uint8_t 4168 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4169 struct lpfc_acqe_link *acqe_link) 4170 { 4171 uint8_t att_type; 4172 4173 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4174 case LPFC_ASYNC_LINK_STATUS_DOWN: 4175 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4176 att_type = LPFC_ATT_LINK_DOWN; 4177 break; 4178 case LPFC_ASYNC_LINK_STATUS_UP: 4179 /* Ignore physical link up events - wait for logical link up */ 4180 att_type = LPFC_ATT_RESERVED; 4181 break; 4182 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4183 att_type = LPFC_ATT_LINK_UP; 4184 break; 4185 default: 4186 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4187 "0399 Invalid link attention type: x%x\n", 4188 bf_get(lpfc_acqe_link_status, acqe_link)); 4189 att_type = LPFC_ATT_RESERVED; 4190 break; 4191 } 4192 return att_type; 4193 } 4194 4195 /** 4196 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4197 * @phba: pointer to lpfc hba data structure. 4198 * 4199 * This routine is to get an SLI3 FC port's link speed in Mbps. 4200 * 4201 * Return: link speed in terms of Mbps. 4202 **/ 4203 uint32_t 4204 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4205 { 4206 uint32_t link_speed; 4207 4208 if (!lpfc_is_link_up(phba)) 4209 return 0; 4210 4211 if (phba->sli_rev <= LPFC_SLI_REV3) { 4212 switch (phba->fc_linkspeed) { 4213 case LPFC_LINK_SPEED_1GHZ: 4214 link_speed = 1000; 4215 break; 4216 case LPFC_LINK_SPEED_2GHZ: 4217 link_speed = 2000; 4218 break; 4219 case LPFC_LINK_SPEED_4GHZ: 4220 link_speed = 4000; 4221 break; 4222 case LPFC_LINK_SPEED_8GHZ: 4223 link_speed = 8000; 4224 break; 4225 case LPFC_LINK_SPEED_10GHZ: 4226 link_speed = 10000; 4227 break; 4228 case LPFC_LINK_SPEED_16GHZ: 4229 link_speed = 16000; 4230 break; 4231 default: 4232 link_speed = 0; 4233 } 4234 } else { 4235 if (phba->sli4_hba.link_state.logical_speed) 4236 link_speed = 4237 phba->sli4_hba.link_state.logical_speed; 4238 else 4239 link_speed = phba->sli4_hba.link_state.speed; 4240 } 4241 return link_speed; 4242 } 4243 4244 /** 4245 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 4246 * @phba: pointer to lpfc hba data structure. 4247 * @evt_code: asynchronous event code. 4248 * @speed_code: asynchronous event link speed code. 4249 * 4250 * This routine is to parse the giving SLI4 async event link speed code into 4251 * value of Mbps for the link speed. 4252 * 4253 * Return: link speed in terms of Mbps. 
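 *
 * For example, a LINK trailer event carrying LPFC_ASYNC_LINK_SPEED_10GBPS is
 * reported as 10000 Mbps and an FC trailer event carrying LPFC_FC_LA_SPEED_16G
 * as 16000 Mbps; any speed code this routine does not recognize is reported
 * as 0.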
4254 **/ 4255 static uint32_t 4256 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4257 uint8_t speed_code) 4258 { 4259 uint32_t port_speed; 4260 4261 switch (evt_code) { 4262 case LPFC_TRAILER_CODE_LINK: 4263 switch (speed_code) { 4264 case LPFC_ASYNC_LINK_SPEED_ZERO: 4265 port_speed = 0; 4266 break; 4267 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4268 port_speed = 10; 4269 break; 4270 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4271 port_speed = 100; 4272 break; 4273 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4274 port_speed = 1000; 4275 break; 4276 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4277 port_speed = 10000; 4278 break; 4279 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4280 port_speed = 20000; 4281 break; 4282 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4283 port_speed = 25000; 4284 break; 4285 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4286 port_speed = 40000; 4287 break; 4288 default: 4289 port_speed = 0; 4290 } 4291 break; 4292 case LPFC_TRAILER_CODE_FC: 4293 switch (speed_code) { 4294 case LPFC_FC_LA_SPEED_UNKNOWN: 4295 port_speed = 0; 4296 break; 4297 case LPFC_FC_LA_SPEED_1G: 4298 port_speed = 1000; 4299 break; 4300 case LPFC_FC_LA_SPEED_2G: 4301 port_speed = 2000; 4302 break; 4303 case LPFC_FC_LA_SPEED_4G: 4304 port_speed = 4000; 4305 break; 4306 case LPFC_FC_LA_SPEED_8G: 4307 port_speed = 8000; 4308 break; 4309 case LPFC_FC_LA_SPEED_10G: 4310 port_speed = 10000; 4311 break; 4312 case LPFC_FC_LA_SPEED_16G: 4313 port_speed = 16000; 4314 break; 4315 case LPFC_FC_LA_SPEED_32G: 4316 port_speed = 32000; 4317 break; 4318 default: 4319 port_speed = 0; 4320 } 4321 break; 4322 default: 4323 port_speed = 0; 4324 } 4325 return port_speed; 4326 } 4327 4328 /** 4329 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4330 * @phba: pointer to lpfc hba data structure. 4331 * @acqe_link: pointer to the async link completion queue entry. 4332 * 4333 * This routine is to handle the SLI4 asynchronous FCoE link event. 
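 * In FC mode the READ_TOPOLOGY mailbox command is actually issued to the
 * port; in FCoE mode the link attention fields are filled in from the ACQE
 * and the READ_TOPOLOGY completion handler is invoked directly.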
4334 **/ 4335 static void 4336 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 4337 struct lpfc_acqe_link *acqe_link) 4338 { 4339 struct lpfc_dmabuf *mp; 4340 LPFC_MBOXQ_t *pmb; 4341 MAILBOX_t *mb; 4342 struct lpfc_mbx_read_top *la; 4343 uint8_t att_type; 4344 int rc; 4345 4346 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 4347 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 4348 return; 4349 phba->fcoe_eventtag = acqe_link->event_tag; 4350 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4351 if (!pmb) { 4352 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4353 "0395 The mboxq allocation failed\n"); 4354 return; 4355 } 4356 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4357 if (!mp) { 4358 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4359 "0396 The lpfc_dmabuf allocation failed\n"); 4360 goto out_free_pmb; 4361 } 4362 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4363 if (!mp->virt) { 4364 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4365 "0397 The mbuf allocation failed\n"); 4366 goto out_free_dmabuf; 4367 } 4368 4369 /* Cleanup any outstanding ELS commands */ 4370 lpfc_els_flush_all_cmd(phba); 4371 4372 /* Block ELS IOCBs until we have done process link event */ 4373 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4374 4375 /* Update link event statistics */ 4376 phba->sli.slistat.link_event++; 4377 4378 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4379 lpfc_read_topology(phba, pmb, mp); 4380 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4381 pmb->vport = phba->pport; 4382 4383 /* Keep the link status for extra SLI4 state machine reference */ 4384 phba->sli4_hba.link_state.speed = 4385 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 4386 bf_get(lpfc_acqe_link_speed, acqe_link)); 4387 phba->sli4_hba.link_state.duplex = 4388 bf_get(lpfc_acqe_link_duplex, acqe_link); 4389 phba->sli4_hba.link_state.status = 4390 bf_get(lpfc_acqe_link_status, acqe_link); 4391 phba->sli4_hba.link_state.type = 4392 bf_get(lpfc_acqe_link_type, acqe_link); 4393 phba->sli4_hba.link_state.number = 4394 bf_get(lpfc_acqe_link_number, acqe_link); 4395 phba->sli4_hba.link_state.fault = 4396 bf_get(lpfc_acqe_link_fault, acqe_link); 4397 phba->sli4_hba.link_state.logical_speed = 4398 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 4399 4400 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4401 "2900 Async FC/FCoE Link event - Speed:%dGBit " 4402 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 4403 "Logical speed:%dMbps Fault:%d\n", 4404 phba->sli4_hba.link_state.speed, 4405 phba->sli4_hba.link_state.topology, 4406 phba->sli4_hba.link_state.status, 4407 phba->sli4_hba.link_state.type, 4408 phba->sli4_hba.link_state.number, 4409 phba->sli4_hba.link_state.logical_speed, 4410 phba->sli4_hba.link_state.fault); 4411 /* 4412 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 4413 * topology info. Note: Optional for non FC-AL ports. 4414 */ 4415 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4416 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4417 if (rc == MBX_NOT_FINISHED) 4418 goto out_free_dmabuf; 4419 return; 4420 } 4421 /* 4422 * For FCoE Mode: fill in all the topology information we need and call 4423 * the READ_TOPOLOGY completion routine to continue without actually 4424 * sending the READ_TOPOLOGY mailbox command to the port. 
4425 */ 4426 /* Parse and translate status field */ 4427 mb = &pmb->u.mb; 4428 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 4429 4430 /* Parse and translate link attention fields */ 4431 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 4432 la->eventTag = acqe_link->event_tag; 4433 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 4434 bf_set(lpfc_mbx_read_top_link_spd, la, 4435 (bf_get(lpfc_acqe_link_speed, acqe_link))); 4436 4437 /* Fake the following irrelevant fields */ 4438 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 4439 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 4440 bf_set(lpfc_mbx_read_top_il, la, 0); 4441 bf_set(lpfc_mbx_read_top_pb, la, 0); 4442 bf_set(lpfc_mbx_read_top_fa, la, 0); 4443 bf_set(lpfc_mbx_read_top_mm, la, 0); 4444 4445 /* Invoke the lpfc_handle_latt mailbox command callback function */ 4446 lpfc_mbx_cmpl_read_topology(phba, pmb); 4447 4448 return; 4449 4450 out_free_dmabuf: 4451 kfree(mp); 4452 out_free_pmb: 4453 mempool_free(pmb, phba->mbox_mem_pool); 4454 } 4455 4456 /** 4457 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 4458 * @phba: pointer to lpfc hba data structure. 4459 * @acqe_fc: pointer to the async fc completion queue entry. 4460 * 4461 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 4462 * that the event was received and then issue a read_topology mailbox command so 4463 * that the rest of the driver will treat it the same as SLI3. 4464 **/ 4465 static void 4466 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 4467 { 4468 struct lpfc_dmabuf *mp; 4469 LPFC_MBOXQ_t *pmb; 4470 MAILBOX_t *mb; 4471 struct lpfc_mbx_read_top *la; 4472 int rc; 4473 4474 if (bf_get(lpfc_trailer_type, acqe_fc) != 4475 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 4476 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4477 "2895 Non FC link Event detected.(%d)\n", 4478 bf_get(lpfc_trailer_type, acqe_fc)); 4479 return; 4480 } 4481 /* Keep the link status for extra SLI4 state machine reference */ 4482 phba->sli4_hba.link_state.speed = 4483 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 4484 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 4485 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 4486 phba->sli4_hba.link_state.topology = 4487 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 4488 phba->sli4_hba.link_state.status = 4489 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 4490 phba->sli4_hba.link_state.type = 4491 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 4492 phba->sli4_hba.link_state.number = 4493 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 4494 phba->sli4_hba.link_state.fault = 4495 bf_get(lpfc_acqe_link_fault, acqe_fc); 4496 phba->sli4_hba.link_state.logical_speed = 4497 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 4498 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4499 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 4500 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 4501 "%dMbps Fault:%d\n", 4502 phba->sli4_hba.link_state.speed, 4503 phba->sli4_hba.link_state.topology, 4504 phba->sli4_hba.link_state.status, 4505 phba->sli4_hba.link_state.type, 4506 phba->sli4_hba.link_state.number, 4507 phba->sli4_hba.link_state.logical_speed, 4508 phba->sli4_hba.link_state.fault); 4509 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4510 if (!pmb) { 4511 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4512 "2897 The mboxq allocation failed\n"); 4513 return; 4514 } 4515 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4516 if (!mp) { 4517
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4518 "2898 The lpfc_dmabuf allocation failed\n"); 4519 goto out_free_pmb; 4520 } 4521 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4522 if (!mp->virt) { 4523 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4524 "2899 The mbuf allocation failed\n"); 4525 goto out_free_dmabuf; 4526 } 4527 4528 /* Cleanup any outstanding ELS commands */ 4529 lpfc_els_flush_all_cmd(phba); 4530 4531 /* Block ELS IOCBs until we have done process link event */ 4532 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4533 4534 /* Update link event statistics */ 4535 phba->sli.slistat.link_event++; 4536 4537 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4538 lpfc_read_topology(phba, pmb, mp); 4539 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4540 pmb->vport = phba->pport; 4541 4542 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 4543 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 4544 4545 switch (phba->sli4_hba.link_state.status) { 4546 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 4547 phba->link_flag |= LS_MDS_LINK_DOWN; 4548 break; 4549 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 4550 phba->link_flag |= LS_MDS_LOOPBACK; 4551 break; 4552 default: 4553 break; 4554 } 4555 4556 /* Parse and translate status field */ 4557 mb = &pmb->u.mb; 4558 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, 4559 (void *)acqe_fc); 4560 4561 /* Parse and translate link attention fields */ 4562 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 4563 la->eventTag = acqe_fc->event_tag; 4564 4565 if (phba->sli4_hba.link_state.status == 4566 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 4567 bf_set(lpfc_mbx_read_top_att_type, la, 4568 LPFC_FC_LA_TYPE_UNEXP_WWPN); 4569 } else { 4570 bf_set(lpfc_mbx_read_top_att_type, la, 4571 LPFC_FC_LA_TYPE_LINK_DOWN); 4572 } 4573 /* Invoke the mailbox command callback function */ 4574 lpfc_mbx_cmpl_read_topology(phba, pmb); 4575 4576 return; 4577 } 4578 4579 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4580 if (rc == MBX_NOT_FINISHED) 4581 goto out_free_dmabuf; 4582 return; 4583 4584 out_free_dmabuf: 4585 kfree(mp); 4586 out_free_pmb: 4587 mempool_free(pmb, phba->mbox_mem_pool); 4588 } 4589 4590 /** 4591 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 4592 * @phba: pointer to lpfc hba data structure. 4593 * @acqe_fc: pointer to the async SLI completion queue entry. 4594 * 4595 * This routine is to handle the SLI4 asynchronous SLI events. 
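 * The event types currently handled here are over/normal temperature,
 * misconfigured optics and remote D_Port test initiation; any other type is
 * simply logged.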
4596 **/ 4597 static void 4598 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 4599 { 4600 char port_name; 4601 char message[128]; 4602 uint8_t status; 4603 uint8_t evt_type; 4604 uint8_t operational = 0; 4605 struct temp_event temp_event_data; 4606 struct lpfc_acqe_misconfigured_event *misconfigured; 4607 struct Scsi_Host *shost; 4608 4609 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 4610 4611 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4612 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 4613 "x%08x SLI Event Type:%d\n", 4614 acqe_sli->event_data1, acqe_sli->event_data2, 4615 evt_type); 4616 4617 port_name = phba->Port[0]; 4618 if (port_name == 0x00) 4619 port_name = '?'; /* get port name is empty */ 4620 4621 switch (evt_type) { 4622 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 4623 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4624 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 4625 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4626 4627 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4628 "3190 Over Temperature:%d Celsius- Port Name %c\n", 4629 acqe_sli->event_data1, port_name); 4630 4631 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 4632 shost = lpfc_shost_from_vport(phba->pport); 4633 fc_host_post_vendor_event(shost, fc_get_event_number(), 4634 sizeof(temp_event_data), 4635 (char *)&temp_event_data, 4636 SCSI_NL_VID_TYPE_PCI 4637 | PCI_VENDOR_ID_EMULEX); 4638 break; 4639 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 4640 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4641 temp_event_data.event_code = LPFC_NORMAL_TEMP; 4642 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4643 4644 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4645 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 4646 acqe_sli->event_data1, port_name); 4647 4648 shost = lpfc_shost_from_vport(phba->pport); 4649 fc_host_post_vendor_event(shost, fc_get_event_number(), 4650 sizeof(temp_event_data), 4651 (char *)&temp_event_data, 4652 SCSI_NL_VID_TYPE_PCI 4653 | PCI_VENDOR_ID_EMULEX); 4654 break; 4655 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 4656 misconfigured = (struct lpfc_acqe_misconfigured_event *) 4657 &acqe_sli->event_data1; 4658 4659 /* fetch the status for this port */ 4660 switch (phba->sli4_hba.lnk_info.lnk_no) { 4661 case LPFC_LINK_NUMBER_0: 4662 status = bf_get(lpfc_sli_misconfigured_port0_state, 4663 &misconfigured->theEvent); 4664 operational = bf_get(lpfc_sli_misconfigured_port0_op, 4665 &misconfigured->theEvent); 4666 break; 4667 case LPFC_LINK_NUMBER_1: 4668 status = bf_get(lpfc_sli_misconfigured_port1_state, 4669 &misconfigured->theEvent); 4670 operational = bf_get(lpfc_sli_misconfigured_port1_op, 4671 &misconfigured->theEvent); 4672 break; 4673 case LPFC_LINK_NUMBER_2: 4674 status = bf_get(lpfc_sli_misconfigured_port2_state, 4675 &misconfigured->theEvent); 4676 operational = bf_get(lpfc_sli_misconfigured_port2_op, 4677 &misconfigured->theEvent); 4678 break; 4679 case LPFC_LINK_NUMBER_3: 4680 status = bf_get(lpfc_sli_misconfigured_port3_state, 4681 &misconfigured->theEvent); 4682 operational = bf_get(lpfc_sli_misconfigured_port3_op, 4683 &misconfigured->theEvent); 4684 break; 4685 default: 4686 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4687 "3296 " 4688 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 4689 "event: Invalid link %d", 4690 phba->sli4_hba.lnk_info.lnk_no); 4691 return; 4692 } 4693 4694 /* Skip if optic state unchanged */ 4695 if (phba->sli4_hba.lnk_info.optic_state == status) 4696 return; 4697 4698 switch (status) { 4699 case 
LPFC_SLI_EVENT_STATUS_VALID: 4700 sprintf(message, "Physical Link is functional"); 4701 break; 4702 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 4703 sprintf(message, "Optics faulted/incorrectly " 4704 "installed/not installed - Reseat optics, " 4705 "if issue not resolved, replace."); 4706 break; 4707 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 4708 sprintf(message, 4709 "Optics of two types installed - Remove one " 4710 "optic or install matching pair of optics."); 4711 break; 4712 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 4713 sprintf(message, "Incompatible optics - Replace with " 4714 "compatible optics for card to function."); 4715 break; 4716 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 4717 sprintf(message, "Unqualified optics - Replace with " 4718 "Avago optics for Warranty and Technical " 4719 "Support - Link is%s operational", 4720 (operational) ? " not" : ""); 4721 break; 4722 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 4723 sprintf(message, "Uncertified optics - Replace with " 4724 "Avago-certified optics to enable link " 4725 "operation - Link is%s operational", 4726 (operational) ? " not" : ""); 4727 break; 4728 default: 4729 /* firmware is reporting a status we don't know about */ 4730 sprintf(message, "Unknown event status x%02x", status); 4731 break; 4732 } 4733 phba->sli4_hba.lnk_info.optic_state = status; 4734 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4735 "3176 Port Name %c %s\n", port_name, message); 4736 break; 4737 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 4738 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4739 "3192 Remote DPort Test Initiated - " 4740 "Event Data1:x%08x Event Data2: x%08x\n", 4741 acqe_sli->event_data1, acqe_sli->event_data2); 4742 break; 4743 default: 4744 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4745 "3193 Async SLI event - Event Data1:x%08x Event Data2:" 4746 "x%08x SLI Event Type:%d\n", 4747 acqe_sli->event_data1, acqe_sli->event_data2, 4748 evt_type); 4749 break; 4750 } 4751 } 4752 4753 /** 4754 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 4755 * @vport: pointer to vport data structure. 4756 * 4757 * This routine is to perform Clear Virtual Link (CVL) on a vport in 4758 * response to a CVL event. 4759 * 4760 * Return the pointer to the ndlp with the vport if successful, otherwise 4761 * return NULL. 
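 *
 * As a side effect the port is brought down via lpfc_linkdown_port(), any
 * pending mailbox commands are cleaned up and FC_VPORT_CVL_RCVD is set in
 * the vport's fc_flag.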
4762 **/ 4763 static struct lpfc_nodelist * 4764 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 4765 { 4766 struct lpfc_nodelist *ndlp; 4767 struct Scsi_Host *shost; 4768 struct lpfc_hba *phba; 4769 4770 if (!vport) 4771 return NULL; 4772 phba = vport->phba; 4773 if (!phba) 4774 return NULL; 4775 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4776 if (!ndlp) { 4777 /* Cannot find existing Fabric ndlp, so allocate a new one */ 4778 ndlp = lpfc_nlp_init(vport, Fabric_DID); 4779 if (!ndlp) 4780 return 0; 4781 /* Set the node type */ 4782 ndlp->nlp_type |= NLP_FABRIC; 4783 /* Put ndlp onto node list */ 4784 lpfc_enqueue_node(vport, ndlp); 4785 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 4786 /* re-setup ndlp without removing from node list */ 4787 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 4788 if (!ndlp) 4789 return 0; 4790 } 4791 if ((phba->pport->port_state < LPFC_FLOGI) && 4792 (phba->pport->port_state != LPFC_VPORT_FAILED)) 4793 return NULL; 4794 /* If virtual link is not yet instantiated ignore CVL */ 4795 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 4796 && (vport->port_state != LPFC_VPORT_FAILED)) 4797 return NULL; 4798 shost = lpfc_shost_from_vport(vport); 4799 if (!shost) 4800 return NULL; 4801 lpfc_linkdown_port(vport); 4802 lpfc_cleanup_pending_mbox(vport); 4803 spin_lock_irq(shost->host_lock); 4804 vport->fc_flag |= FC_VPORT_CVL_RCVD; 4805 spin_unlock_irq(shost->host_lock); 4806 4807 return ndlp; 4808 } 4809 4810 /** 4811 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 4812 * @vport: pointer to lpfc hba data structure. 4813 * 4814 * This routine is to perform Clear Virtual Link (CVL) on all vports in 4815 * response to a FCF dead event. 4816 **/ 4817 static void 4818 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 4819 { 4820 struct lpfc_vport **vports; 4821 int i; 4822 4823 vports = lpfc_create_vport_work_array(phba); 4824 if (vports) 4825 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 4826 lpfc_sli4_perform_vport_cvl(vports[i]); 4827 lpfc_destroy_vport_work_array(phba, vports); 4828 } 4829 4830 /** 4831 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 4832 * @phba: pointer to lpfc hba data structure. 4833 * @acqe_link: pointer to the async fcoe completion queue entry. 4834 * 4835 * This routine is to handle the SLI4 asynchronous fcoe event. 
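 * The FIP event types handled here are new FCF discovery, FCF parameter
 * modification, FCF table full, FCF dead and Clear Virtual Link (CVL).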
4836 **/ 4837 static void 4838 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 4839 struct lpfc_acqe_fip *acqe_fip) 4840 { 4841 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 4842 int rc; 4843 struct lpfc_vport *vport; 4844 struct lpfc_nodelist *ndlp; 4845 struct Scsi_Host *shost; 4846 int active_vlink_present; 4847 struct lpfc_vport **vports; 4848 int i; 4849 4850 phba->fc_eventTag = acqe_fip->event_tag; 4851 phba->fcoe_eventtag = acqe_fip->event_tag; 4852 switch (event_type) { 4853 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 4854 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 4855 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 4856 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4857 LOG_DISCOVERY, 4858 "2546 New FCF event, evt_tag:x%x, " 4859 "index:x%x\n", 4860 acqe_fip->event_tag, 4861 acqe_fip->index); 4862 else 4863 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 4864 LOG_DISCOVERY, 4865 "2788 FCF param modified event, " 4866 "evt_tag:x%x, index:x%x\n", 4867 acqe_fip->event_tag, 4868 acqe_fip->index); 4869 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4870 /* 4871 * During period of FCF discovery, read the FCF 4872 * table record indexed by the event to update 4873 * FCF roundrobin failover eligible FCF bmask. 4874 */ 4875 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 4876 LOG_DISCOVERY, 4877 "2779 Read FCF (x%x) for updating " 4878 "roundrobin FCF failover bmask\n", 4879 acqe_fip->index); 4880 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 4881 } 4882 4883 /* If the FCF discovery is in progress, do nothing. */ 4884 spin_lock_irq(&phba->hbalock); 4885 if (phba->hba_flag & FCF_TS_INPROG) { 4886 spin_unlock_irq(&phba->hbalock); 4887 break; 4888 } 4889 /* If fast FCF failover rescan event is pending, do nothing */ 4890 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 4891 spin_unlock_irq(&phba->hbalock); 4892 break; 4893 } 4894 4895 /* If the FCF has been in discovered state, do nothing. */ 4896 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 4897 spin_unlock_irq(&phba->hbalock); 4898 break; 4899 } 4900 spin_unlock_irq(&phba->hbalock); 4901 4902 /* Otherwise, scan the entire FCF table and re-discover SAN */ 4903 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4904 "2770 Start FCF table scan per async FCF " 4905 "event, evt_tag:x%x, index:x%x\n", 4906 acqe_fip->event_tag, acqe_fip->index); 4907 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 4908 LPFC_FCOE_FCF_GET_FIRST); 4909 if (rc) 4910 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4911 "2547 Issue FCF scan read FCF mailbox " 4912 "command failed (x%x)\n", rc); 4913 break; 4914 4915 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 4916 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4917 "2548 FCF Table full count 0x%x tag 0x%x\n", 4918 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 4919 acqe_fip->event_tag); 4920 break; 4921 4922 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 4923 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4924 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4925 "2549 FCF (x%x) disconnected from network, " 4926 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 4927 /* 4928 * If we are in the middle of FCF failover process, clear 4929 * the corresponding FCF bit in the roundrobin bitmap. 
4930 */ 4931 spin_lock_irq(&phba->hbalock); 4932 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 4933 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 4934 spin_unlock_irq(&phba->hbalock); 4935 /* Update FLOGI FCF failover eligible FCF bmask */ 4936 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 4937 break; 4938 } 4939 spin_unlock_irq(&phba->hbalock); 4940 4941 /* If the event is not for currently used fcf do nothing */ 4942 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 4943 break; 4944 4945 /* 4946 * Otherwise, request the port to rediscover the entire FCF 4947 * table for a fast recovery in case the current FCF is no 4948 * longer valid, since we are not already in the middle of 4949 * the FCF failover process. 4950 */ 4951 spin_lock_irq(&phba->hbalock); 4952 /* Mark the fast failover process in progress */ 4953 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 4954 spin_unlock_irq(&phba->hbalock); 4955 4956 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4957 "2771 Start FCF fast failover process due to " 4958 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 4959 "\n", acqe_fip->event_tag, acqe_fip->index); 4960 rc = lpfc_sli4_redisc_fcf_table(phba); 4961 if (rc) { 4962 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4963 LOG_DISCOVERY, 4964 "2772 Issue FCF rediscover mailbox " 4965 "command failed, fail through to FCF " 4966 "dead event\n"); 4967 spin_lock_irq(&phba->hbalock); 4968 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 4969 spin_unlock_irq(&phba->hbalock); 4970 /* 4971 * Last resort will fail over by treating this 4972 * as a link down to FCF registration. 4973 */ 4974 lpfc_sli4_fcf_dead_failthrough(phba); 4975 } else { 4976 /* Reset FCF roundrobin bmask for new discovery */ 4977 lpfc_sli4_clear_fcf_rr_bmask(phba); 4978 /* 4979 * Handling fast FCF failover to a DEAD FCF event is 4980 * considered equivalent to receiving a CVL on all vports. 4981 */ 4982 lpfc_sli4_perform_all_vport_cvl(phba); 4983 } 4984 break; 4985 case LPFC_FIP_EVENT_TYPE_CVL: 4986 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4987 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4988 "2718 Clear Virtual Link Received for VPI 0x%x" 4989 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 4990 4991 vport = lpfc_find_vport_by_vpid(phba, 4992 acqe_fip->index); 4993 ndlp = lpfc_sli4_perform_vport_cvl(vport); 4994 if (!ndlp) 4995 break; 4996 active_vlink_present = 0; 4997 4998 vports = lpfc_create_vport_work_array(phba); 4999 if (vports) { 5000 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5001 i++) { 5002 if ((!(vports[i]->fc_flag & 5003 FC_VPORT_CVL_RCVD)) && 5004 (vports[i]->port_state > LPFC_FDISC)) { 5005 active_vlink_present = 1; 5006 break; 5007 } 5008 } 5009 lpfc_destroy_vport_work_array(phba, vports); 5010 } 5011 5012 /* 5013 * Don't re-instantiate if vport is marked for deletion. 5014 * If we are here first then vport_delete is going to wait 5015 * for discovery to complete. 5016 */ 5017 if (!(vport->load_flag & FC_UNLOADING) && 5018 active_vlink_present) { 5019 /* 5020 * If there are other active VLinks present, 5021 * re-instantiate the Vlink using FDISC.
5022 */ 5023 mod_timer(&ndlp->nlp_delayfunc, 5024 jiffies + msecs_to_jiffies(1000)); 5025 shost = lpfc_shost_from_vport(vport); 5026 spin_lock_irq(shost->host_lock); 5027 ndlp->nlp_flag |= NLP_DELAY_TMO; 5028 spin_unlock_irq(shost->host_lock); 5029 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 5030 vport->port_state = LPFC_FDISC; 5031 } else { 5032 /* 5033 * Otherwise, we request the port to rediscover 5034 * the entire FCF table for a fast recovery in 5035 * case the current FCF is no longer valid, 5036 * provided we are not already in the FCF 5037 * failover process. 5038 */ 5039 spin_lock_irq(&phba->hbalock); 5040 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5041 spin_unlock_irq(&phba->hbalock); 5042 break; 5043 } 5044 /* Mark the fast failover process in progress */ 5045 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 5046 spin_unlock_irq(&phba->hbalock); 5047 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5048 LOG_DISCOVERY, 5049 "2773 Start FCF failover per CVL, " 5050 "evt_tag:x%x\n", acqe_fip->event_tag); 5051 rc = lpfc_sli4_redisc_fcf_table(phba); 5052 if (rc) { 5053 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5054 LOG_DISCOVERY, 5055 "2774 Issue FCF rediscover " 5056 "mailbox command failed, fail " 5057 "through to CVL event\n"); 5058 spin_lock_irq(&phba->hbalock); 5059 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 5060 spin_unlock_irq(&phba->hbalock); 5061 /* 5062 * Last resort will be to retry on the 5063 * currently registered FCF entry. 5064 */ 5065 lpfc_retry_pport_discovery(phba); 5066 } else 5067 /* 5068 * Reset FCF roundrobin bmask for new 5069 * discovery. 5070 */ 5071 lpfc_sli4_clear_fcf_rr_bmask(phba); 5072 } 5073 break; 5074 default: 5075 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5076 "0288 Unknown FCoE event type 0x%x event tag " 5077 "0x%x\n", event_type, acqe_fip->event_tag); 5078 break; 5079 } 5080 } 5081 5082 /** 5083 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 5084 * @phba: pointer to lpfc hba data structure. 5085 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 5086 * 5087 * This routine is to handle the SLI4 asynchronous dcbx event. 5088 **/ 5089 static void 5090 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 5091 struct lpfc_acqe_dcbx *acqe_dcbx) 5092 { 5093 phba->fc_eventTag = acqe_dcbx->event_tag; 5094 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5095 "0290 The SLI4 DCBX asynchronous event is not " 5096 "handled yet\n"); 5097 } 5098 5099 /** 5100 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 5101 * @phba: pointer to lpfc hba data structure. 5102 * @acqe_grp5: pointer to the async grp5 completion queue entry. 5103 * 5104 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 5105 * is an asynchronous notification of a logical link speed change. The Port 5106 * reports the logical link speed in units of 10Mbps.
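 * For example, a reported value of 1000 corresponds to a logical link speed
 * of 10000 Mbps (10 Gbps).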
5107 **/ 5108 static void 5109 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 5110 struct lpfc_acqe_grp5 *acqe_grp5) 5111 { 5112 uint16_t prev_ll_spd; 5113 5114 phba->fc_eventTag = acqe_grp5->event_tag; 5115 phba->fcoe_eventtag = acqe_grp5->event_tag; 5116 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 5117 phba->sli4_hba.link_state.logical_speed = 5118 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 5119 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5120 "2789 GRP5 Async Event: Updating logical link speed " 5121 "from %dMbps to %dMbps\n", prev_ll_spd, 5122 phba->sli4_hba.link_state.logical_speed); 5123 } 5124 5125 /** 5126 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events 5127 * @phba: pointer to lpfc hba data structure. 5128 * 5129 * This routine is invoked by the worker thread to process all the pending 5130 * SLI4 asynchronous events. 5131 **/ 5132 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 5133 { 5134 struct lpfc_cq_event *cq_event; 5135 5136 /* First, declare the async event has been handled */ 5137 spin_lock_irq(&phba->hbalock); 5138 phba->hba_flag &= ~ASYNC_EVENT; 5139 spin_unlock_irq(&phba->hbalock); 5140 /* Now, handle all the async events */ 5141 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 5142 /* Get the first event from the head of the event queue */ 5143 spin_lock_irq(&phba->hbalock); 5144 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 5145 cq_event, struct lpfc_cq_event, list); 5146 spin_unlock_irq(&phba->hbalock); 5147 /* Process the asynchronous event */ 5148 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 5149 case LPFC_TRAILER_CODE_LINK: 5150 lpfc_sli4_async_link_evt(phba, 5151 &cq_event->cqe.acqe_link); 5152 break; 5153 case LPFC_TRAILER_CODE_FCOE: 5154 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 5155 break; 5156 case LPFC_TRAILER_CODE_DCBX: 5157 lpfc_sli4_async_dcbx_evt(phba, 5158 &cq_event->cqe.acqe_dcbx); 5159 break; 5160 case LPFC_TRAILER_CODE_GRP5: 5161 lpfc_sli4_async_grp5_evt(phba, 5162 &cq_event->cqe.acqe_grp5); 5163 break; 5164 case LPFC_TRAILER_CODE_FC: 5165 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 5166 break; 5167 case LPFC_TRAILER_CODE_SLI: 5168 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 5169 break; 5170 default: 5171 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5172 "1804 Invalid asynchronous event code: " 5173 "x%x\n", bf_get(lpfc_trailer_code, 5174 &cq_event->cqe.mcqe_cmpl)); 5175 break; 5176 } 5177 /* Return the processed completion event to the free pool */ 5178 lpfc_sli4_cq_event_release(phba, cq_event); 5179 } 5180 } 5181 5182 /** 5183 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 5184 * @phba: pointer to lpfc hba data structure. 5185 * 5186 * This routine is invoked by the worker thread to process FCF table 5187 * rediscovery pending completion event.
5188 **/ 5189 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5190 { 5191 int rc; 5192 5193 spin_lock_irq(&phba->hbalock); 5194 /* Clear FCF rediscovery timeout event */ 5195 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 5196 /* Clear driver fast failover FCF record flag */ 5197 phba->fcf.failover_rec.flag = 0; 5198 /* Set state for FCF fast failover */ 5199 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 5200 spin_unlock_irq(&phba->hbalock); 5201 5202 /* Scan FCF table from the first entry to re-discover SAN */ 5203 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5204 "2777 Start post-quiescent FCF table scan\n"); 5205 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5206 if (rc) 5207 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5208 "2747 Issue FCF scan read FCF mailbox " 5209 "command failed 0x%x\n", rc); 5210 } 5211 5212 /** 5213 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 5214 * @phba: pointer to lpfc hba data structure. 5215 * @dev_grp: The HBA PCI-Device group number. 5216 * 5217 * This routine is invoked to set up the per HBA PCI-Device group function 5218 * API jump table entries. 5219 * 5220 * Return: 0 if success, otherwise -ENODEV 5221 **/ 5222 int 5223 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5224 { 5225 int rc; 5226 5227 /* Set up lpfc PCI-device group */ 5228 phba->pci_dev_grp = dev_grp; 5229 5230 /* The LPFC_PCI_DEV_OC uses SLI4 */ 5231 if (dev_grp == LPFC_PCI_DEV_OC) 5232 phba->sli_rev = LPFC_SLI_REV4; 5233 5234 /* Set up device INIT API function jump table */ 5235 rc = lpfc_init_api_table_setup(phba, dev_grp); 5236 if (rc) 5237 return -ENODEV; 5238 /* Set up SCSI API function jump table */ 5239 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 5240 if (rc) 5241 return -ENODEV; 5242 /* Set up SLI API function jump table */ 5243 rc = lpfc_sli_api_table_setup(phba, dev_grp); 5244 if (rc) 5245 return -ENODEV; 5246 /* Set up MBOX API function jump table */ 5247 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 5248 if (rc) 5249 return -ENODEV; 5250 5251 return 0; 5252 } 5253 5254 /** 5255 * lpfc_log_intr_mode - Log the active interrupt mode 5256 * @phba: pointer to lpfc hba data structure. 5257 * @intr_mode: active interrupt mode adopted. 5258 * 5259 * This routine it invoked to log the currently used active interrupt mode 5260 * to the device. 5261 **/ 5262 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 5263 { 5264 switch (intr_mode) { 5265 case 0: 5266 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5267 "0470 Enable INTx interrupt mode.\n"); 5268 break; 5269 case 1: 5270 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5271 "0481 Enabled MSI interrupt mode.\n"); 5272 break; 5273 case 2: 5274 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5275 "0480 Enabled MSI-X interrupt mode.\n"); 5276 break; 5277 default: 5278 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5279 "0482 Illegal interrupt mode.\n"); 5280 break; 5281 } 5282 return; 5283 } 5284 5285 /** 5286 * lpfc_enable_pci_dev - Enable a generic PCI device. 5287 * @phba: pointer to lpfc hba data structure. 5288 * 5289 * This routine is invoked to enable the PCI device that is common to all 5290 * PCI devices. 
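 * Enabling covers pci_enable_device_mem(), reserving the memory-mapped BAR
 * regions, setting the device as bus master and saving the PCI state so that
 * EEH recovery can restore it later.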
5291 * 5292 * Return codes 5293 * 0 - successful 5294 * other values - error 5295 **/ 5296 static int 5297 lpfc_enable_pci_dev(struct lpfc_hba *phba) 5298 { 5299 struct pci_dev *pdev; 5300 5301 /* Obtain PCI device reference */ 5302 if (!phba->pcidev) 5303 goto out_error; 5304 else 5305 pdev = phba->pcidev; 5306 /* Enable PCI device */ 5307 if (pci_enable_device_mem(pdev)) 5308 goto out_error; 5309 /* Request PCI resource for the device */ 5310 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 5311 goto out_disable_device; 5312 /* Set up device as PCI master and save state for EEH */ 5313 pci_set_master(pdev); 5314 pci_try_set_mwi(pdev); 5315 pci_save_state(pdev); 5316 5317 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 5318 if (pci_is_pcie(pdev)) 5319 pdev->needs_freset = 1; 5320 5321 return 0; 5322 5323 out_disable_device: 5324 pci_disable_device(pdev); 5325 out_error: 5326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5327 "1401 Failed to enable pci device\n"); 5328 return -ENODEV; 5329 } 5330 5331 /** 5332 * lpfc_disable_pci_dev - Disable a generic PCI device. 5333 * @phba: pointer to lpfc hba data structure. 5334 * 5335 * This routine is invoked to disable the PCI device that is common to all 5336 * PCI devices. 5337 **/ 5338 static void 5339 lpfc_disable_pci_dev(struct lpfc_hba *phba) 5340 { 5341 struct pci_dev *pdev; 5342 5343 /* Obtain PCI device reference */ 5344 if (!phba->pcidev) 5345 return; 5346 else 5347 pdev = phba->pcidev; 5348 /* Release PCI resource and disable PCI device */ 5349 pci_release_mem_regions(pdev); 5350 pci_disable_device(pdev); 5351 5352 return; 5353 } 5354 5355 /** 5356 * lpfc_reset_hba - Reset a hba 5357 * @phba: pointer to lpfc hba data structure. 5358 * 5359 * This routine is invoked to reset an HBA device. It brings the HBA 5360 * offline, performs a board restart, and then brings the board back 5361 * online. The lpfc_offline call invokes lpfc_sli_hba_down, which cleans 5362 * up any outstanding mailbox commands. 5363 **/ 5364 void 5365 lpfc_reset_hba(struct lpfc_hba *phba) 5366 { 5367 /* If resets are disabled then set error state and return. */ 5368 if (!phba->cfg_enable_hba_reset) { 5369 phba->link_state = LPFC_HBA_ERROR; 5370 return; 5371 } 5372 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 5373 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 5374 else 5375 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 5376 lpfc_offline(phba); 5377 lpfc_sli_brdrestart(phba); 5378 lpfc_online(phba); 5379 lpfc_unblock_mgmt_io(phba); 5380 } 5381 5382 /** 5383 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 5384 * @phba: pointer to lpfc hba data structure. 5385 * 5386 * This function reads the PCI SR-IOV extended capability of the physical 5387 * function and returns the maximum number of virtual functions (TotalVFs) 5388 * that the adapter supports. A return value of 0 means the function does 5389 * not have an SR-IOV capability, so no virtual functions can be enabled 5390 * on it.
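 * For example (illustrative values), on an adapter whose SR-IOV capability
 * advertises TotalVFs of 16 this routine returns 16, while on a function
 * without an SR-IOV capability it returns 0.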
5391 **/ 5392 uint16_t 5393 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 5394 { 5395 struct pci_dev *pdev = phba->pcidev; 5396 uint16_t nr_virtfn; 5397 int pos; 5398 5399 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 5400 if (pos == 0) 5401 return 0; 5402 5403 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 5404 return nr_virtfn; 5405 } 5406 5407 /** 5408 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 5409 * @phba: pointer to lpfc hba data structure. 5410 * @nr_vfn: number of virtual functions to be enabled. 5411 * 5412 * This function enables the PCI SR-IOV virtual functions to a physical 5413 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 5414 * enable the number of virtual functions to the physical function. As 5415 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 5416 * API call does not considered as an error condition for most of the device. 5417 **/ 5418 int 5419 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 5420 { 5421 struct pci_dev *pdev = phba->pcidev; 5422 uint16_t max_nr_vfn; 5423 int rc; 5424 5425 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 5426 if (nr_vfn > max_nr_vfn) { 5427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5428 "3057 Requested vfs (%d) greater than " 5429 "supported vfs (%d)", nr_vfn, max_nr_vfn); 5430 return -EINVAL; 5431 } 5432 5433 rc = pci_enable_sriov(pdev, nr_vfn); 5434 if (rc) { 5435 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5436 "2806 Failed to enable sriov on this device " 5437 "with vfn number nr_vf:%d, rc:%d\n", 5438 nr_vfn, rc); 5439 } else 5440 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5441 "2807 Successful enable sriov on this device " 5442 "with vfn number nr_vf:%d\n", nr_vfn); 5443 return rc; 5444 } 5445 5446 /** 5447 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 5448 * @phba: pointer to lpfc hba data structure. 5449 * 5450 * This routine is invoked to set up the driver internal resources before the 5451 * device specific resource setup to support the HBA device it attached to. 5452 * 5453 * Return codes 5454 * 0 - successful 5455 * other values - error 5456 **/ 5457 static int 5458 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 5459 { 5460 struct lpfc_sli *psli = &phba->sli; 5461 5462 /* 5463 * Driver resources common to all SLI revisions 5464 */ 5465 atomic_set(&phba->fast_event_count, 0); 5466 spin_lock_init(&phba->hbalock); 5467 5468 /* Initialize ndlp management spinlock */ 5469 spin_lock_init(&phba->ndlp_lock); 5470 5471 INIT_LIST_HEAD(&phba->port_list); 5472 INIT_LIST_HEAD(&phba->work_list); 5473 init_waitqueue_head(&phba->wait_4_mlo_m_q); 5474 5475 /* Initialize the wait queue head for the kernel thread */ 5476 init_waitqueue_head(&phba->work_waitq); 5477 5478 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5479 "1403 Protocols supported %s %s %s\n", 5480 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 5481 "SCSI" : " "), 5482 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 5483 "NVME" : " "), 5484 (phba->nvmet_support ? 
"NVMET" : " ")); 5485 5486 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 5487 /* Initialize the scsi buffer list used by driver for scsi IO */ 5488 spin_lock_init(&phba->scsi_buf_list_get_lock); 5489 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 5490 spin_lock_init(&phba->scsi_buf_list_put_lock); 5491 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 5492 } 5493 5494 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 5495 (phba->nvmet_support == 0)) { 5496 /* Initialize the NVME buffer list used by driver for NVME IO */ 5497 spin_lock_init(&phba->nvme_buf_list_get_lock); 5498 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get); 5499 spin_lock_init(&phba->nvme_buf_list_put_lock); 5500 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); 5501 } 5502 5503 /* Initialize the fabric iocb list */ 5504 INIT_LIST_HEAD(&phba->fabric_iocb_list); 5505 5506 /* Initialize list to save ELS buffers */ 5507 INIT_LIST_HEAD(&phba->elsbuf); 5508 5509 /* Initialize FCF connection rec list */ 5510 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 5511 5512 /* Initialize OAS configuration list */ 5513 spin_lock_init(&phba->devicelock); 5514 INIT_LIST_HEAD(&phba->luns); 5515 5516 /* MBOX heartbeat timer */ 5517 setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba); 5518 /* Fabric block timer */ 5519 setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 5520 (unsigned long)phba); 5521 /* EA polling mode timer */ 5522 setup_timer(&phba->eratt_poll, lpfc_poll_eratt, 5523 (unsigned long)phba); 5524 /* Heartbeat timer */ 5525 setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba); 5526 5527 return 0; 5528 } 5529 5530 /** 5531 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 5532 * @phba: pointer to lpfc hba data structure. 5533 * 5534 * This routine is invoked to set up the driver internal resources specific to 5535 * support the SLI-3 HBA device it attached to. 5536 * 5537 * Return codes 5538 * 0 - successful 5539 * other values - error 5540 **/ 5541 static int 5542 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 5543 { 5544 int rc; 5545 5546 /* 5547 * Initialize timers used by driver 5548 */ 5549 5550 /* FCP polling mode timer */ 5551 setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout, 5552 (unsigned long)phba); 5553 5554 /* Host attention work mask setup */ 5555 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 5556 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 5557 5558 /* Get all the module params for configuring this host */ 5559 lpfc_get_cfgparam(phba); 5560 /* Set up phase-1 common device driver resources */ 5561 5562 rc = lpfc_setup_driver_resource_phase1(phba); 5563 if (rc) 5564 return -ENODEV; 5565 5566 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 5567 phba->menlo_flag |= HBA_MENLO_SUPPORT; 5568 /* check for menlo minimum sg count */ 5569 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 5570 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 5571 } 5572 5573 if (!phba->sli.sli3_ring) 5574 phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING * 5575 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 5576 if (!phba->sli.sli3_ring) 5577 return -ENOMEM; 5578 5579 /* 5580 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 5581 * used to create the sg_dma_buf_pool must be dynamically calculated. 5582 */ 5583 5584 /* Initialize the host templates the configured values. 
*/ 5585 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5586 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 5587 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5588 5589 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 5590 if (phba->cfg_enable_bg) { 5591 /* 5592 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 5593 * the FCP rsp, and a BDE for each. Sice we have no control 5594 * over how many protection data segments the SCSI Layer 5595 * will hand us (ie: there could be one for every block 5596 * in the IO), we just allocate enough BDEs to accomidate 5597 * our max amount and we need to limit lpfc_sg_seg_cnt to 5598 * minimize the risk of running out. 5599 */ 5600 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5601 sizeof(struct fcp_rsp) + 5602 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); 5603 5604 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 5605 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 5606 5607 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 5608 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 5609 } else { 5610 /* 5611 * The scsi_buf for a regular I/O will hold the FCP cmnd, 5612 * the FCP rsp, a BDE for each, and a BDE for up to 5613 * cfg_sg_seg_cnt data segments. 5614 */ 5615 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5616 sizeof(struct fcp_rsp) + 5617 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 5618 5619 /* Total BDEs in BPL for scsi_sg_list */ 5620 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5621 } 5622 5623 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5624 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 5625 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5626 phba->cfg_total_seg_cnt); 5627 5628 phba->max_vpi = LPFC_MAX_VPI; 5629 /* This will be set to correct value after config_port mbox */ 5630 phba->max_vports = 0; 5631 5632 /* 5633 * Initialize the SLI Layer to run with lpfc HBAs. 5634 */ 5635 lpfc_sli_setup(phba); 5636 lpfc_sli_queue_init(phba); 5637 5638 /* Allocate device driver memory */ 5639 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 5640 return -ENOMEM; 5641 5642 /* 5643 * Enable sr-iov virtual functions if supported and configured 5644 * through the module parameter. 5645 */ 5646 if (phba->cfg_sriov_nr_virtfn > 0) { 5647 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 5648 phba->cfg_sriov_nr_virtfn); 5649 if (rc) { 5650 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5651 "2808 Requested number of SR-IOV " 5652 "virtual functions (%d) is not " 5653 "supported\n", 5654 phba->cfg_sriov_nr_virtfn); 5655 phba->cfg_sriov_nr_virtfn = 0; 5656 } 5657 } 5658 5659 return 0; 5660 } 5661 5662 /** 5663 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 5664 * @phba: pointer to lpfc hba data structure. 5665 * 5666 * This routine is invoked to unset the driver internal resources set up 5667 * specific for supporting the SLI-3 HBA device it attached to. 5668 **/ 5669 static void 5670 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 5671 { 5672 /* Free device driver memory allocated */ 5673 lpfc_mem_free_all(phba); 5674 5675 return; 5676 } 5677 5678 /** 5679 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 5680 * @phba: pointer to lpfc hba data structure. 5681 * 5682 * This routine is invoked to set up the driver internal resources specific to 5683 * support the SLI-4 HBA device it attached to. 
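 *
 * Like the SLI-3 variant above, this routine ends by honouring the
 * lpfc_sriov_nr_virtfn module parameter.  A minimal sketch of that step,
 * with the error logging omitted (illustrative only):
 *
 *	if (phba->cfg_sriov_nr_virtfn > 0 &&
 *	    lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn))
 *		phba->cfg_sriov_nr_virtfn = 0;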
5684 * 5685 * Return codes 5686 * 0 - successful 5687 * other values - error 5688 **/ 5689 static int 5690 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 5691 { 5692 LPFC_MBOXQ_t *mboxq; 5693 MAILBOX_t *mb; 5694 int rc, i, max_buf_size; 5695 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 5696 struct lpfc_mqe *mqe; 5697 int longs; 5698 int fof_vectors = 0; 5699 uint64_t wwn; 5700 5701 phba->sli4_hba.num_online_cpu = num_online_cpus(); 5702 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 5703 phba->sli4_hba.curr_disp_cpu = 0; 5704 5705 /* Get all the module params for configuring this host */ 5706 lpfc_get_cfgparam(phba); 5707 5708 /* Set up phase-1 common device driver resources */ 5709 rc = lpfc_setup_driver_resource_phase1(phba); 5710 if (rc) 5711 return -ENODEV; 5712 5713 /* Before proceed, wait for POST done and device ready */ 5714 rc = lpfc_sli4_post_status_check(phba); 5715 if (rc) 5716 return -ENODEV; 5717 5718 /* 5719 * Initialize timers used by driver 5720 */ 5721 5722 setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba); 5723 5724 /* FCF rediscover timer */ 5725 setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 5726 (unsigned long)phba); 5727 5728 /* 5729 * Control structure for handling external multi-buffer mailbox 5730 * command pass-through. 5731 */ 5732 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 5733 sizeof(struct lpfc_mbox_ext_buf_ctx)); 5734 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 5735 5736 phba->max_vpi = LPFC_MAX_VPI; 5737 5738 /* This will be set to correct value after the read_config mbox */ 5739 phba->max_vports = 0; 5740 5741 /* Program the default value of vlan_id and fc_map */ 5742 phba->valid_vlan = 0; 5743 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5744 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5745 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5746 5747 /* 5748 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 5749 * we will associate a new ring, for each EQ/CQ/WQ tuple. 5750 * The WQ create will allocate the ring. 5751 */ 5752 5753 /* 5754 * It doesn't matter what family our adapter is in, we are 5755 * limited to 2 Pages, 512 SGEs, for our SGL. 5756 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 5757 */ 5758 max_buf_size = (2 * SLI4_PAGE_SIZE); 5759 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) 5760 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; 5761 5762 /* 5763 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 5764 * used to create the sg_dma_buf_pool must be calculated. 5765 */ 5766 if (phba->cfg_enable_bg) { 5767 /* 5768 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 5769 * the FCP rsp, and a SGE. Sice we have no control 5770 * over how many protection segments the SCSI Layer 5771 * will hand us (ie: there could be one for every block 5772 * in the IO), just allocate enough SGEs to accomidate 5773 * our max amount and we need to limit lpfc_sg_seg_cnt 5774 * to minimize the risk of running out. 5775 */ 5776 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5777 sizeof(struct fcp_rsp) + max_buf_size; 5778 5779 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 5780 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 5781 5782 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) 5783 phba->cfg_sg_seg_cnt = 5784 LPFC_MAX_SG_SLI4_SEG_CNT_DIF; 5785 } else { 5786 /* 5787 * The scsi_buf for a regular I/O holds the FCP cmnd, 5788 * the FCP rsp, a SGE for each, and a SGE for up to 5789 * cfg_sg_seg_cnt data segments. 
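 *
 * Worked example: if cfg_sg_seg_cnt is 64, the buffer sized below is
 * sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
 * 66 * sizeof(struct sli4_sge), and cfg_total_seg_cnt becomes 66
 * (64 data SGEs plus the two SGEs reserved for the FCP cmnd and rsp).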
5790 */ 5791 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5792 sizeof(struct fcp_rsp) + 5793 ((phba->cfg_sg_seg_cnt + 2) * 5794 sizeof(struct sli4_sge)); 5795 5796 /* Total SGEs for scsi_sg_list */ 5797 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5798 5799 /* 5800 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only 5801 * need to post 1 page for the SGL. 5802 */ 5803 } 5804 5805 /* Initialize the host templates with the updated values. */ 5806 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5807 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5808 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 5809 5810 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 5811 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 5812 else 5813 phba->cfg_sg_dma_buf_size = 5814 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 5815 5816 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5817 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", 5818 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5819 phba->cfg_total_seg_cnt); 5820 5821 /* Initialize buffer queue management fields */ 5822 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 5823 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 5824 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 5825 5826 /* 5827 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 5828 */ 5829 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 5830 /* Initialize the Abort scsi buffer list used by driver */ 5831 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 5832 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 5833 } 5834 5835 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 5836 /* Initialize the Abort nvme buffer list used by driver */ 5837 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); 5838 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 5839 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 5840 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list); 5841 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 5842 5843 /* Fast-path XRI aborted CQ Event work queue list */ 5844 INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue); 5845 } 5846 5847 /* This abort list used by worker thread */ 5848 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 5849 spin_lock_init(&phba->sli4_hba.nvmet_io_lock); 5850 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 5851 5852 /* 5853 * Initialize driver internal slow-path work queues 5854 */ 5855 5856 /* Driver internel slow-path CQ Event pool */ 5857 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 5858 /* Response IOCB work queue list */ 5859 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 5860 /* Asynchronous event CQ Event work queue list */ 5861 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 5862 /* Fast-path XRI aborted CQ Event work queue list */ 5863 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 5864 /* Slow-path XRI aborted CQ Event work queue list */ 5865 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 5866 /* Receive queue CQ Event work queue list */ 5867 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 5868 5869 /* Initialize extent block lists. */ 5870 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 5871 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 5872 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 5873 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 5874 5875 /* Initialize mboxq lists. 
If the early init routines fail 5876 * these lists need to be correctly initialized. 5877 */ 5878 INIT_LIST_HEAD(&phba->sli.mboxq); 5879 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 5880 5881 /* initialize optic_state to 0xFF */ 5882 phba->sli4_hba.lnk_info.optic_state = 0xff; 5883 5884 /* Allocate device driver memory */ 5885 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 5886 if (rc) 5887 return -ENOMEM; 5888 5889 /* IF Type 2 ports get initialized now. */ 5890 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5891 LPFC_SLI_INTF_IF_TYPE_2) { 5892 rc = lpfc_pci_function_reset(phba); 5893 if (unlikely(rc)) { 5894 rc = -ENODEV; 5895 goto out_free_mem; 5896 } 5897 phba->temp_sensor_support = 1; 5898 } 5899 5900 /* Create the bootstrap mailbox command */ 5901 rc = lpfc_create_bootstrap_mbox(phba); 5902 if (unlikely(rc)) 5903 goto out_free_mem; 5904 5905 /* Set up the host's endian order with the device. */ 5906 rc = lpfc_setup_endian_order(phba); 5907 if (unlikely(rc)) 5908 goto out_free_bsmbx; 5909 5910 /* Set up the hba's configuration parameters. */ 5911 rc = lpfc_sli4_read_config(phba); 5912 if (unlikely(rc)) 5913 goto out_free_bsmbx; 5914 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 5915 if (unlikely(rc)) 5916 goto out_free_bsmbx; 5917 5918 /* IF Type 0 ports get initialized now. */ 5919 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5920 LPFC_SLI_INTF_IF_TYPE_0) { 5921 rc = lpfc_pci_function_reset(phba); 5922 if (unlikely(rc)) 5923 goto out_free_bsmbx; 5924 } 5925 5926 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 5927 GFP_KERNEL); 5928 if (!mboxq) { 5929 rc = -ENOMEM; 5930 goto out_free_bsmbx; 5931 } 5932 5933 /* Check for NVMET being configured */ 5934 phba->nvmet_support = 0; 5935 if (lpfc_enable_nvmet_cnt) { 5936 5937 /* First get WWN of HBA instance */ 5938 lpfc_read_nv(phba, mboxq); 5939 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5940 if (rc != MBX_SUCCESS) { 5941 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5942 "6016 Mailbox failed , mbxCmd x%x " 5943 "READ_NV, mbxStatus x%x\n", 5944 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5945 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 5946 mempool_free(mboxq, phba->mbox_mem_pool); 5947 rc = -EIO; 5948 goto out_free_bsmbx; 5949 } 5950 mb = &mboxq->u.mb; 5951 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 5952 sizeof(uint64_t)); 5953 wwn = cpu_to_be64(wwn); 5954 phba->sli4_hba.wwnn.u.name = wwn; 5955 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 5956 sizeof(uint64_t)); 5957 /* wwn is WWPN of HBA instance */ 5958 wwn = cpu_to_be64(wwn); 5959 phba->sli4_hba.wwpn.u.name = wwn; 5960 5961 /* Check to see if it matches any module parameter */ 5962 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 5963 if (wwn == lpfc_enable_nvmet[i]) { 5964 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 5965 if (lpfc_nvmet_mem_alloc(phba)) 5966 break; 5967 5968 phba->nvmet_support = 1; /* a match */ 5969 5970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5971 "6017 NVME Target %016llx\n", 5972 wwn); 5973 #else 5974 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5975 "6021 Can't enable NVME Target." 5976 " NVME_TARGET_FC infrastructure" 5977 " is not in kernel\n"); 5978 #endif 5979 break; 5980 } 5981 } 5982 } 5983 5984 lpfc_nvme_mod_param_dep(phba); 5985 5986 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. 
*/ 5987 lpfc_supported_pages(mboxq); 5988 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5989 if (!rc) { 5990 mqe = &mboxq->u.mqe; 5991 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 5992 LPFC_MAX_SUPPORTED_PAGES); 5993 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 5994 switch (pn_page[i]) { 5995 case LPFC_SLI4_PARAMETERS: 5996 phba->sli4_hba.pc_sli4_params.supported = 1; 5997 break; 5998 default: 5999 break; 6000 } 6001 } 6002 /* Read the port's SLI4 Parameters capabilities if supported. */ 6003 if (phba->sli4_hba.pc_sli4_params.supported) 6004 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6005 if (rc) { 6006 mempool_free(mboxq, phba->mbox_mem_pool); 6007 rc = -EIO; 6008 goto out_free_bsmbx; 6009 } 6010 } 6011 6012 /* 6013 * Get sli4 parameters that override parameters from Port capabilities. 6014 * If this call fails, it isn't critical unless the SLI4 parameters come 6015 * back in conflict. 6016 */ 6017 rc = lpfc_get_sli4_parameters(phba, mboxq); 6018 if (rc) { 6019 if (phba->sli4_hba.extents_in_use && 6020 phba->sli4_hba.rpi_hdrs_in_use) { 6021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6022 "2999 Unsupported SLI4 Parameters " 6023 "Extents and RPI headers enabled.\n"); 6024 } 6025 mempool_free(mboxq, phba->mbox_mem_pool); 6026 goto out_free_bsmbx; 6027 } 6028 6029 mempool_free(mboxq, phba->mbox_mem_pool); 6030 6031 /* Verify OAS is supported */ 6032 lpfc_sli4_oas_verify(phba); 6033 if (phba->cfg_fof) 6034 fof_vectors = 1; 6035 6036 /* Verify all the SLI4 queues */ 6037 rc = lpfc_sli4_queue_verify(phba); 6038 if (rc) 6039 goto out_free_bsmbx; 6040 6041 /* Create driver internal CQE event pool */ 6042 rc = lpfc_sli4_cq_event_pool_create(phba); 6043 if (rc) 6044 goto out_free_bsmbx; 6045 6046 /* Initialize sgl lists per host */ 6047 lpfc_init_sgl_list(phba); 6048 6049 /* Allocate and initialize active sgl array */ 6050 rc = lpfc_init_active_sgl_array(phba); 6051 if (rc) { 6052 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6053 "1430 Failed to initialize sgl list.\n"); 6054 goto out_destroy_cq_event_pool; 6055 } 6056 rc = lpfc_sli4_init_rpi_hdrs(phba); 6057 if (rc) { 6058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6059 "1432 Failed to initialize rpi headers.\n"); 6060 goto out_free_active_sgl; 6061 } 6062 6063 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6064 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6065 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 6066 GFP_KERNEL); 6067 if (!phba->fcf.fcf_rr_bmask) { 6068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6069 "2759 Failed allocate memory for FCF round " 6070 "robin failover bmask\n"); 6071 rc = -ENOMEM; 6072 goto out_remove_rpi_hdrs; 6073 } 6074 6075 phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs, 6076 sizeof(struct lpfc_hba_eq_hdl), 6077 GFP_KERNEL); 6078 if (!phba->sli4_hba.hba_eq_hdl) { 6079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6080 "2572 Failed allocate memory for " 6081 "fast-path per-EQ handle array\n"); 6082 rc = -ENOMEM; 6083 goto out_free_fcf_rr_bmask; 6084 } 6085 6086 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu, 6087 sizeof(struct lpfc_vector_map_info), 6088 GFP_KERNEL); 6089 if (!phba->sli4_hba.cpu_map) { 6090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6091 "3327 Failed allocate memory for msi-x " 6092 "interrupt vector mapping\n"); 6093 rc = -ENOMEM; 6094 goto out_free_hba_eq_hdl; 6095 } 6096 if (lpfc_used_cpu == NULL) { 6097 lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t), 6098 GFP_KERNEL); 
6099 if (!lpfc_used_cpu) { 6100 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6101 "3335 Failed allocate memory for msi-x " 6102 "interrupt vector mapping\n"); 6103 kfree(phba->sli4_hba.cpu_map); 6104 rc = -ENOMEM; 6105 goto out_free_hba_eq_hdl; 6106 } 6107 for (i = 0; i < lpfc_present_cpu; i++) 6108 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; 6109 } 6110 6111 /* 6112 * Enable sr-iov virtual functions if supported and configured 6113 * through the module parameter. 6114 */ 6115 if (phba->cfg_sriov_nr_virtfn > 0) { 6116 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6117 phba->cfg_sriov_nr_virtfn); 6118 if (rc) { 6119 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6120 "3020 Requested number of SR-IOV " 6121 "virtual functions (%d) is not " 6122 "supported\n", 6123 phba->cfg_sriov_nr_virtfn); 6124 phba->cfg_sriov_nr_virtfn = 0; 6125 } 6126 } 6127 6128 return 0; 6129 6130 out_free_hba_eq_hdl: 6131 kfree(phba->sli4_hba.hba_eq_hdl); 6132 out_free_fcf_rr_bmask: 6133 kfree(phba->fcf.fcf_rr_bmask); 6134 out_remove_rpi_hdrs: 6135 lpfc_sli4_remove_rpi_hdrs(phba); 6136 out_free_active_sgl: 6137 lpfc_free_active_sgl(phba); 6138 out_destroy_cq_event_pool: 6139 lpfc_sli4_cq_event_pool_destroy(phba); 6140 out_free_bsmbx: 6141 lpfc_destroy_bootstrap_mbox(phba); 6142 out_free_mem: 6143 lpfc_mem_free(phba); 6144 return rc; 6145 } 6146 6147 /** 6148 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 6149 * @phba: pointer to lpfc hba data structure. 6150 * 6151 * This routine is invoked to unset the driver internal resources set up 6152 * specific for supporting the SLI-4 HBA device it attached to. 6153 **/ 6154 static void 6155 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 6156 { 6157 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 6158 6159 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 6160 kfree(phba->sli4_hba.cpu_map); 6161 phba->sli4_hba.num_present_cpu = 0; 6162 phba->sli4_hba.num_online_cpu = 0; 6163 phba->sli4_hba.curr_disp_cpu = 0; 6164 6165 /* Free memory allocated for fast-path work queue handles */ 6166 kfree(phba->sli4_hba.hba_eq_hdl); 6167 6168 /* Free the allocated rpi headers. */ 6169 lpfc_sli4_remove_rpi_hdrs(phba); 6170 lpfc_sli4_remove_rpis(phba); 6171 6172 /* Free eligible FCF index bmask */ 6173 kfree(phba->fcf.fcf_rr_bmask); 6174 6175 /* Free the ELS sgl list */ 6176 lpfc_free_active_sgl(phba); 6177 lpfc_free_els_sgl_list(phba); 6178 lpfc_free_nvmet_sgl_list(phba); 6179 6180 /* Free the completion queue EQ event pool */ 6181 lpfc_sli4_cq_event_release_all(phba); 6182 lpfc_sli4_cq_event_pool_destroy(phba); 6183 6184 /* Release resource identifiers. */ 6185 lpfc_sli4_dealloc_resource_identifiers(phba); 6186 6187 /* Free the bsmbx region. */ 6188 lpfc_destroy_bootstrap_mbox(phba); 6189 6190 /* Free the SLI Layer memory with SLI4 HBAs */ 6191 lpfc_mem_free_all(phba); 6192 6193 /* Free the current connect table */ 6194 list_for_each_entry_safe(conn_entry, next_conn_entry, 6195 &phba->fcf_conn_rec_list, list) { 6196 list_del_init(&conn_entry->list); 6197 kfree(conn_entry); 6198 } 6199 6200 return; 6201 } 6202 6203 /** 6204 * lpfc_init_api_table_setup - Set up init api function jump table 6205 * @phba: The hba struct for which this call is being executed. 6206 * @dev_grp: The HBA PCI-Device group number. 6207 * 6208 * This routine sets up the device INIT interface API function jump table 6209 * in @phba struct. 6210 * 6211 * Returns: 0 - success, -ENODEV - failure. 
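 *
 * Once the table is filled in, SLI-revision independent code calls through
 * these pointers instead of branching on the revision, for example
 * (illustrative):
 *
 *	phba->lpfc_stop_port(phba);
 *	phba->lpfc_handle_eratt(phba);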
6212 **/ 6213 int 6214 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6215 { 6216 phba->lpfc_hba_init_link = lpfc_hba_init_link; 6217 phba->lpfc_hba_down_link = lpfc_hba_down_link; 6218 phba->lpfc_selective_reset = lpfc_selective_reset; 6219 switch (dev_grp) { 6220 case LPFC_PCI_DEV_LP: 6221 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 6222 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 6223 phba->lpfc_stop_port = lpfc_stop_port_s3; 6224 break; 6225 case LPFC_PCI_DEV_OC: 6226 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 6227 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 6228 phba->lpfc_stop_port = lpfc_stop_port_s4; 6229 break; 6230 default: 6231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6232 "1431 Invalid HBA PCI-device group: 0x%x\n", 6233 dev_grp); 6234 return -ENODEV; 6235 break; 6236 } 6237 return 0; 6238 } 6239 6240 /** 6241 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 6242 * @phba: pointer to lpfc hba data structure. 6243 * 6244 * This routine is invoked to set up the driver internal resources after the 6245 * device specific resource setup to support the HBA device it attached to. 6246 * 6247 * Return codes 6248 * 0 - successful 6249 * other values - error 6250 **/ 6251 static int 6252 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 6253 { 6254 int error; 6255 6256 /* Startup the kernel thread for this host adapter. */ 6257 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6258 "lpfc_worker_%d", phba->brd_no); 6259 if (IS_ERR(phba->worker_thread)) { 6260 error = PTR_ERR(phba->worker_thread); 6261 return error; 6262 } 6263 6264 return 0; 6265 } 6266 6267 /** 6268 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 6269 * @phba: pointer to lpfc hba data structure. 6270 * 6271 * This routine is invoked to unset the driver internal resources set up after 6272 * the device specific resource setup for supporting the HBA device it 6273 * attached to. 6274 **/ 6275 static void 6276 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 6277 { 6278 /* Stop kernel worker thread */ 6279 kthread_stop(phba->worker_thread); 6280 } 6281 6282 /** 6283 * lpfc_free_iocb_list - Free iocb list. 6284 * @phba: pointer to lpfc hba data structure. 6285 * 6286 * This routine is invoked to free the driver's IOCB list and memory. 6287 **/ 6288 void 6289 lpfc_free_iocb_list(struct lpfc_hba *phba) 6290 { 6291 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 6292 6293 spin_lock_irq(&phba->hbalock); 6294 list_for_each_entry_safe(iocbq_entry, iocbq_next, 6295 &phba->lpfc_iocb_list, list) { 6296 list_del(&iocbq_entry->list); 6297 kfree(iocbq_entry); 6298 phba->total_iocbq_bufs--; 6299 } 6300 spin_unlock_irq(&phba->hbalock); 6301 6302 return; 6303 } 6304 6305 /** 6306 * lpfc_init_iocb_list - Allocate and initialize iocb list. 6307 * @phba: pointer to lpfc hba data structure. 6308 * 6309 * This routine is invoked to allocate and initizlize the driver's IOCB 6310 * list and set up the IOCB tag array accordingly. 6311 * 6312 * Return codes 6313 * 0 - successful 6314 * other values - error 6315 **/ 6316 int 6317 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 6318 { 6319 struct lpfc_iocbq *iocbq_entry = NULL; 6320 uint16_t iotag; 6321 int i; 6322 6323 /* Initialize and populate the iocb list per host. 
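 *
 * Illustrative call (the count shown is the LPFC_IOCB_LIST_CNT default
 * referenced in the error message below; callers may size it differently):
 *
 *	rc = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);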
*/ 6324 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 6325 for (i = 0; i < iocb_count; i++) { 6326 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 6327 if (iocbq_entry == NULL) { 6328 printk(KERN_ERR "%s: only allocated %d iocbs of " 6329 "expected %d count. Unloading driver.\n", 6330 __func__, i, LPFC_IOCB_LIST_CNT); 6331 goto out_free_iocbq; 6332 } 6333 6334 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 6335 if (iotag == 0) { 6336 kfree(iocbq_entry); 6337 printk(KERN_ERR "%s: failed to allocate IOTAG. " 6338 "Unloading driver.\n", __func__); 6339 goto out_free_iocbq; 6340 } 6341 iocbq_entry->sli4_lxritag = NO_XRI; 6342 iocbq_entry->sli4_xritag = NO_XRI; 6343 6344 spin_lock_irq(&phba->hbalock); 6345 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 6346 phba->total_iocbq_bufs++; 6347 spin_unlock_irq(&phba->hbalock); 6348 } 6349 6350 return 0; 6351 6352 out_free_iocbq: 6353 lpfc_free_iocb_list(phba); 6354 6355 return -ENOMEM; 6356 } 6357 6358 /** 6359 * lpfc_free_sgl_list - Free a given sgl list. 6360 * @phba: pointer to lpfc hba data structure. 6361 * @sglq_list: pointer to the head of sgl list. 6362 * 6363 * This routine is invoked to free a give sgl list and memory. 6364 **/ 6365 void 6366 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 6367 { 6368 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6369 6370 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 6371 list_del(&sglq_entry->list); 6372 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 6373 kfree(sglq_entry); 6374 } 6375 } 6376 6377 /** 6378 * lpfc_free_els_sgl_list - Free els sgl list. 6379 * @phba: pointer to lpfc hba data structure. 6380 * 6381 * This routine is invoked to free the driver's els sgl list and memory. 6382 **/ 6383 static void 6384 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 6385 { 6386 LIST_HEAD(sglq_list); 6387 6388 /* Retrieve all els sgls from driver list */ 6389 spin_lock_irq(&phba->hbalock); 6390 spin_lock(&phba->sli4_hba.sgl_list_lock); 6391 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 6392 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6393 spin_unlock_irq(&phba->hbalock); 6394 6395 /* Now free the sgl list */ 6396 lpfc_free_sgl_list(phba, &sglq_list); 6397 } 6398 6399 /** 6400 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 6401 * @phba: pointer to lpfc hba data structure. 6402 * 6403 * This routine is invoked to free the driver's nvmet sgl list and memory. 6404 **/ 6405 static void 6406 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 6407 { 6408 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6409 LIST_HEAD(sglq_list); 6410 6411 /* Retrieve all nvmet sgls from driver list */ 6412 spin_lock_irq(&phba->hbalock); 6413 spin_lock(&phba->sli4_hba.sgl_list_lock); 6414 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 6415 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6416 spin_unlock_irq(&phba->hbalock); 6417 6418 /* Now free the sgl list */ 6419 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 6420 list_del(&sglq_entry->list); 6421 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 6422 kfree(sglq_entry); 6423 } 6424 } 6425 6426 /** 6427 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 6428 * @phba: pointer to lpfc hba data structure. 6429 * 6430 * This routine is invoked to allocate the driver's active sgl memory. 6431 * This array will hold the sglq_entry's for active IOs. 
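 *
 * The array holds max_cfg_param.max_xri pointers, so an active sglq can
 * be found with a direct index, for example (illustrative, the index
 * variable is hypothetical):
 *
 *	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
 *
 * With a hypothetical max_xri of 1024 this costs 8 KB on a 64-bit host.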
6432 **/ 6433 static int 6434 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 6435 { 6436 int size; 6437 size = sizeof(struct lpfc_sglq *); 6438 size *= phba->sli4_hba.max_cfg_param.max_xri; 6439 6440 phba->sli4_hba.lpfc_sglq_active_list = 6441 kzalloc(size, GFP_KERNEL); 6442 if (!phba->sli4_hba.lpfc_sglq_active_list) 6443 return -ENOMEM; 6444 return 0; 6445 } 6446 6447 /** 6448 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 6449 * @phba: pointer to lpfc hba data structure. 6450 * 6451 * This routine is invoked to walk through the array of active sglq entries 6452 * and free all of the resources. 6453 * This is just a place holder for now. 6454 **/ 6455 static void 6456 lpfc_free_active_sgl(struct lpfc_hba *phba) 6457 { 6458 kfree(phba->sli4_hba.lpfc_sglq_active_list); 6459 } 6460 6461 /** 6462 * lpfc_init_sgl_list - Allocate and initialize sgl list. 6463 * @phba: pointer to lpfc hba data structure. 6464 * 6465 * This routine is invoked to allocate and initizlize the driver's sgl 6466 * list and set up the sgl xritag tag array accordingly. 6467 * 6468 **/ 6469 static void 6470 lpfc_init_sgl_list(struct lpfc_hba *phba) 6471 { 6472 /* Initialize and populate the sglq list per host/VF. */ 6473 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 6474 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 6475 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 6476 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6477 6478 /* els xri-sgl book keeping */ 6479 phba->sli4_hba.els_xri_cnt = 0; 6480 6481 /* scsi xri-buffer book keeping */ 6482 phba->sli4_hba.scsi_xri_cnt = 0; 6483 6484 /* nvme xri-buffer book keeping */ 6485 phba->sli4_hba.nvme_xri_cnt = 0; 6486 } 6487 6488 /** 6489 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 6490 * @phba: pointer to lpfc hba data structure. 6491 * 6492 * This routine is invoked to post rpi header templates to the 6493 * port for those SLI4 ports that do not support extents. This routine 6494 * posts a PAGE_SIZE memory region to the port to hold up to 6495 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 6496 * and should be called only when interrupts are disabled. 6497 * 6498 * Return codes 6499 * 0 - successful 6500 * -ERROR - otherwise. 6501 **/ 6502 int 6503 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 6504 { 6505 int rc = 0; 6506 struct lpfc_rpi_hdr *rpi_hdr; 6507 6508 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 6509 if (!phba->sli4_hba.rpi_hdrs_in_use) 6510 return rc; 6511 if (phba->sli4_hba.extents_in_use) 6512 return -EIO; 6513 6514 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 6515 if (!rpi_hdr) { 6516 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6517 "0391 Error during rpi post operation\n"); 6518 lpfc_sli4_remove_rpis(phba); 6519 rc = -ENODEV; 6520 } 6521 6522 return rc; 6523 } 6524 6525 /** 6526 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 6527 * @phba: pointer to lpfc hba data structure. 6528 * 6529 * This routine is invoked to allocate a single 4KB memory region to 6530 * support rpis and stores them in the phba. This single region 6531 * provides support for up to 64 rpis. The region is used globally 6532 * by the device. 6533 * 6534 * Returns: 6535 * A valid rpi hdr on success. 6536 * A NULL pointer on any failure. 
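 *
 * Sizing note: assuming LPFC_HDR_TEMPLATE_SIZE is 4 KB and
 * LPFC_RPI_HDR_COUNT is 64 (the constants used below), one region
 * provides 4096 / 64 = 64 bytes of context per rpi, which is why a
 * single header region covers exactly 64 rpis.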
6537 **/ 6538 struct lpfc_rpi_hdr * 6539 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 6540 { 6541 uint16_t rpi_limit, curr_rpi_range; 6542 struct lpfc_dmabuf *dmabuf; 6543 struct lpfc_rpi_hdr *rpi_hdr; 6544 6545 /* 6546 * If the SLI4 port supports extents, posting the rpi header isn't 6547 * required. Set the expected maximum count and let the actual value 6548 * get set when extents are fully allocated. 6549 */ 6550 if (!phba->sli4_hba.rpi_hdrs_in_use) 6551 return NULL; 6552 if (phba->sli4_hba.extents_in_use) 6553 return NULL; 6554 6555 /* The limit on the logical index is just the max_rpi count. */ 6556 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 6557 6558 spin_lock_irq(&phba->hbalock); 6559 /* 6560 * Establish the starting RPI in this header block. The starting 6561 * rpi is normalized to a zero base because the physical rpi is 6562 * port based. 6563 */ 6564 curr_rpi_range = phba->sli4_hba.next_rpi; 6565 spin_unlock_irq(&phba->hbalock); 6566 6567 /* Reached full RPI range */ 6568 if (curr_rpi_range == rpi_limit) 6569 return NULL; 6570 6571 /* 6572 * First allocate the protocol header region for the port. The 6573 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 6574 */ 6575 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6576 if (!dmabuf) 6577 return NULL; 6578 6579 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6580 LPFC_HDR_TEMPLATE_SIZE, 6581 &dmabuf->phys, GFP_KERNEL); 6582 if (!dmabuf->virt) { 6583 rpi_hdr = NULL; 6584 goto err_free_dmabuf; 6585 } 6586 6587 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 6588 rpi_hdr = NULL; 6589 goto err_free_coherent; 6590 } 6591 6592 /* Save the rpi header data for cleanup later. */ 6593 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 6594 if (!rpi_hdr) 6595 goto err_free_coherent; 6596 6597 rpi_hdr->dmabuf = dmabuf; 6598 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 6599 rpi_hdr->page_count = 1; 6600 spin_lock_irq(&phba->hbalock); 6601 6602 /* The rpi_hdr stores the logical index only. */ 6603 rpi_hdr->start_rpi = curr_rpi_range; 6604 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 6605 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 6606 6607 spin_unlock_irq(&phba->hbalock); 6608 return rpi_hdr; 6609 6610 err_free_coherent: 6611 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 6612 dmabuf->virt, dmabuf->phys); 6613 err_free_dmabuf: 6614 kfree(dmabuf); 6615 return NULL; 6616 } 6617 6618 /** 6619 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 6620 * @phba: pointer to lpfc hba data structure. 6621 * 6622 * This routine is invoked to remove all memory resources allocated 6623 * to support rpis for SLI4 ports not supporting extents. This routine 6624 * presumes the caller has released all rpis consumed by fabric or port 6625 * logins and is prepared to have the header pages removed. 6626 **/ 6627 void 6628 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 6629 { 6630 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 6631 6632 if (!phba->sli4_hba.rpi_hdrs_in_use) 6633 goto exit; 6634 6635 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 6636 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 6637 list_del(&rpi_hdr->list); 6638 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 6639 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 6640 kfree(rpi_hdr->dmabuf); 6641 kfree(rpi_hdr); 6642 } 6643 exit: 6644 /* There are no rpis available to the port now. 
*/ 6645 phba->sli4_hba.next_rpi = 0; 6646 } 6647 6648 /** 6649 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 6650 * @pdev: pointer to pci device data structure. 6651 * 6652 * This routine is invoked to allocate the driver hba data structure for an 6653 * HBA device. If the allocation is successful, the phba reference to the 6654 * PCI device data structure is set. 6655 * 6656 * Return codes 6657 * pointer to @phba - successful 6658 * NULL - error 6659 **/ 6660 static struct lpfc_hba * 6661 lpfc_hba_alloc(struct pci_dev *pdev) 6662 { 6663 struct lpfc_hba *phba; 6664 6665 /* Allocate memory for HBA structure */ 6666 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 6667 if (!phba) { 6668 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 6669 return NULL; 6670 } 6671 6672 /* Set reference to PCI device in HBA structure */ 6673 phba->pcidev = pdev; 6674 6675 /* Assign an unused board number */ 6676 phba->brd_no = lpfc_get_instance(); 6677 if (phba->brd_no < 0) { 6678 kfree(phba); 6679 return NULL; 6680 } 6681 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 6682 6683 spin_lock_init(&phba->ct_ev_lock); 6684 INIT_LIST_HEAD(&phba->ct_ev_waiters); 6685 6686 return phba; 6687 } 6688 6689 /** 6690 * lpfc_hba_free - Free driver hba data structure with a device. 6691 * @phba: pointer to lpfc hba data structure. 6692 * 6693 * This routine is invoked to free the driver hba data structure with an 6694 * HBA device. 6695 **/ 6696 static void 6697 lpfc_hba_free(struct lpfc_hba *phba) 6698 { 6699 /* Release the driver assigned board number */ 6700 idr_remove(&lpfc_hba_index, phba->brd_no); 6701 6702 /* Free memory allocated with sli3 rings */ 6703 kfree(phba->sli.sli3_ring); 6704 phba->sli.sli3_ring = NULL; 6705 6706 kfree(phba); 6707 return; 6708 } 6709 6710 /** 6711 * lpfc_create_shost - Create hba physical port with associated scsi host. 6712 * @phba: pointer to lpfc hba data structure. 6713 * 6714 * This routine is invoked to create HBA physical port and associate a SCSI 6715 * host with it. 6716 * 6717 * Return codes 6718 * 0 - successful 6719 * other values - error 6720 **/ 6721 static int 6722 lpfc_create_shost(struct lpfc_hba *phba) 6723 { 6724 struct lpfc_vport *vport; 6725 struct Scsi_Host *shost; 6726 6727 /* Initialize HBA FC structure */ 6728 phba->fc_edtov = FF_DEF_EDTOV; 6729 phba->fc_ratov = FF_DEF_RATOV; 6730 phba->fc_altov = FF_DEF_ALTOV; 6731 phba->fc_arbtov = FF_DEF_ARBTOV; 6732 6733 atomic_set(&phba->sdev_cnt, 0); 6734 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6735 if (!vport) 6736 return -ENODEV; 6737 6738 shost = lpfc_shost_from_vport(vport); 6739 phba->pport = vport; 6740 6741 if (phba->nvmet_support) { 6742 /* Only 1 vport (pport) will support NVME target */ 6743 if (phba->txrdy_payload_pool == NULL) { 6744 phba->txrdy_payload_pool = pci_pool_create( 6745 "txrdy_pool", phba->pcidev, 6746 TXRDY_PAYLOAD_LEN, 16, 0); 6747 if (phba->txrdy_payload_pool) { 6748 phba->targetport = NULL; 6749 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 6750 lpfc_printf_log(phba, KERN_INFO, 6751 LOG_INIT | LOG_NVME_DISC, 6752 "6076 NVME Target Found\n"); 6753 } 6754 } 6755 } 6756 6757 lpfc_debugfs_initialize(vport); 6758 /* Put reference to SCSI host to driver's device private data */ 6759 pci_set_drvdata(phba->pcidev, shost); 6760 6761 /* 6762 * At this point we are fully registered with PSA. In addition, 6763 * any initial discovery should be completed. 
6764 */ 6765 vport->load_flag |= FC_ALLOW_FDMI; 6766 if (phba->cfg_enable_SmartSAN || 6767 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 6768 6769 /* Setup appropriate attribute masks */ 6770 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 6771 if (phba->cfg_enable_SmartSAN) 6772 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 6773 else 6774 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 6775 } 6776 return 0; 6777 } 6778 6779 /** 6780 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 6781 * @phba: pointer to lpfc hba data structure. 6782 * 6783 * This routine is invoked to destroy HBA physical port and the associated 6784 * SCSI host. 6785 **/ 6786 static void 6787 lpfc_destroy_shost(struct lpfc_hba *phba) 6788 { 6789 struct lpfc_vport *vport = phba->pport; 6790 6791 /* Destroy physical port that associated with the SCSI host */ 6792 destroy_port(vport); 6793 6794 return; 6795 } 6796 6797 /** 6798 * lpfc_setup_bg - Setup Block guard structures and debug areas. 6799 * @phba: pointer to lpfc hba data structure. 6800 * @shost: the shost to be used to detect Block guard settings. 6801 * 6802 * This routine sets up the local Block guard protocol settings for @shost. 6803 * This routine also allocates memory for debugging bg buffers. 6804 **/ 6805 static void 6806 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 6807 { 6808 uint32_t old_mask; 6809 uint32_t old_guard; 6810 6811 int pagecnt = 10; 6812 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 6813 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6814 "1478 Registering BlockGuard with the " 6815 "SCSI layer\n"); 6816 6817 old_mask = phba->cfg_prot_mask; 6818 old_guard = phba->cfg_prot_guard; 6819 6820 /* Only allow supported values */ 6821 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 6822 SHOST_DIX_TYPE0_PROTECTION | 6823 SHOST_DIX_TYPE1_PROTECTION); 6824 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 6825 SHOST_DIX_GUARD_CRC); 6826 6827 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 6828 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 6829 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 6830 6831 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 6832 if ((old_mask != phba->cfg_prot_mask) || 6833 (old_guard != phba->cfg_prot_guard)) 6834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6835 "1475 Registering BlockGuard with the " 6836 "SCSI layer: mask %d guard %d\n", 6837 phba->cfg_prot_mask, 6838 phba->cfg_prot_guard); 6839 6840 scsi_host_set_prot(shost, phba->cfg_prot_mask); 6841 scsi_host_set_guard(shost, phba->cfg_prot_guard); 6842 } else 6843 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6844 "1479 Not Registering BlockGuard with the SCSI " 6845 "layer, Bad protection parameters: %d %d\n", 6846 old_mask, old_guard); 6847 } 6848 6849 if (!_dump_buf_data) { 6850 while (pagecnt) { 6851 spin_lock_init(&_dump_buf_lock); 6852 _dump_buf_data = 6853 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6854 if (_dump_buf_data) { 6855 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6856 "9043 BLKGRD: allocated %d pages for " 6857 "_dump_buf_data at 0x%p\n", 6858 (1 << pagecnt), _dump_buf_data); 6859 _dump_buf_data_order = pagecnt; 6860 memset(_dump_buf_data, 0, 6861 ((1 << PAGE_SHIFT) << pagecnt)); 6862 break; 6863 } else 6864 --pagecnt; 6865 } 6866 if (!_dump_buf_data_order) 6867 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6868 "9044 BLKGRD: ERROR unable to allocate " 6869 "memory for hexdump\n"); 6870 } else 6871 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6872 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 
6873 "\n", _dump_buf_data); 6874 if (!_dump_buf_dif) { 6875 while (pagecnt) { 6876 _dump_buf_dif = 6877 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6878 if (_dump_buf_dif) { 6879 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6880 "9046 BLKGRD: allocated %d pages for " 6881 "_dump_buf_dif at 0x%p\n", 6882 (1 << pagecnt), _dump_buf_dif); 6883 _dump_buf_dif_order = pagecnt; 6884 memset(_dump_buf_dif, 0, 6885 ((1 << PAGE_SHIFT) << pagecnt)); 6886 break; 6887 } else 6888 --pagecnt; 6889 } 6890 if (!_dump_buf_dif_order) 6891 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6892 "9047 BLKGRD: ERROR unable to allocate " 6893 "memory for hexdump\n"); 6894 } else 6895 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6896 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 6897 _dump_buf_dif); 6898 } 6899 6900 /** 6901 * lpfc_post_init_setup - Perform necessary device post initialization setup. 6902 * @phba: pointer to lpfc hba data structure. 6903 * 6904 * This routine is invoked to perform all the necessary post initialization 6905 * setup for the device. 6906 **/ 6907 static void 6908 lpfc_post_init_setup(struct lpfc_hba *phba) 6909 { 6910 struct Scsi_Host *shost; 6911 struct lpfc_adapter_event_header adapter_event; 6912 6913 /* Get the default values for Model Name and Description */ 6914 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 6915 6916 /* 6917 * hba setup may have changed the hba_queue_depth so we need to 6918 * adjust the value of can_queue. 6919 */ 6920 shost = pci_get_drvdata(phba->pcidev); 6921 shost->can_queue = phba->cfg_hba_queue_depth - 10; 6922 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 6923 lpfc_setup_bg(phba, shost); 6924 6925 lpfc_host_attrib_init(shost); 6926 6927 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 6928 spin_lock_irq(shost->host_lock); 6929 lpfc_poll_start_timer(phba); 6930 spin_unlock_irq(shost->host_lock); 6931 } 6932 6933 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6934 "0428 Perform SCSI scan\n"); 6935 /* Send board arrival event to upper layer */ 6936 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 6937 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 6938 fc_host_post_vendor_event(shost, fc_get_event_number(), 6939 sizeof(adapter_event), 6940 (char *) &adapter_event, 6941 LPFC_NL_VENDOR_ID); 6942 return; 6943 } 6944 6945 /** 6946 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 6947 * @phba: pointer to lpfc hba data structure. 6948 * 6949 * This routine is invoked to set up the PCI device memory space for device 6950 * with SLI-3 interface spec. 6951 * 6952 * Return codes 6953 * 0 - successful 6954 * other values - error 6955 **/ 6956 static int 6957 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 6958 { 6959 struct pci_dev *pdev; 6960 unsigned long bar0map_len, bar2map_len; 6961 int i, hbq_count; 6962 void *ptr; 6963 int error = -ENODEV; 6964 6965 /* Obtain PCI device reference */ 6966 if (!phba->pcidev) 6967 return error; 6968 else 6969 pdev = phba->pcidev; 6970 6971 /* Set the device DMA mask size */ 6972 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6973 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6974 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6975 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6976 return error; 6977 } 6978 } 6979 6980 /* Get the bus address of Bar0 and Bar2 and the number of bytes 6981 * required by each mapping. 
6982 */ 6983 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6984 bar0map_len = pci_resource_len(pdev, 0); 6985 6986 phba->pci_bar2_map = pci_resource_start(pdev, 2); 6987 bar2map_len = pci_resource_len(pdev, 2); 6988 6989 /* Map HBA SLIM to a kernel virtual address. */ 6990 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6991 if (!phba->slim_memmap_p) { 6992 dev_printk(KERN_ERR, &pdev->dev, 6993 "ioremap failed for SLIM memory.\n"); 6994 goto out; 6995 } 6996 6997 /* Map HBA Control Registers to a kernel virtual address. */ 6998 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 6999 if (!phba->ctrl_regs_memmap_p) { 7000 dev_printk(KERN_ERR, &pdev->dev, 7001 "ioremap failed for HBA control registers.\n"); 7002 goto out_iounmap_slim; 7003 } 7004 7005 /* Allocate memory for SLI-2 structures */ 7006 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7007 &phba->slim2p.phys, GFP_KERNEL); 7008 if (!phba->slim2p.virt) 7009 goto out_iounmap; 7010 7011 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7012 phba->mbox_ext = (phba->slim2p.virt + 7013 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7014 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7015 phba->IOCBs = (phba->slim2p.virt + 7016 offsetof(struct lpfc_sli2_slim, IOCBs)); 7017 7018 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7019 lpfc_sli_hbq_size(), 7020 &phba->hbqslimp.phys, 7021 GFP_KERNEL); 7022 if (!phba->hbqslimp.virt) 7023 goto out_free_slim; 7024 7025 hbq_count = lpfc_sli_hbq_count(); 7026 ptr = phba->hbqslimp.virt; 7027 for (i = 0; i < hbq_count; ++i) { 7028 phba->hbqs[i].hbq_virt = ptr; 7029 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7030 ptr += (lpfc_hbq_defs[i]->entry_count * 7031 sizeof(struct lpfc_hbq_entry)); 7032 } 7033 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7034 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7035 7036 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7037 7038 phba->MBslimaddr = phba->slim_memmap_p; 7039 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7040 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7041 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7042 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7043 7044 return 0; 7045 7046 out_free_slim: 7047 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7048 phba->slim2p.virt, phba->slim2p.phys); 7049 out_iounmap: 7050 iounmap(phba->ctrl_regs_memmap_p); 7051 out_iounmap_slim: 7052 iounmap(phba->slim_memmap_p); 7053 out: 7054 return error; 7055 } 7056 7057 /** 7058 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7059 * @phba: pointer to lpfc hba data structure. 7060 * 7061 * This routine is invoked to unset the PCI device memory space for device 7062 * with SLI-3 interface spec. 
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
				readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
				readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
				readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
			/* Final checks.  The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.EQCQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_SLIPORT_IF0_SMPHR;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISCR0;
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
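 *
 * Each virtual function owns its own LPFC_VFR_PAGE_SIZE-sized window in
 * the BAR2 doorbell space, so every doorbell address below is the base
 * doorbell mapping plus @vf * LPFC_VFR_PAGE_SIZE plus the per-queue
 * doorbell offset.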
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
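	 * For example: after alignment the low four bits of aphys are zero,
	 * so addr_lo carries physical address bits 33:4 and addr_hi carries
	 * bits 63:34; each 30-bit chunk is shifted left by two and tagged
	 * with LPFC_BMBX_BIT1_ADDR_LO or LPFC_BMBX_BIT1_ADDR_HI in its
	 * low-order bits so the port can tell the two halves apart.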
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to tear down the bootstrap mailbox
 * region and release all host resources.  This routine requires
 * the caller to ensure that all mailbox commands have been recovered,
 * that no additional mailbox commands are sent, and that interrupts are
 * disabled before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPIs, XRIs, VPIs, VFIs and FCFIs.  These values also affect the
 * resource allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
7451 **/ 7452 int 7453 lpfc_sli4_read_config(struct lpfc_hba *phba) 7454 { 7455 LPFC_MBOXQ_t *pmb; 7456 struct lpfc_mbx_read_config *rd_config; 7457 union lpfc_sli4_cfg_shdr *shdr; 7458 uint32_t shdr_status, shdr_add_status; 7459 struct lpfc_mbx_get_func_cfg *get_func_cfg; 7460 struct lpfc_rsrc_desc_fcfcoe *desc; 7461 char *pdesc_0; 7462 uint16_t forced_link_speed; 7463 uint32_t if_type; 7464 int length, i, rc = 0, rc2; 7465 7466 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7467 if (!pmb) { 7468 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7469 "2011 Unable to allocate memory for issuing " 7470 "SLI_CONFIG_SPECIAL mailbox command\n"); 7471 return -ENOMEM; 7472 } 7473 7474 lpfc_read_config(phba, pmb); 7475 7476 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7477 if (rc != MBX_SUCCESS) { 7478 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7479 "2012 Mailbox failed , mbxCmd x%x " 7480 "READ_CONFIG, mbxStatus x%x\n", 7481 bf_get(lpfc_mqe_command, &pmb->u.mqe), 7482 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 7483 rc = -EIO; 7484 } else { 7485 rd_config = &pmb->u.mqe.un.rd_config; 7486 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 7487 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 7488 phba->sli4_hba.lnk_info.lnk_tp = 7489 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 7490 phba->sli4_hba.lnk_info.lnk_no = 7491 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 7492 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7493 "3081 lnk_type:%d, lnk_numb:%d\n", 7494 phba->sli4_hba.lnk_info.lnk_tp, 7495 phba->sli4_hba.lnk_info.lnk_no); 7496 } else 7497 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 7498 "3082 Mailbox (x%x) returned ldv:x0\n", 7499 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 7500 phba->sli4_hba.extents_in_use = 7501 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 7502 phba->sli4_hba.max_cfg_param.max_xri = 7503 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 7504 phba->sli4_hba.max_cfg_param.xri_base = 7505 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 7506 phba->sli4_hba.max_cfg_param.max_vpi = 7507 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 7508 phba->sli4_hba.max_cfg_param.vpi_base = 7509 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 7510 phba->sli4_hba.max_cfg_param.max_rpi = 7511 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 7512 phba->sli4_hba.max_cfg_param.rpi_base = 7513 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 7514 phba->sli4_hba.max_cfg_param.max_vfi = 7515 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 7516 phba->sli4_hba.max_cfg_param.vfi_base = 7517 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 7518 phba->sli4_hba.max_cfg_param.max_fcfi = 7519 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 7520 phba->sli4_hba.max_cfg_param.max_eq = 7521 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 7522 phba->sli4_hba.max_cfg_param.max_rq = 7523 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 7524 phba->sli4_hba.max_cfg_param.max_wq = 7525 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 7526 phba->sli4_hba.max_cfg_param.max_cq = 7527 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 7528 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 7529 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 7530 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 7531 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 7532 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 7533 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 7534 phba->max_vports = phba->max_vpi; 7535 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7536 "2003 cfg params Extents? 
%d " 7537 "XRI(B:%d M:%d), " 7538 "VPI(B:%d M:%d) " 7539 "VFI(B:%d M:%d) " 7540 "RPI(B:%d M:%d) " 7541 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", 7542 phba->sli4_hba.extents_in_use, 7543 phba->sli4_hba.max_cfg_param.xri_base, 7544 phba->sli4_hba.max_cfg_param.max_xri, 7545 phba->sli4_hba.max_cfg_param.vpi_base, 7546 phba->sli4_hba.max_cfg_param.max_vpi, 7547 phba->sli4_hba.max_cfg_param.vfi_base, 7548 phba->sli4_hba.max_cfg_param.max_vfi, 7549 phba->sli4_hba.max_cfg_param.rpi_base, 7550 phba->sli4_hba.max_cfg_param.max_rpi, 7551 phba->sli4_hba.max_cfg_param.max_fcfi, 7552 phba->sli4_hba.max_cfg_param.max_eq, 7553 phba->sli4_hba.max_cfg_param.max_cq, 7554 phba->sli4_hba.max_cfg_param.max_wq, 7555 phba->sli4_hba.max_cfg_param.max_rq); 7556 7557 } 7558 7559 if (rc) 7560 goto read_cfg_out; 7561 7562 /* Update link speed if forced link speed is supported */ 7563 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7564 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 7565 forced_link_speed = 7566 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 7567 if (forced_link_speed) { 7568 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 7569 7570 switch (forced_link_speed) { 7571 case LINK_SPEED_1G: 7572 phba->cfg_link_speed = 7573 LPFC_USER_LINK_SPEED_1G; 7574 break; 7575 case LINK_SPEED_2G: 7576 phba->cfg_link_speed = 7577 LPFC_USER_LINK_SPEED_2G; 7578 break; 7579 case LINK_SPEED_4G: 7580 phba->cfg_link_speed = 7581 LPFC_USER_LINK_SPEED_4G; 7582 break; 7583 case LINK_SPEED_8G: 7584 phba->cfg_link_speed = 7585 LPFC_USER_LINK_SPEED_8G; 7586 break; 7587 case LINK_SPEED_10G: 7588 phba->cfg_link_speed = 7589 LPFC_USER_LINK_SPEED_10G; 7590 break; 7591 case LINK_SPEED_16G: 7592 phba->cfg_link_speed = 7593 LPFC_USER_LINK_SPEED_16G; 7594 break; 7595 case LINK_SPEED_32G: 7596 phba->cfg_link_speed = 7597 LPFC_USER_LINK_SPEED_32G; 7598 break; 7599 case 0xffff: 7600 phba->cfg_link_speed = 7601 LPFC_USER_LINK_SPEED_AUTO; 7602 break; 7603 default: 7604 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7605 "0047 Unrecognized link " 7606 "speed : %d\n", 7607 forced_link_speed); 7608 phba->cfg_link_speed = 7609 LPFC_USER_LINK_SPEED_AUTO; 7610 } 7611 } 7612 } 7613 7614 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 7615 length = phba->sli4_hba.max_cfg_param.max_xri - 7616 lpfc_sli4_get_els_iocb_cnt(phba); 7617 if (phba->cfg_hba_queue_depth > length) { 7618 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7619 "3361 HBA queue depth changed from %d to %d\n", 7620 phba->cfg_hba_queue_depth, length); 7621 phba->cfg_hba_queue_depth = length; 7622 } 7623 7624 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 7625 LPFC_SLI_INTF_IF_TYPE_2) 7626 goto read_cfg_out; 7627 7628 /* get the pf# and vf# for SLI4 if_type 2 port */ 7629 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 7630 sizeof(struct lpfc_sli4_cfg_mhdr)); 7631 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 7632 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 7633 length, LPFC_SLI4_MBX_EMBED); 7634 7635 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7636 shdr = (union lpfc_sli4_cfg_shdr *) 7637 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 7638 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7639 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7640 if (rc2 || shdr_status || shdr_add_status) { 7641 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7642 "3026 Mailbox failed , mbxCmd x%x " 7643 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 7644 bf_get(lpfc_mqe_command, &pmb->u.mqe), 7645 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 7646 goto read_cfg_out; 7647 } 7648 
7649 /* search for fc_fcoe resrouce descriptor */ 7650 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 7651 7652 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 7653 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 7654 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 7655 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 7656 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 7657 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 7658 goto read_cfg_out; 7659 7660 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 7661 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 7662 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 7663 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 7664 phba->sli4_hba.iov.pf_number = 7665 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 7666 phba->sli4_hba.iov.vf_number = 7667 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 7668 break; 7669 } 7670 } 7671 7672 if (i < LPFC_RSRC_DESC_MAX_NUM) 7673 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7674 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 7675 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 7676 phba->sli4_hba.iov.vf_number); 7677 else 7678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7679 "3028 GET_FUNCTION_CONFIG: failed to find " 7680 "Resrouce Descriptor:x%x\n", 7681 LPFC_RSRC_DESC_TYPE_FCFCOE); 7682 7683 read_cfg_out: 7684 mempool_free(pmb, phba->mbox_mem_pool); 7685 return rc; 7686 } 7687 7688 /** 7689 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 7690 * @phba: pointer to lpfc hba data structure. 7691 * 7692 * This routine is invoked to setup the port-side endian order when 7693 * the port if_type is 0. This routine has no function for other 7694 * if_types. 7695 * 7696 * Return codes 7697 * 0 - successful 7698 * -ENOMEM - No available memory 7699 * -EIO - The mailbox failed to complete successfully. 7700 **/ 7701 static int 7702 lpfc_setup_endian_order(struct lpfc_hba *phba) 7703 { 7704 LPFC_MBOXQ_t *mboxq; 7705 uint32_t if_type, rc = 0; 7706 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 7707 HOST_ENDIAN_HIGH_WORD1}; 7708 7709 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7710 switch (if_type) { 7711 case LPFC_SLI_INTF_IF_TYPE_0: 7712 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 7713 GFP_KERNEL); 7714 if (!mboxq) { 7715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7716 "0492 Unable to allocate memory for " 7717 "issuing SLI_CONFIG_SPECIAL mailbox " 7718 "command\n"); 7719 return -ENOMEM; 7720 } 7721 7722 /* 7723 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 7724 * two words to contain special data values and no other data. 7725 */ 7726 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 7727 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 7728 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7729 if (rc != MBX_SUCCESS) { 7730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7731 "0493 SLI_CONFIG_SPECIAL mailbox " 7732 "failed with status x%x\n", 7733 rc); 7734 rc = -EIO; 7735 } 7736 mempool_free(mboxq, phba->mbox_mem_pool); 7737 break; 7738 case LPFC_SLI_INTF_IF_TYPE_2: 7739 case LPFC_SLI_INTF_IF_TYPE_1: 7740 default: 7741 break; 7742 } 7743 return rc; 7744 } 7745 7746 /** 7747 * lpfc_sli4_queue_verify - Verify and update EQ counts 7748 * @phba: pointer to lpfc hba data structure. 7749 * 7750 * This routine is invoked to check the user settable queue counts for EQs. 7751 * After this routine is called the counts will be set to valid values that 7752 * adhere to the constraints of the system's interrupt vectors and the port's 7753 * queue resources. 
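 *
 * For example (illustrative numbers): a request for 32 IO channels on a
 * host with 16 online CPUs and a port exposing 8 EQs is first reduced
 * to 16 and then to the EQ limit, less one EQ when FOF is enabled.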
7754 * 7755 * Return codes 7756 * 0 - successful 7757 * -ENOMEM - No available memory 7758 **/ 7759 static int 7760 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 7761 { 7762 int io_channel; 7763 int fof_vectors = phba->cfg_fof ? 1 : 0; 7764 7765 /* 7766 * Sanity check for configured queue parameters against the run-time 7767 * device parameters 7768 */ 7769 7770 /* Sanity check on HBA EQ parameters */ 7771 io_channel = phba->io_channel_irqs; 7772 7773 if (phba->sli4_hba.num_online_cpu < io_channel) { 7774 lpfc_printf_log(phba, 7775 KERN_ERR, LOG_INIT, 7776 "3188 Reducing IO channels to match number of " 7777 "online CPUs: from %d to %d\n", 7778 io_channel, phba->sli4_hba.num_online_cpu); 7779 io_channel = phba->sli4_hba.num_online_cpu; 7780 } 7781 7782 if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) { 7783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7784 "2575 Reducing IO channels to match number of " 7785 "available EQs: from %d to %d\n", 7786 io_channel, 7787 phba->sli4_hba.max_cfg_param.max_eq); 7788 io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors; 7789 } 7790 7791 /* The actual number of FCP / NVME event queues adopted */ 7792 if (io_channel != phba->io_channel_irqs) 7793 phba->io_channel_irqs = io_channel; 7794 if (phba->cfg_fcp_io_channel > io_channel) 7795 phba->cfg_fcp_io_channel = io_channel; 7796 if (phba->cfg_nvme_io_channel > io_channel) 7797 phba->cfg_nvme_io_channel = io_channel; 7798 if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq) 7799 phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; 7800 7801 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7802 "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n", 7803 phba->io_channel_irqs, phba->cfg_fcp_io_channel, 7804 phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq); 7805 7806 /* Get EQ depth from module parameter, fake the default for now */ 7807 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 7808 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 7809 7810 /* Get CQ depth from module parameter, fake the default for now */ 7811 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 7812 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 7813 return 0; 7814 } 7815 7816 static int 7817 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) 7818 { 7819 struct lpfc_queue *qdesc; 7820 int cnt; 7821 7822 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7823 phba->sli4_hba.cq_ecount); 7824 if (!qdesc) { 7825 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7826 "0508 Failed allocate fast-path NVME CQ (%d)\n", 7827 wqidx); 7828 return 1; 7829 } 7830 phba->sli4_hba.nvme_cq[wqidx] = qdesc; 7831 7832 cnt = LPFC_NVME_WQSIZE; 7833 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt); 7834 if (!qdesc) { 7835 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7836 "0509 Failed allocate fast-path NVME WQ (%d)\n", 7837 wqidx); 7838 return 1; 7839 } 7840 phba->sli4_hba.nvme_wq[wqidx] = qdesc; 7841 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 7842 return 0; 7843 } 7844 7845 static int 7846 lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) 7847 { 7848 struct lpfc_queue *qdesc; 7849 uint32_t wqesize; 7850 7851 /* Create Fast Path FCP CQs */ 7852 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7853 phba->sli4_hba.cq_ecount); 7854 if (!qdesc) { 7855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7856 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); 7857 return 1; 7858 } 7859 phba->sli4_hba.fcp_cq[wqidx] = qdesc; 7860 7861 /* Create Fast Path FCP WQs */ 7862 wqesize = (phba->fcp_embed_io) ? 
7863 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 7864 qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount); 7865 if (!qdesc) { 7866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7867 "0503 Failed allocate fast-path FCP WQ (%d)\n", 7868 wqidx); 7869 return 1; 7870 } 7871 phba->sli4_hba.fcp_wq[wqidx] = qdesc; 7872 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 7873 return 0; 7874 } 7875 7876 /** 7877 * lpfc_sli4_queue_create - Create all the SLI4 queues 7878 * @phba: pointer to lpfc hba data structure. 7879 * 7880 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 7881 * operation. For each SLI4 queue type, the parameters such as queue entry 7882 * count (queue depth) shall be taken from the module parameter. For now, 7883 * we just use some constant number as place holder. 7884 * 7885 * Return codes 7886 * 0 - successful 7887 * -ENOMEM - No availble memory 7888 * -EIO - The mailbox failed to complete successfully. 7889 **/ 7890 int 7891 lpfc_sli4_queue_create(struct lpfc_hba *phba) 7892 { 7893 struct lpfc_queue *qdesc; 7894 int idx, io_channel; 7895 7896 /* 7897 * Create HBA Record arrays. 7898 * Both NVME and FCP will share that same vectors / EQs 7899 */ 7900 io_channel = phba->io_channel_irqs; 7901 if (!io_channel) 7902 return -ERANGE; 7903 7904 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 7905 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 7906 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 7907 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 7908 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 7909 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 7910 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 7911 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 7912 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 7913 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 7914 7915 phba->sli4_hba.hba_eq = kcalloc(io_channel, 7916 sizeof(struct lpfc_queue *), 7917 GFP_KERNEL); 7918 if (!phba->sli4_hba.hba_eq) { 7919 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7920 "2576 Failed allocate memory for " 7921 "fast-path EQ record array\n"); 7922 goto out_error; 7923 } 7924 7925 if (phba->cfg_fcp_io_channel) { 7926 phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel, 7927 sizeof(struct lpfc_queue *), 7928 GFP_KERNEL); 7929 if (!phba->sli4_hba.fcp_cq) { 7930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7931 "2577 Failed allocate memory for " 7932 "fast-path CQ record array\n"); 7933 goto out_error; 7934 } 7935 phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel, 7936 sizeof(struct lpfc_queue *), 7937 GFP_KERNEL); 7938 if (!phba->sli4_hba.fcp_wq) { 7939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7940 "2578 Failed allocate memory for " 7941 "fast-path FCP WQ record array\n"); 7942 goto out_error; 7943 } 7944 /* 7945 * Since the first EQ can have multiple CQs associated with it, 7946 * this array is used to quickly see if we have a FCP fast-path 7947 * CQ match. 
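		 * Each entry holds the queue_id assigned to the matching
		 * fcp_cq[] entry when that CQ is created, so a completion's
		 * CQ id can be mapped to an IO channel without walking the
		 * CQ array.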
7948 */ 7949 phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel, 7950 sizeof(uint16_t), 7951 GFP_KERNEL); 7952 if (!phba->sli4_hba.fcp_cq_map) { 7953 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7954 "2545 Failed allocate memory for " 7955 "fast-path CQ map\n"); 7956 goto out_error; 7957 } 7958 } 7959 7960 if (phba->cfg_nvme_io_channel) { 7961 phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel, 7962 sizeof(struct lpfc_queue *), 7963 GFP_KERNEL); 7964 if (!phba->sli4_hba.nvme_cq) { 7965 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7966 "6077 Failed allocate memory for " 7967 "fast-path CQ record array\n"); 7968 goto out_error; 7969 } 7970 7971 phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel, 7972 sizeof(struct lpfc_queue *), 7973 GFP_KERNEL); 7974 if (!phba->sli4_hba.nvme_wq) { 7975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7976 "2581 Failed allocate memory for " 7977 "fast-path NVME WQ record array\n"); 7978 goto out_error; 7979 } 7980 7981 /* 7982 * Since the first EQ can have multiple CQs associated with it, 7983 * this array is used to quickly see if we have a NVME fast-path 7984 * CQ match. 7985 */ 7986 phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel, 7987 sizeof(uint16_t), 7988 GFP_KERNEL); 7989 if (!phba->sli4_hba.nvme_cq_map) { 7990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7991 "6078 Failed allocate memory for " 7992 "fast-path CQ map\n"); 7993 goto out_error; 7994 } 7995 7996 if (phba->nvmet_support) { 7997 phba->sli4_hba.nvmet_cqset = kcalloc( 7998 phba->cfg_nvmet_mrq, 7999 sizeof(struct lpfc_queue *), 8000 GFP_KERNEL); 8001 if (!phba->sli4_hba.nvmet_cqset) { 8002 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8003 "3121 Fail allocate memory for " 8004 "fast-path CQ set array\n"); 8005 goto out_error; 8006 } 8007 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 8008 phba->cfg_nvmet_mrq, 8009 sizeof(struct lpfc_queue *), 8010 GFP_KERNEL); 8011 if (!phba->sli4_hba.nvmet_mrq_hdr) { 8012 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8013 "3122 Fail allocate memory for " 8014 "fast-path RQ set hdr array\n"); 8015 goto out_error; 8016 } 8017 phba->sli4_hba.nvmet_mrq_data = kcalloc( 8018 phba->cfg_nvmet_mrq, 8019 sizeof(struct lpfc_queue *), 8020 GFP_KERNEL); 8021 if (!phba->sli4_hba.nvmet_mrq_data) { 8022 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8023 "3124 Fail allocate memory for " 8024 "fast-path RQ set data array\n"); 8025 goto out_error; 8026 } 8027 } 8028 } 8029 8030 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8031 8032 /* Create HBA Event Queues (EQs) */ 8033 for (idx = 0; idx < io_channel; idx++) { 8034 /* Create EQs */ 8035 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 8036 phba->sli4_hba.eq_ecount); 8037 if (!qdesc) { 8038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8039 "0497 Failed allocate EQ (%d)\n", idx); 8040 goto out_error; 8041 } 8042 phba->sli4_hba.hba_eq[idx] = qdesc; 8043 } 8044 8045 /* FCP and NVME io channels are not required to be balanced */ 8046 8047 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) 8048 if (lpfc_alloc_fcp_wq_cq(phba, idx)) 8049 goto out_error; 8050 8051 for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) 8052 if (lpfc_alloc_nvme_wq_cq(phba, idx)) 8053 goto out_error; 8054 8055 if (phba->nvmet_support) { 8056 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8057 qdesc = lpfc_sli4_queue_alloc(phba, 8058 phba->sli4_hba.cq_esize, 8059 phba->sli4_hba.cq_ecount); 8060 if (!qdesc) { 8061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8062 "3142 Failed allocate NVME " 8063 "CQ Set (%d)\n", idx); 8064 goto 
out_error; 8065 } 8066 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 8067 } 8068 } 8069 8070 /* 8071 * Create Slow Path Completion Queues (CQs) 8072 */ 8073 8074 /* Create slow-path Mailbox Command Complete Queue */ 8075 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 8076 phba->sli4_hba.cq_ecount); 8077 if (!qdesc) { 8078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8079 "0500 Failed allocate slow-path mailbox CQ\n"); 8080 goto out_error; 8081 } 8082 phba->sli4_hba.mbx_cq = qdesc; 8083 8084 /* Create slow-path ELS Complete Queue */ 8085 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 8086 phba->sli4_hba.cq_ecount); 8087 if (!qdesc) { 8088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8089 "0501 Failed allocate slow-path ELS CQ\n"); 8090 goto out_error; 8091 } 8092 phba->sli4_hba.els_cq = qdesc; 8093 8094 8095 /* 8096 * Create Slow Path Work Queues (WQs) 8097 */ 8098 8099 /* Create Mailbox Command Queue */ 8100 8101 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 8102 phba->sli4_hba.mq_ecount); 8103 if (!qdesc) { 8104 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8105 "0505 Failed allocate slow-path MQ\n"); 8106 goto out_error; 8107 } 8108 phba->sli4_hba.mbx_wq = qdesc; 8109 8110 /* 8111 * Create ELS Work Queues 8112 */ 8113 8114 /* Create slow-path ELS Work Queue */ 8115 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 8116 phba->sli4_hba.wq_ecount); 8117 if (!qdesc) { 8118 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8119 "0504 Failed allocate slow-path ELS WQ\n"); 8120 goto out_error; 8121 } 8122 phba->sli4_hba.els_wq = qdesc; 8123 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8124 8125 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8126 /* Create NVME LS Complete Queue */ 8127 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 8128 phba->sli4_hba.cq_ecount); 8129 if (!qdesc) { 8130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8131 "6079 Failed allocate NVME LS CQ\n"); 8132 goto out_error; 8133 } 8134 phba->sli4_hba.nvmels_cq = qdesc; 8135 8136 /* Create NVME LS Work Queue */ 8137 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 8138 phba->sli4_hba.wq_ecount); 8139 if (!qdesc) { 8140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8141 "6080 Failed allocate NVME LS WQ\n"); 8142 goto out_error; 8143 } 8144 phba->sli4_hba.nvmels_wq = qdesc; 8145 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8146 } 8147 8148 /* 8149 * Create Receive Queue (RQ) 8150 */ 8151 8152 /* Create Receive Queue for header */ 8153 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 8154 phba->sli4_hba.rq_ecount); 8155 if (!qdesc) { 8156 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8157 "0506 Failed allocate receive HRQ\n"); 8158 goto out_error; 8159 } 8160 phba->sli4_hba.hdr_rq = qdesc; 8161 8162 /* Create Receive Queue for data */ 8163 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 8164 phba->sli4_hba.rq_ecount); 8165 if (!qdesc) { 8166 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8167 "0507 Failed allocate receive DRQ\n"); 8168 goto out_error; 8169 } 8170 phba->sli4_hba.dat_rq = qdesc; 8171 8172 if (phba->nvmet_support) { 8173 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8174 /* Create NVMET Receive Queue for header */ 8175 qdesc = lpfc_sli4_queue_alloc(phba, 8176 phba->sli4_hba.rq_esize, 8177 LPFC_NVMET_RQE_DEF_COUNT); 8178 if (!qdesc) { 8179 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8180 "3146 Failed allocate " 8181 "receive HRQ\n"); 8182 goto out_error; 8183 } 8184 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 
8185 8186 /* Only needed for header of RQ pair */ 8187 qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb), 8188 GFP_KERNEL); 8189 if (qdesc->rqbp == NULL) { 8190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8191 "6131 Failed allocate " 8192 "Header RQBP\n"); 8193 goto out_error; 8194 } 8195 8196 /* Create NVMET Receive Queue for data */ 8197 qdesc = lpfc_sli4_queue_alloc(phba, 8198 phba->sli4_hba.rq_esize, 8199 LPFC_NVMET_RQE_DEF_COUNT); 8200 if (!qdesc) { 8201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8202 "3156 Failed allocate " 8203 "receive DRQ\n"); 8204 goto out_error; 8205 } 8206 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 8207 } 8208 } 8209 8210 /* Create the Queues needed for Flash Optimized Fabric operations */ 8211 if (phba->cfg_fof) 8212 lpfc_fof_queue_create(phba); 8213 return 0; 8214 8215 out_error: 8216 lpfc_sli4_queue_destroy(phba); 8217 return -ENOMEM; 8218 } 8219 8220 static inline void 8221 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 8222 { 8223 if (*qp != NULL) { 8224 lpfc_sli4_queue_free(*qp); 8225 *qp = NULL; 8226 } 8227 } 8228 8229 static inline void 8230 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 8231 { 8232 int idx; 8233 8234 if (*qs == NULL) 8235 return; 8236 8237 for (idx = 0; idx < max; idx++) 8238 __lpfc_sli4_release_queue(&(*qs)[idx]); 8239 8240 kfree(*qs); 8241 *qs = NULL; 8242 } 8243 8244 static inline void 8245 lpfc_sli4_release_queue_map(uint16_t **qmap) 8246 { 8247 if (*qmap != NULL) { 8248 kfree(*qmap); 8249 *qmap = NULL; 8250 } 8251 } 8252 8253 /** 8254 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 8255 * @phba: pointer to lpfc hba data structure. 8256 * 8257 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 8258 * operation. 8259 * 8260 * Return codes 8261 * 0 - successful 8262 * -ENOMEM - No available memory 8263 * -EIO - The mailbox failed to complete successfully. 
8264 **/ 8265 void 8266 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 8267 { 8268 if (phba->cfg_fof) 8269 lpfc_fof_queue_destroy(phba); 8270 8271 /* Release HBA eqs */ 8272 lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs); 8273 8274 /* Release FCP cqs */ 8275 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq, 8276 phba->cfg_fcp_io_channel); 8277 8278 /* Release FCP wqs */ 8279 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq, 8280 phba->cfg_fcp_io_channel); 8281 8282 /* Release FCP CQ mapping array */ 8283 lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map); 8284 8285 /* Release NVME cqs */ 8286 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq, 8287 phba->cfg_nvme_io_channel); 8288 8289 /* Release NVME wqs */ 8290 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq, 8291 phba->cfg_nvme_io_channel); 8292 8293 /* Release NVME CQ mapping array */ 8294 lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map); 8295 8296 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 8297 phba->cfg_nvmet_mrq); 8298 8299 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 8300 phba->cfg_nvmet_mrq); 8301 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 8302 phba->cfg_nvmet_mrq); 8303 8304 /* Release mailbox command work queue */ 8305 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 8306 8307 /* Release ELS work queue */ 8308 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 8309 8310 /* Release ELS work queue */ 8311 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 8312 8313 /* Release unsolicited receive queue */ 8314 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 8315 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 8316 8317 /* Release ELS complete queue */ 8318 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 8319 8320 /* Release NVME LS complete queue */ 8321 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 8322 8323 /* Release mailbox command complete queue */ 8324 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 8325 8326 /* Everything on this list has been freed */ 8327 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8328 } 8329 8330 int 8331 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 8332 { 8333 struct lpfc_rqb *rqbp; 8334 struct lpfc_dmabuf *h_buf; 8335 struct rqb_dmabuf *rqb_buffer; 8336 8337 rqbp = rq->rqbp; 8338 while (!list_empty(&rqbp->rqb_buffer_list)) { 8339 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 8340 struct lpfc_dmabuf, list); 8341 8342 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 8343 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 8344 rqbp->buffer_count--; 8345 } 8346 return 1; 8347 } 8348 8349 static int 8350 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 8351 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 8352 int qidx, uint32_t qtype) 8353 { 8354 struct lpfc_sli_ring *pring; 8355 int rc; 8356 8357 if (!eq || !cq || !wq) { 8358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8359 "6085 Fast-path %s (%d) not allocated\n", 8360 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 8361 return -ENOMEM; 8362 } 8363 8364 /* create the Cq first */ 8365 rc = lpfc_cq_create(phba, cq, eq, 8366 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 8367 if (rc) { 8368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8369 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 8370 qidx, (uint32_t)rc); 8371 return rc; 8372 } 8373 8374 if (qtype != LPFC_MBOX) { 8375 /* Setup nvme_cq_map for fast lookup */ 8376 if (cq_map) 8377 *cq_map = cq->queue_id; 8378 8379 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8380 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 8381 qidx, cq->queue_id, qidx, eq->queue_id); 8382 8383 /* create the wq */ 8384 rc = lpfc_wq_create(phba, wq, cq, qtype); 8385 if (rc) { 8386 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8387 "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n", 8388 qidx, (uint32_t)rc); 8389 /* no need to tear down cq - caller will do so */ 8390 return rc; 8391 } 8392 8393 /* Bind this CQ/WQ to the NVME ring */ 8394 pring = wq->pring; 8395 pring->sli.sli4.wqp = (void *)wq; 8396 cq->pring = pring; 8397 8398 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8399 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 8400 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 8401 } else { 8402 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 8403 if (rc) { 8404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8405 "0539 Failed setup of slow-path MQ: " 8406 "rc = 0x%x\n", rc); 8407 /* no need to tear down cq - caller will do so */ 8408 return rc; 8409 } 8410 8411 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8412 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 8413 phba->sli4_hba.mbx_wq->queue_id, 8414 phba->sli4_hba.mbx_cq->queue_id); 8415 } 8416 8417 return 0; 8418 } 8419 8420 /** 8421 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 8422 * @phba: pointer to lpfc hba data structure. 8423 * 8424 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 8425 * operation. 8426 * 8427 * Return codes 8428 * 0 - successful 8429 * -ENOMEM - No available memory 8430 * -EIO - The mailbox failed to complete successfully. 
8431 **/ 8432 int 8433 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 8434 { 8435 uint32_t shdr_status, shdr_add_status; 8436 union lpfc_sli4_cfg_shdr *shdr; 8437 LPFC_MBOXQ_t *mboxq; 8438 int qidx; 8439 uint32_t length, io_channel; 8440 int rc = -ENOMEM; 8441 8442 /* Check for dual-ULP support */ 8443 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8444 if (!mboxq) { 8445 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8446 "3249 Unable to allocate memory for " 8447 "QUERY_FW_CFG mailbox command\n"); 8448 return -ENOMEM; 8449 } 8450 length = (sizeof(struct lpfc_mbx_query_fw_config) - 8451 sizeof(struct lpfc_sli4_cfg_mhdr)); 8452 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 8453 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 8454 length, LPFC_SLI4_MBX_EMBED); 8455 8456 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8457 8458 shdr = (union lpfc_sli4_cfg_shdr *) 8459 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 8460 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8461 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8462 if (shdr_status || shdr_add_status || rc) { 8463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8464 "3250 QUERY_FW_CFG mailbox failed with status " 8465 "x%x add_status x%x, mbx status x%x\n", 8466 shdr_status, shdr_add_status, rc); 8467 if (rc != MBX_TIMEOUT) 8468 mempool_free(mboxq, phba->mbox_mem_pool); 8469 rc = -ENXIO; 8470 goto out_error; 8471 } 8472 8473 phba->sli4_hba.fw_func_mode = 8474 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 8475 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 8476 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 8477 phba->sli4_hba.physical_port = 8478 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 8479 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8480 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 8481 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 8482 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 8483 8484 if (rc != MBX_TIMEOUT) 8485 mempool_free(mboxq, phba->mbox_mem_pool); 8486 8487 /* 8488 * Set up HBA Event Queues (EQs) 8489 */ 8490 io_channel = phba->io_channel_irqs; 8491 8492 /* Set up HBA event queue */ 8493 if (io_channel && !phba->sli4_hba.hba_eq) { 8494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8495 "3147 Fast-path EQs not allocated\n"); 8496 rc = -ENOMEM; 8497 goto out_error; 8498 } 8499 for (qidx = 0; qidx < io_channel; qidx++) { 8500 if (!phba->sli4_hba.hba_eq[qidx]) { 8501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8502 "0522 Fast-path EQ (%d) not " 8503 "allocated\n", qidx); 8504 rc = -ENOMEM; 8505 goto out_destroy; 8506 } 8507 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx], 8508 phba->cfg_fcp_imax); 8509 if (rc) { 8510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8511 "0523 Failed setup of fast-path EQ " 8512 "(%d), rc = 0x%x\n", qidx, 8513 (uint32_t)rc); 8514 goto out_destroy; 8515 } 8516 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8517 "2584 HBA EQ setup: queue[%d]-id=%d\n", 8518 qidx, phba->sli4_hba.hba_eq[qidx]->queue_id); 8519 } 8520 8521 if (phba->cfg_nvme_io_channel) { 8522 if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) { 8523 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8524 "6084 Fast-path NVME %s array not allocated\n", 8525 (phba->sli4_hba.nvme_cq) ? 
"CQ" : "WQ"); 8526 rc = -ENOMEM; 8527 goto out_destroy; 8528 } 8529 8530 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { 8531 rc = lpfc_create_wq_cq(phba, 8532 phba->sli4_hba.hba_eq[ 8533 qidx % io_channel], 8534 phba->sli4_hba.nvme_cq[qidx], 8535 phba->sli4_hba.nvme_wq[qidx], 8536 &phba->sli4_hba.nvme_cq_map[qidx], 8537 qidx, LPFC_NVME); 8538 if (rc) { 8539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8540 "6123 Failed to setup fastpath " 8541 "NVME WQ/CQ (%d), rc = 0x%x\n", 8542 qidx, (uint32_t)rc); 8543 goto out_destroy; 8544 } 8545 } 8546 } 8547 8548 if (phba->cfg_fcp_io_channel) { 8549 /* Set up fast-path FCP Response Complete Queue */ 8550 if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) { 8551 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8552 "3148 Fast-path FCP %s array not allocated\n", 8553 phba->sli4_hba.fcp_cq ? "WQ" : "CQ"); 8554 rc = -ENOMEM; 8555 goto out_destroy; 8556 } 8557 8558 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { 8559 rc = lpfc_create_wq_cq(phba, 8560 phba->sli4_hba.hba_eq[ 8561 qidx % io_channel], 8562 phba->sli4_hba.fcp_cq[qidx], 8563 phba->sli4_hba.fcp_wq[qidx], 8564 &phba->sli4_hba.fcp_cq_map[qidx], 8565 qidx, LPFC_FCP); 8566 if (rc) { 8567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8568 "0535 Failed to setup fastpath " 8569 "FCP WQ/CQ (%d), rc = 0x%x\n", 8570 qidx, (uint32_t)rc); 8571 goto out_destroy; 8572 } 8573 } 8574 } 8575 8576 /* 8577 * Set up Slow Path Complete Queues (CQs) 8578 */ 8579 8580 /* Set up slow-path MBOX CQ/MQ */ 8581 8582 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 8583 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8584 "0528 %s not allocated\n", 8585 phba->sli4_hba.mbx_cq ? 8586 "Mailbox WQ" : "Mailbox CQ"); 8587 rc = -ENOMEM; 8588 goto out_destroy; 8589 } 8590 8591 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], 8592 phba->sli4_hba.mbx_cq, 8593 phba->sli4_hba.mbx_wq, 8594 NULL, 0, LPFC_MBOX); 8595 if (rc) { 8596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8597 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 8598 (uint32_t)rc); 8599 goto out_destroy; 8600 } 8601 if (phba->nvmet_support) { 8602 if (!phba->sli4_hba.nvmet_cqset) { 8603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8604 "3165 Fast-path NVME CQ Set " 8605 "array not allocated\n"); 8606 rc = -ENOMEM; 8607 goto out_destroy; 8608 } 8609 if (phba->cfg_nvmet_mrq > 1) { 8610 rc = lpfc_cq_create_set(phba, 8611 phba->sli4_hba.nvmet_cqset, 8612 phba->sli4_hba.hba_eq, 8613 LPFC_WCQ, LPFC_NVMET); 8614 if (rc) { 8615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8616 "3164 Failed setup of NVME CQ " 8617 "Set, rc = 0x%x\n", 8618 (uint32_t)rc); 8619 goto out_destroy; 8620 } 8621 } else { 8622 /* Set up NVMET Receive Complete Queue */ 8623 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 8624 phba->sli4_hba.hba_eq[0], 8625 LPFC_WCQ, LPFC_NVMET); 8626 if (rc) { 8627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8628 "6089 Failed setup NVMET CQ: " 8629 "rc = 0x%x\n", (uint32_t)rc); 8630 goto out_destroy; 8631 } 8632 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8633 "6090 NVMET CQ setup: cq-id=%d, " 8634 "parent eq-id=%d\n", 8635 phba->sli4_hba.nvmet_cqset[0]->queue_id, 8636 phba->sli4_hba.hba_eq[0]->queue_id); 8637 } 8638 } 8639 8640 /* Set up slow-path ELS WQ/CQ */ 8641 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 8642 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8643 "0530 ELS %s not allocated\n", 8644 phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); 8645 rc = -ENOMEM; 8646 goto out_destroy; 8647 } 8648 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], 8649 phba->sli4_hba.els_cq, 8650 phba->sli4_hba.els_wq, 8651 NULL, 0, LPFC_ELS); 8652 if (rc) { 8653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8654 "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 8655 (uint32_t)rc); 8656 goto out_destroy; 8657 } 8658 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8659 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 8660 phba->sli4_hba.els_wq->queue_id, 8661 phba->sli4_hba.els_cq->queue_id); 8662 8663 if (phba->cfg_nvme_io_channel) { 8664 /* Set up NVME LS Complete Queue */ 8665 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 8666 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8667 "6091 LS %s not allocated\n", 8668 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 8669 rc = -ENOMEM; 8670 goto out_destroy; 8671 } 8672 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], 8673 phba->sli4_hba.nvmels_cq, 8674 phba->sli4_hba.nvmels_wq, 8675 NULL, 0, LPFC_NVME_LS); 8676 if (rc) { 8677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8678 "0529 Failed setup of NVVME LS WQ/CQ: " 8679 "rc = 0x%x\n", (uint32_t)rc); 8680 goto out_destroy; 8681 } 8682 8683 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8684 "6096 ELS WQ setup: wq-id=%d, " 8685 "parent cq-id=%d\n", 8686 phba->sli4_hba.nvmels_wq->queue_id, 8687 phba->sli4_hba.nvmels_cq->queue_id); 8688 } 8689 8690 /* 8691 * Create NVMET Receive Queue (RQ) 8692 */ 8693 if (phba->nvmet_support) { 8694 if ((!phba->sli4_hba.nvmet_cqset) || 8695 (!phba->sli4_hba.nvmet_mrq_hdr) || 8696 (!phba->sli4_hba.nvmet_mrq_data)) { 8697 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8698 "6130 MRQ CQ Queues not " 8699 "allocated\n"); 8700 rc = -ENOMEM; 8701 goto out_destroy; 8702 } 8703 if (phba->cfg_nvmet_mrq > 1) { 8704 rc = lpfc_mrq_create(phba, 8705 phba->sli4_hba.nvmet_mrq_hdr, 8706 phba->sli4_hba.nvmet_mrq_data, 8707 phba->sli4_hba.nvmet_cqset, 8708 LPFC_NVMET); 8709 if (rc) { 8710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8711 "6098 Failed setup of NVMET " 8712 "MRQ: rc = 0x%x\n", 8713 (uint32_t)rc); 8714 goto out_destroy; 8715 } 8716 8717 } else { 8718 rc = lpfc_rq_create(phba, 8719 phba->sli4_hba.nvmet_mrq_hdr[0], 8720 phba->sli4_hba.nvmet_mrq_data[0], 8721 phba->sli4_hba.nvmet_cqset[0], 8722 LPFC_NVMET); 8723 if (rc) { 8724 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8725 "6057 Failed setup of NVMET " 8726 "Receive Queue: rc = 0x%x\n", 8727 (uint32_t)rc); 8728 goto out_destroy; 8729 } 8730 8731 lpfc_printf_log( 8732 phba, KERN_INFO, LOG_INIT, 8733 "6099 NVMET RQ setup: hdr-rq-id=%d, " 8734 "dat-rq-id=%d parent cq-id=%d\n", 8735 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 8736 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 8737 phba->sli4_hba.nvmet_cqset[0]->queue_id); 8738 8739 } 8740 } 8741 8742 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 8743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8744 "0540 Receive Queue not allocated\n"); 8745 rc = -ENOMEM; 8746 goto out_destroy; 8747 } 8748 8749 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 8750 phba->sli4_hba.els_cq, LPFC_USOL); 8751 if (rc) { 8752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8753 "0541 Failed setup of Receive Queue: " 8754 "rc = 0x%x\n", (uint32_t)rc); 8755 goto out_destroy; 8756 } 8757 8758 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8759 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 8760 "parent cq-id=%d\n", 8761 phba->sli4_hba.hdr_rq->queue_id, 8762 phba->sli4_hba.dat_rq->queue_id, 8763 phba->sli4_hba.els_cq->queue_id); 8764 8765 
if (phba->cfg_fof) { 8766 rc = lpfc_fof_queue_setup(phba); 8767 if (rc) { 8768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8769 "0549 Failed setup of FOF Queues: " 8770 "rc = 0x%x\n", rc); 8771 goto out_destroy; 8772 } 8773 } 8774 8775 for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 8776 lpfc_modify_hba_eq_delay(phba, qidx); 8777 8778 return 0; 8779 8780 out_destroy: 8781 lpfc_sli4_queue_unset(phba); 8782 out_error: 8783 return rc; 8784 } 8785 8786 /** 8787 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 8788 * @phba: pointer to lpfc hba data structure. 8789 * 8790 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 8791 * operation. 8792 * 8793 * Return codes 8794 * 0 - successful 8795 * -ENOMEM - No available memory 8796 * -EIO - The mailbox failed to complete successfully. 8797 **/ 8798 void 8799 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 8800 { 8801 int qidx; 8802 8803 /* Unset the queues created for Flash Optimized Fabric operations */ 8804 if (phba->cfg_fof) 8805 lpfc_fof_queue_destroy(phba); 8806 8807 /* Unset mailbox command work queue */ 8808 if (phba->sli4_hba.mbx_wq) 8809 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 8810 8811 /* Unset NVME LS work queue */ 8812 if (phba->sli4_hba.nvmels_wq) 8813 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 8814 8815 /* Unset ELS work queue */ 8816 if (phba->sli4_hba.els_wq) 8817 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 8818 8819 /* Unset unsolicited receive queue */ 8820 if (phba->sli4_hba.hdr_rq) 8821 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 8822 phba->sli4_hba.dat_rq); 8823 8824 /* Unset FCP work queue */ 8825 if (phba->sli4_hba.fcp_wq) 8826 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) 8827 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]); 8828 8829 /* Unset NVME work queue */ 8830 if (phba->sli4_hba.nvme_wq) { 8831 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 8832 lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]); 8833 } 8834 8835 /* Unset mailbox command complete queue */ 8836 if (phba->sli4_hba.mbx_cq) 8837 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 8838 8839 /* Unset ELS complete queue */ 8840 if (phba->sli4_hba.els_cq) 8841 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 8842 8843 /* Unset NVME LS complete queue */ 8844 if (phba->sli4_hba.nvmels_cq) 8845 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 8846 8847 /* Unset NVME response complete queue */ 8848 if (phba->sli4_hba.nvme_cq) 8849 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 8850 lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]); 8851 8852 /* Unset NVMET MRQ queue */ 8853 if (phba->sli4_hba.nvmet_mrq_hdr) { 8854 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 8855 lpfc_rq_destroy(phba, 8856 phba->sli4_hba.nvmet_mrq_hdr[qidx], 8857 phba->sli4_hba.nvmet_mrq_data[qidx]); 8858 } 8859 8860 /* Unset NVMET CQ Set complete queue */ 8861 if (phba->sli4_hba.nvmet_cqset) { 8862 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 8863 lpfc_cq_destroy(phba, 8864 phba->sli4_hba.nvmet_cqset[qidx]); 8865 } 8866 8867 /* Unset FCP response complete queue */ 8868 if (phba->sli4_hba.fcp_cq) 8869 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) 8870 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]); 8871 8872 /* Unset fast-path event queue */ 8873 if (phba->sli4_hba.hba_eq) 8874 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) 8875 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]); 8876 } 8877 8878 /** 8879 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 
8880 * @phba: pointer to lpfc hba data structure. 8881 * 8882 * This routine is invoked to allocate and set up a pool of completion queue 8883 * events. The body of the completion queue event is a completion queue entry 8884 * CQE. For now, this pool is used for the interrupt service routine to queue 8885 * the following HBA completion queue events for the worker thread to process: 8886 * - Mailbox asynchronous events 8887 * - Receive queue completion unsolicited events 8888 * Later, this can be used for all the slow-path events. 8889 * 8890 * Return codes 8891 * 0 - successful 8892 * -ENOMEM - No available memory 8893 **/ 8894 static int 8895 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 8896 { 8897 struct lpfc_cq_event *cq_event; 8898 int i; 8899 8900 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 8901 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 8902 if (!cq_event) 8903 goto out_pool_create_fail; 8904 list_add_tail(&cq_event->list, 8905 &phba->sli4_hba.sp_cqe_event_pool); 8906 } 8907 return 0; 8908 8909 out_pool_create_fail: 8910 lpfc_sli4_cq_event_pool_destroy(phba); 8911 return -ENOMEM; 8912 } 8913 8914 /** 8915 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 8916 * @phba: pointer to lpfc hba data structure. 8917 * 8918 * This routine is invoked to free the pool of completion queue events at 8919 * driver unload time. Note that, it is the responsibility of the driver 8920 * cleanup routine to free all the outstanding completion-queue events 8921 * allocated from this pool back into the pool before invoking this routine 8922 * to destroy the pool. 8923 **/ 8924 static void 8925 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 8926 { 8927 struct lpfc_cq_event *cq_event, *next_cq_event; 8928 8929 list_for_each_entry_safe(cq_event, next_cq_event, 8930 &phba->sli4_hba.sp_cqe_event_pool, list) { 8931 list_del(&cq_event->list); 8932 kfree(cq_event); 8933 } 8934 } 8935 8936 /** 8937 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 8938 * @phba: pointer to lpfc hba data structure. 8939 * 8940 * This routine is the lock free version of the API invoked to allocate a 8941 * completion-queue event from the free pool. 8942 * 8943 * Return: Pointer to the newly allocated completion-queue event if successful 8944 * NULL otherwise. 8945 **/ 8946 struct lpfc_cq_event * 8947 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 8948 { 8949 struct lpfc_cq_event *cq_event = NULL; 8950 8951 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 8952 struct lpfc_cq_event, list); 8953 return cq_event; 8954 } 8955 8956 /** 8957 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 8958 * @phba: pointer to lpfc hba data structure. 8959 * 8960 * This routine is the lock version of the API invoked to allocate a 8961 * completion-queue event from the free pool. 8962 * 8963 * Return: Pointer to the newly allocated completion-queue event if successful 8964 * NULL otherwise. 8965 **/ 8966 struct lpfc_cq_event * 8967 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 8968 { 8969 struct lpfc_cq_event *cq_event; 8970 unsigned long iflags; 8971 8972 spin_lock_irqsave(&phba->hbalock, iflags); 8973 cq_event = __lpfc_sli4_cq_event_alloc(phba); 8974 spin_unlock_irqrestore(&phba->hbalock, iflags); 8975 return cq_event; 8976 } 8977 8978 /** 8979 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 8980 * @phba: pointer to lpfc hba data structure. 
* 8981 * @cq_event: pointer to the completion queue event to be freed. 8982 * 8983 * This routine is the lock free version of the API invoked to release a 8984 * completion-queue event back into the free pool. 8985 **/ 8986 void 8987 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 8988 struct lpfc_cq_event *cq_event) 8989 { 8990 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 8991 } 8992 8993 /** 8994 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 8995 * @phba: pointer to lpfc hba data structure. 8996 * @cq_event: pointer to the completion queue event to be freed. 8997 * 8998 * This routine is the lock version of the API invoked to release a 8999 * completion-queue event back into the free pool. 9000 **/ 9001 void 9002 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 9003 struct lpfc_cq_event *cq_event) 9004 { 9005 unsigned long iflags; 9006 spin_lock_irqsave(&phba->hbalock, iflags); 9007 __lpfc_sli4_cq_event_release(phba, cq_event); 9008 spin_unlock_irqrestore(&phba->hbalock, iflags); 9009 } 9010 9011 /** 9012 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 9013 * @phba: pointer to lpfc hba data structure. 9014 * 9015 * This routine is invoked to release all the pending completion-queue events 9016 * back into the free pool for a device reset. 9017 **/ 9018 static void 9019 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 9020 { 9021 LIST_HEAD(cqelist); 9022 struct lpfc_cq_event *cqe; 9023 unsigned long iflags; 9024 9025 /* Retrieve all the pending WCQEs from pending WCQE lists */ 9026 spin_lock_irqsave(&phba->hbalock, iflags); 9027 /* Pending FCP XRI abort events */ 9028 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 9029 &cqelist); 9030 /* Pending ELS XRI abort events */ 9031 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 9032 &cqelist); 9033 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9034 /* Pending NVME XRI abort events */ 9035 list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue, 9036 &cqelist); 9037 } 9038 /* Pending async events */ 9039 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 9040 &cqelist); 9041 spin_unlock_irqrestore(&phba->hbalock, iflags); 9042 9043 while (!list_empty(&cqelist)) { 9044 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 9045 lpfc_sli4_cq_event_release(phba, cqe); 9046 } 9047 } 9048 9049 /** 9050 * lpfc_pci_function_reset - Reset pci function. 9051 * @phba: pointer to lpfc hba data structure. 9052 * 9053 * This routine is invoked to request a PCI function reset. It destroys 9054 * all resources assigned to the PCI function that originates this request. 9055 * 9056 * Return codes 9057 * 0 - successful 9058 * -ENOMEM - No available memory 9059 * -EIO - The mailbox failed to complete successfully.
**/ 9061 int 9062 lpfc_pci_function_reset(struct lpfc_hba *phba) 9063 { 9064 LPFC_MBOXQ_t *mboxq; 9065 uint32_t rc = 0, if_type; 9066 uint32_t shdr_status, shdr_add_status; 9067 uint32_t rdy_chk; 9068 uint32_t port_reset = 0; 9069 union lpfc_sli4_cfg_shdr *shdr; 9070 struct lpfc_register reg_data; 9071 uint16_t devid; 9072 9073 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9074 switch (if_type) { 9075 case LPFC_SLI_INTF_IF_TYPE_0: 9076 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 9077 GFP_KERNEL); 9078 if (!mboxq) { 9079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9080 "0494 Unable to allocate memory for " 9081 "issuing SLI_FUNCTION_RESET mailbox " 9082 "command\n"); 9083 return -ENOMEM; 9084 } 9085 9086 /* Setup PCI function reset mailbox-ioctl command */ 9087 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9088 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 9089 LPFC_SLI4_MBX_EMBED); 9090 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9091 shdr = (union lpfc_sli4_cfg_shdr *) 9092 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9093 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9094 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 9095 &shdr->response); 9096 if (rc != MBX_TIMEOUT) 9097 mempool_free(mboxq, phba->mbox_mem_pool); 9098 if (shdr_status || shdr_add_status || rc) { 9099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9100 "0495 SLI_FUNCTION_RESET mailbox " 9101 "failed with status x%x add_status x%x," 9102 " mbx status x%x\n", 9103 shdr_status, shdr_add_status, rc); 9104 rc = -ENXIO; 9105 } 9106 break; 9107 case LPFC_SLI_INTF_IF_TYPE_2: 9108 wait: 9109 /* 9110 * Poll the Port Status Register and wait for RDY for 9111 * up to 30 seconds. If the port doesn't respond, treat 9112 * it as an error. 9113 */ 9114 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 9115 if (lpfc_readl(phba->sli4_hba.u.if_type2. 9116 STATUSregaddr, &reg_data.word0)) { 9117 rc = -ENODEV; 9118 goto out; 9119 } 9120 if (bf_get(lpfc_sliport_status_rdy, &reg_data)) 9121 break; 9122 msleep(20); 9123 } 9124 9125 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) { 9126 phba->work_status[0] = readl( 9127 phba->sli4_hba.u.if_type2.ERR1regaddr); 9128 phba->work_status[1] = readl( 9129 phba->sli4_hba.u.if_type2.ERR2regaddr); 9130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9131 "2890 Port not ready, port status reg " 9132 "0x%x error 1=0x%x, error 2=0x%x\n", 9133 reg_data.word0, 9134 phba->work_status[0], 9135 phba->work_status[1]); 9136 rc = -ENODEV; 9137 goto out; 9138 } 9139 9140 if (!port_reset) { 9141 /* 9142 * Reset the port now 9143 */ 9144 reg_data.word0 = 0; 9145 bf_set(lpfc_sliport_ctrl_end, &reg_data, 9146 LPFC_SLIPORT_LITTLE_ENDIAN); 9147 bf_set(lpfc_sliport_ctrl_ip, &reg_data, 9148 LPFC_SLIPORT_INIT_PORT); 9149 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 9150 CTRLregaddr); 9151 /* flush */ 9152 pci_read_config_word(phba->pcidev, 9153 PCI_DEVICE_ID, &devid); 9154 9155 port_reset = 1; 9156 msleep(20); 9157 goto wait; 9158 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) { 9159 rc = -ENODEV; 9160 goto out; 9161 } 9162 break; 9163 9164 case LPFC_SLI_INTF_IF_TYPE_1: 9165 default: 9166 break; 9167 } 9168 9169 out: 9170 /* Catch the not-ready port failure after a port reset. */ 9171 if (rc) { 9172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9173 "3317 HBA not functional: IP Reset Failed " 9174 "try: echo fw_reset > board_mode\n"); 9175 rc = -ENODEV; 9176 } 9177 9178 return rc; 9179 } 9180 9181 /** 9182 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
9183 * @phba: pointer to lpfc hba data structure. 9184 * 9185 * This routine is invoked to set up the PCI device memory space for device 9186 * with SLI-4 interface spec. 9187 * 9188 * Return codes 9189 * 0 - successful 9190 * other values - error 9191 **/ 9192 static int 9193 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 9194 { 9195 struct pci_dev *pdev; 9196 unsigned long bar0map_len, bar1map_len, bar2map_len; 9197 int error = -ENODEV; 9198 uint32_t if_type; 9199 9200 /* Obtain PCI device reference */ 9201 if (!phba->pcidev) 9202 return error; 9203 else 9204 pdev = phba->pcidev; 9205 9206 /* Set the device DMA mask size */ 9207 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 9208 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 9209 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 9210 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 9211 return error; 9212 } 9213 } 9214 9215 /* 9216 * The BARs and register set definitions and offset locations are 9217 * dependent on the if_type. 9218 */ 9219 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 9220 &phba->sli4_hba.sli_intf.word0)) { 9221 return error; 9222 } 9223 9224 /* There is no SLI3 failback for SLI4 devices. */ 9225 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 9226 LPFC_SLI_INTF_VALID) { 9227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9228 "2894 SLI_INTF reg contents invalid " 9229 "sli_intf reg 0x%x\n", 9230 phba->sli4_hba.sli_intf.word0); 9231 return error; 9232 } 9233 9234 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9235 /* 9236 * Get the bus address of SLI4 device Bar regions and the 9237 * number of bytes required by each mapping. The mapping of the 9238 * particular PCI BARs regions is dependent on the type of 9239 * SLI4 device. 9240 */ 9241 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 9242 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 9243 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 9244 9245 /* 9246 * Map SLI4 PCI Config Space Register base to a kernel virtual 9247 * addr 9248 */ 9249 phba->sli4_hba.conf_regs_memmap_p = 9250 ioremap(phba->pci_bar0_map, bar0map_len); 9251 if (!phba->sli4_hba.conf_regs_memmap_p) { 9252 dev_printk(KERN_ERR, &pdev->dev, 9253 "ioremap failed for SLI4 PCI config " 9254 "registers.\n"); 9255 goto out; 9256 } 9257 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 9258 /* Set up BAR0 PCI config space register memory map */ 9259 lpfc_sli4_bar0_register_memmap(phba, if_type); 9260 } else { 9261 phba->pci_bar0_map = pci_resource_start(pdev, 1); 9262 bar0map_len = pci_resource_len(pdev, 1); 9263 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 9264 dev_printk(KERN_ERR, &pdev->dev, 9265 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 9266 goto out; 9267 } 9268 phba->sli4_hba.conf_regs_memmap_p = 9269 ioremap(phba->pci_bar0_map, bar0map_len); 9270 if (!phba->sli4_hba.conf_regs_memmap_p) { 9271 dev_printk(KERN_ERR, &pdev->dev, 9272 "ioremap failed for SLI4 PCI config " 9273 "registers.\n"); 9274 goto out; 9275 } 9276 lpfc_sli4_bar0_register_memmap(phba, if_type); 9277 } 9278 9279 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 9280 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 9281 /* 9282 * Map SLI4 if type 0 HBA Control Register base to a kernel 9283 * virtual address and setup the registers. 
9284 */ 9285 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 9286 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 9287 phba->sli4_hba.ctrl_regs_memmap_p = 9288 ioremap(phba->pci_bar1_map, bar1map_len); 9289 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 9290 dev_printk(KERN_ERR, &pdev->dev, 9291 "ioremap failed for SLI4 HBA control registers.\n"); 9292 goto out_iounmap_conf; 9293 } 9294 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; 9295 lpfc_sli4_bar1_register_memmap(phba); 9296 } 9297 9298 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 9299 (pci_resource_start(pdev, PCI_64BIT_BAR4))) { 9300 /* 9301 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 9302 * virtual address and setup the registers. 9303 */ 9304 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 9305 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 9306 phba->sli4_hba.drbl_regs_memmap_p = 9307 ioremap(phba->pci_bar2_map, bar2map_len); 9308 if (!phba->sli4_hba.drbl_regs_memmap_p) { 9309 dev_printk(KERN_ERR, &pdev->dev, 9310 "ioremap failed for SLI4 HBA doorbell registers.\n"); 9311 goto out_iounmap_ctrl; 9312 } 9313 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 9314 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 9315 if (error) 9316 goto out_iounmap_all; 9317 } 9318 9319 return 0; 9320 9321 out_iounmap_all: 9322 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 9323 out_iounmap_ctrl: 9324 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 9325 out_iounmap_conf: 9326 iounmap(phba->sli4_hba.conf_regs_memmap_p); 9327 out: 9328 return error; 9329 } 9330 9331 /** 9332 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 9333 * @phba: pointer to lpfc hba data structure. 9334 * 9335 * This routine is invoked to unset the PCI device memory space for device 9336 * with SLI-4 interface spec. 9337 **/ 9338 static void 9339 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 9340 { 9341 uint32_t if_type; 9342 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9343 9344 switch (if_type) { 9345 case LPFC_SLI_INTF_IF_TYPE_0: 9346 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 9347 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 9348 iounmap(phba->sli4_hba.conf_regs_memmap_p); 9349 break; 9350 case LPFC_SLI_INTF_IF_TYPE_2: 9351 iounmap(phba->sli4_hba.conf_regs_memmap_p); 9352 break; 9353 case LPFC_SLI_INTF_IF_TYPE_1: 9354 default: 9355 dev_printk(KERN_ERR, &phba->pcidev->dev, 9356 "FATAL - unsupported SLI4 interface type - %d\n", 9357 if_type); 9358 break; 9359 } 9360 } 9361 9362 /** 9363 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 9364 * @phba: pointer to lpfc hba data structure. 9365 * 9366 * This routine is invoked to enable the MSI-X interrupt vectors to device 9367 * with SLI-3 interface specs. 
9368 * 9369 * Return codes 9370 * 0 - successful 9371 * other values - error 9372 **/ 9373 static int 9374 lpfc_sli_enable_msix(struct lpfc_hba *phba) 9375 { 9376 int rc; 9377 LPFC_MBOXQ_t *pmb; 9378 9379 /* Set up MSI-X multi-message vectors */ 9380 rc = pci_alloc_irq_vectors(phba->pcidev, 9381 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 9382 if (rc < 0) { 9383 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9384 "0420 PCI enable MSI-X failed (%d)\n", rc); 9385 goto vec_fail_out; 9386 } 9387 9388 /* 9389 * Assign MSI-X vectors to interrupt handlers 9390 */ 9391 9392 /* vector-0 is associated to slow-path handler */ 9393 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 9394 &lpfc_sli_sp_intr_handler, 0, 9395 LPFC_SP_DRIVER_HANDLER_NAME, phba); 9396 if (rc) { 9397 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9398 "0421 MSI-X slow-path request_irq failed " 9399 "(%d)\n", rc); 9400 goto msi_fail_out; 9401 } 9402 9403 /* vector-1 is associated to fast-path handler */ 9404 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 9405 &lpfc_sli_fp_intr_handler, 0, 9406 LPFC_FP_DRIVER_HANDLER_NAME, phba); 9407 9408 if (rc) { 9409 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9410 "0429 MSI-X fast-path request_irq failed " 9411 "(%d)\n", rc); 9412 goto irq_fail_out; 9413 } 9414 9415 /* 9416 * Configure HBA MSI-X attention conditions to messages 9417 */ 9418 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9419 9420 if (!pmb) { 9421 rc = -ENOMEM; 9422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9423 "0474 Unable to allocate memory for issuing " 9424 "MBOX_CONFIG_MSI command\n"); 9425 goto mem_fail_out; 9426 } 9427 rc = lpfc_config_msi(phba, pmb); 9428 if (rc) 9429 goto mbx_fail_out; 9430 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 9431 if (rc != MBX_SUCCESS) { 9432 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 9433 "0351 Config MSI mailbox command failed, " 9434 "mbxCmd x%x, mbxStatus x%x\n", 9435 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 9436 goto mbx_fail_out; 9437 } 9438 9439 /* Free memory allocated for mailbox command */ 9440 mempool_free(pmb, phba->mbox_mem_pool); 9441 return rc; 9442 9443 mbx_fail_out: 9444 /* Free memory allocated for mailbox command */ 9445 mempool_free(pmb, phba->mbox_mem_pool); 9446 9447 mem_fail_out: 9448 /* free the irq already requested */ 9449 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 9450 9451 irq_fail_out: 9452 /* free the irq already requested */ 9453 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 9454 9455 msi_fail_out: 9456 /* Unconfigure MSI-X capability structure */ 9457 pci_free_irq_vectors(phba->pcidev); 9458 9459 vec_fail_out: 9460 return rc; 9461 } 9462 9463 /** 9464 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 9465 * @phba: pointer to lpfc hba data structure. 9466 * 9467 * This routine is invoked to enable the MSI interrupt mode to device with 9468 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 9469 * enable the MSI vector. The device driver is responsible for calling the 9470 * request_irq() to register MSI vector with a interrupt the handler, which 9471 * is done in this function. 
9472 * 9473 * Return codes 9474 * 0 - successful 9475 * other values - error 9476 */ 9477 static int 9478 lpfc_sli_enable_msi(struct lpfc_hba *phba) 9479 { 9480 int rc; 9481 9482 rc = pci_enable_msi(phba->pcidev); 9483 if (!rc) 9484 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9485 "0462 PCI enable MSI mode success.\n"); 9486 else { 9487 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9488 "0471 PCI enable MSI mode failed (%d)\n", rc); 9489 return rc; 9490 } 9491 9492 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 9493 0, LPFC_DRIVER_NAME, phba); 9494 if (rc) { 9495 pci_disable_msi(phba->pcidev); 9496 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9497 "0478 MSI request_irq failed (%d)\n", rc); 9498 } 9499 return rc; 9500 } 9501 9502 /** 9503 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 9504 * @phba: pointer to lpfc hba data structure. 9505 * 9506 * This routine is invoked to enable device interrupt and associate driver's 9507 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 9508 * spec. Depends on the interrupt mode configured to the driver, the driver 9509 * will try to fallback from the configured interrupt mode to an interrupt 9510 * mode which is supported by the platform, kernel, and device in the order 9511 * of: 9512 * MSI-X -> MSI -> IRQ. 9513 * 9514 * Return codes 9515 * 0 - successful 9516 * other values - error 9517 **/ 9518 static uint32_t 9519 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 9520 { 9521 uint32_t intr_mode = LPFC_INTR_ERROR; 9522 int retval; 9523 9524 if (cfg_mode == 2) { 9525 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 9526 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 9527 if (!retval) { 9528 /* Now, try to enable MSI-X interrupt mode */ 9529 retval = lpfc_sli_enable_msix(phba); 9530 if (!retval) { 9531 /* Indicate initialization to MSI-X mode */ 9532 phba->intr_type = MSIX; 9533 intr_mode = 2; 9534 } 9535 } 9536 } 9537 9538 /* Fallback to MSI if MSI-X initialization failed */ 9539 if (cfg_mode >= 1 && phba->intr_type == NONE) { 9540 retval = lpfc_sli_enable_msi(phba); 9541 if (!retval) { 9542 /* Indicate initialization to MSI mode */ 9543 phba->intr_type = MSI; 9544 intr_mode = 1; 9545 } 9546 } 9547 9548 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 9549 if (phba->intr_type == NONE) { 9550 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 9551 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 9552 if (!retval) { 9553 /* Indicate initialization to INTx mode */ 9554 phba->intr_type = INTx; 9555 intr_mode = 0; 9556 } 9557 } 9558 return intr_mode; 9559 } 9560 9561 /** 9562 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 9563 * @phba: pointer to lpfc hba data structure. 9564 * 9565 * This routine is invoked to disable device interrupt and disassociate the 9566 * driver's interrupt handler(s) from interrupt vector(s) to device with 9567 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 9568 * release the interrupt vector(s) for the message signaled interrupt. 
9569 **/ 9570 static void 9571 lpfc_sli_disable_intr(struct lpfc_hba *phba) 9572 { 9573 int nr_irqs, i; 9574 9575 if (phba->intr_type == MSIX) 9576 nr_irqs = LPFC_MSIX_VECTORS; 9577 else 9578 nr_irqs = 1; 9579 9580 for (i = 0; i < nr_irqs; i++) 9581 free_irq(pci_irq_vector(phba->pcidev, i), phba); 9582 pci_free_irq_vectors(phba->pcidev); 9583 9584 /* Reset interrupt management states */ 9585 phba->intr_type = NONE; 9586 phba->sli.slistat.sli_intr = 0; 9587 } 9588 9589 /** 9590 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 9591 * @phba: pointer to lpfc hba data structure. 9592 * @vectors: number of msix vectors allocated. 9593 * 9594 * The routine will figure out the CPU affinity assignment for every 9595 * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated 9596 * with a pointer to the CPU mask that defines ALL the CPUs this vector 9597 * can be associated with. If the vector can be unquely associated with 9598 * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu. 9599 * In addition, the CPU to IO channel mapping will be calculated 9600 * and the phba->sli4_hba.cpu_map array will reflect this. 9601 */ 9602 static void 9603 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 9604 { 9605 struct lpfc_vector_map_info *cpup; 9606 int index = 0; 9607 int vec = 0; 9608 int cpu; 9609 #ifdef CONFIG_X86 9610 struct cpuinfo_x86 *cpuinfo; 9611 #endif 9612 9613 /* Init cpu_map array */ 9614 memset(phba->sli4_hba.cpu_map, 0xff, 9615 (sizeof(struct lpfc_vector_map_info) * 9616 phba->sli4_hba.num_present_cpu)); 9617 9618 /* Update CPU map with physical id and core id of each CPU */ 9619 cpup = phba->sli4_hba.cpu_map; 9620 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 9621 #ifdef CONFIG_X86 9622 cpuinfo = &cpu_data(cpu); 9623 cpup->phys_id = cpuinfo->phys_proc_id; 9624 cpup->core_id = cpuinfo->cpu_core_id; 9625 #else 9626 /* No distinction between CPUs for other platforms */ 9627 cpup->phys_id = 0; 9628 cpup->core_id = 0; 9629 #endif 9630 cpup->channel_id = index; /* For now round robin */ 9631 cpup->irq = pci_irq_vector(phba->pcidev, vec); 9632 vec++; 9633 if (vec >= vectors) 9634 vec = 0; 9635 index++; 9636 if (index >= phba->cfg_fcp_io_channel) 9637 index = 0; 9638 cpup++; 9639 } 9640 } 9641 9642 9643 /** 9644 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 9645 * @phba: pointer to lpfc hba data structure. 9646 * 9647 * This routine is invoked to enable the MSI-X interrupt vectors to device 9648 * with SLI-4 interface spec. 9649 * 9650 * Return codes 9651 * 0 - successful 9652 * other values - error 9653 **/ 9654 static int 9655 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 9656 { 9657 int vectors, rc, index; 9658 9659 /* Set up MSI-X multi-message vectors */ 9660 vectors = phba->io_channel_irqs; 9661 if (phba->cfg_fof) 9662 vectors++; 9663 9664 rc = pci_alloc_irq_vectors(phba->pcidev, 9665 (phba->nvmet_support) ? 
1 : 2, 9666 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 9667 if (rc < 0) { 9668 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9669 "0484 PCI enable MSI-X failed (%d)\n", rc); 9670 goto vec_fail_out; 9671 } 9672 vectors = rc; 9673 9674 /* Assign MSI-X vectors to interrupt handlers */ 9675 for (index = 0; index < vectors; index++) { 9676 memset(&phba->sli4_hba.handler_name[index], 0, 16); 9677 snprintf((char *)&phba->sli4_hba.handler_name[index], 9678 LPFC_SLI4_HANDLER_NAME_SZ, 9679 LPFC_DRIVER_HANDLER_NAME"%d", index); 9680 9681 phba->sli4_hba.hba_eq_hdl[index].idx = index; 9682 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 9683 atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1); 9684 if (phba->cfg_fof && (index == (vectors - 1))) 9685 rc = request_irq(pci_irq_vector(phba->pcidev, index), 9686 &lpfc_sli4_fof_intr_handler, 0, 9687 (char *)&phba->sli4_hba.handler_name[index], 9688 &phba->sli4_hba.hba_eq_hdl[index]); 9689 else 9690 rc = request_irq(pci_irq_vector(phba->pcidev, index), 9691 &lpfc_sli4_hba_intr_handler, 0, 9692 (char *)&phba->sli4_hba.handler_name[index], 9693 &phba->sli4_hba.hba_eq_hdl[index]); 9694 if (rc) { 9695 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9696 "0486 MSI-X fast-path (%d) " 9697 "request_irq failed (%d)\n", index, rc); 9698 goto cfg_fail_out; 9699 } 9700 } 9701 9702 if (phba->cfg_fof) 9703 vectors--; 9704 9705 if (vectors != phba->io_channel_irqs) { 9706 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9707 "3238 Reducing IO channels to match number of " 9708 "MSI-X vectors, requested %d got %d\n", 9709 phba->io_channel_irqs, vectors); 9710 if (phba->cfg_fcp_io_channel > vectors) 9711 phba->cfg_fcp_io_channel = vectors; 9712 if (phba->cfg_nvme_io_channel > vectors) 9713 phba->cfg_nvme_io_channel = vectors; 9714 if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) 9715 phba->io_channel_irqs = phba->cfg_fcp_io_channel; 9716 else 9717 phba->io_channel_irqs = phba->cfg_nvme_io_channel; 9718 } 9719 lpfc_cpu_affinity_check(phba, vectors); 9720 9721 return rc; 9722 9723 cfg_fail_out: 9724 /* free the irq already requested */ 9725 for (--index; index >= 0; index--) 9726 free_irq(pci_irq_vector(phba->pcidev, index), 9727 &phba->sli4_hba.hba_eq_hdl[index]); 9728 9729 /* Unconfigure MSI-X capability structure */ 9730 pci_free_irq_vectors(phba->pcidev); 9731 9732 vec_fail_out: 9733 return rc; 9734 } 9735 9736 /** 9737 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 9738 * @phba: pointer to lpfc hba data structure. 9739 * 9740 * This routine is invoked to enable the MSI interrupt mode to device with 9741 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 9742 * to enable the MSI vector. The device driver is responsible for calling 9743 * the request_irq() to register MSI vector with a interrupt the handler, 9744 * which is done in this function. 
9745 * 9746 * Return codes 9747 * 0 - successful 9748 * other values - error 9749 **/ 9750 static int 9751 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 9752 { 9753 int rc, index; 9754 9755 rc = pci_enable_msi(phba->pcidev); 9756 if (!rc) 9757 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9758 "0487 PCI enable MSI mode success.\n"); 9759 else { 9760 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9761 "0488 PCI enable MSI mode failed (%d)\n", rc); 9762 return rc; 9763 } 9764 9765 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 9766 0, LPFC_DRIVER_NAME, phba); 9767 if (rc) { 9768 pci_disable_msi(phba->pcidev); 9769 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9770 "0490 MSI request_irq failed (%d)\n", rc); 9771 return rc; 9772 } 9773 9774 for (index = 0; index < phba->io_channel_irqs; index++) { 9775 phba->sli4_hba.hba_eq_hdl[index].idx = index; 9776 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 9777 } 9778 9779 if (phba->cfg_fof) { 9780 phba->sli4_hba.hba_eq_hdl[index].idx = index; 9781 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 9782 } 9783 return 0; 9784 } 9785 9786 /** 9787 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 9788 * @phba: pointer to lpfc hba data structure. 9789 * 9790 * This routine is invoked to enable device interrupt and associate driver's 9791 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 9792 * interface spec. Depends on the interrupt mode configured to the driver, 9793 * the driver will try to fallback from the configured interrupt mode to an 9794 * interrupt mode which is supported by the platform, kernel, and device in 9795 * the order of: 9796 * MSI-X -> MSI -> IRQ. 9797 * 9798 * Return codes 9799 * 0 - successful 9800 * other values - error 9801 **/ 9802 static uint32_t 9803 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 9804 { 9805 uint32_t intr_mode = LPFC_INTR_ERROR; 9806 int retval, idx; 9807 9808 if (cfg_mode == 2) { 9809 /* Preparation before conf_msi mbox cmd */ 9810 retval = 0; 9811 if (!retval) { 9812 /* Now, try to enable MSI-X interrupt mode */ 9813 retval = lpfc_sli4_enable_msix(phba); 9814 if (!retval) { 9815 /* Indicate initialization to MSI-X mode */ 9816 phba->intr_type = MSIX; 9817 intr_mode = 2; 9818 } 9819 } 9820 } 9821 9822 /* Fallback to MSI if MSI-X initialization failed */ 9823 if (cfg_mode >= 1 && phba->intr_type == NONE) { 9824 retval = lpfc_sli4_enable_msi(phba); 9825 if (!retval) { 9826 /* Indicate initialization to MSI mode */ 9827 phba->intr_type = MSI; 9828 intr_mode = 1; 9829 } 9830 } 9831 9832 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 9833 if (phba->intr_type == NONE) { 9834 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 9835 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 9836 if (!retval) { 9837 struct lpfc_hba_eq_hdl *eqhdl; 9838 9839 /* Indicate initialization to INTx mode */ 9840 phba->intr_type = INTx; 9841 intr_mode = 0; 9842 9843 for (idx = 0; idx < phba->io_channel_irqs; idx++) { 9844 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; 9845 eqhdl->idx = idx; 9846 eqhdl->phba = phba; 9847 atomic_set(&eqhdl->hba_eq_in_use, 1); 9848 } 9849 if (phba->cfg_fof) { 9850 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; 9851 eqhdl->idx = idx; 9852 eqhdl->phba = phba; 9853 atomic_set(&eqhdl->hba_eq_in_use, 1); 9854 } 9855 } 9856 } 9857 return intr_mode; 9858 } 9859 9860 /** 9861 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 9862 * @phba: pointer to lpfc hba data structure. 
* 9864 * This routine is invoked to disable device interrupt and disassociate 9865 * the driver's interrupt handler(s) from interrupt vector(s) for a device 9866 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 9867 * will release the interrupt vector(s) for the message signaled interrupt. 9868 **/ 9869 static void 9870 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 9871 { 9872 /* Disable the currently initialized interrupt mode */ 9873 if (phba->intr_type == MSIX) { 9874 int index; 9875 9876 /* Free up MSI-X multi-message vectors */ 9877 for (index = 0; index < phba->io_channel_irqs; index++) 9878 free_irq(pci_irq_vector(phba->pcidev, index), 9879 &phba->sli4_hba.hba_eq_hdl[index]); 9880 9881 if (phba->cfg_fof) 9882 free_irq(pci_irq_vector(phba->pcidev, index), 9883 &phba->sli4_hba.hba_eq_hdl[index]); 9884 } else { 9885 free_irq(phba->pcidev->irq, phba); 9886 } 9887 9888 pci_free_irq_vectors(phba->pcidev); 9889 9890 /* Reset interrupt management states */ 9891 phba->intr_type = NONE; 9892 phba->sli.slistat.sli_intr = 0; 9893 } 9894 9895 /** 9896 * lpfc_unset_hba - Unset SLI3 hba device initialization 9897 * @phba: pointer to lpfc hba data structure. 9898 * 9899 * This routine is invoked to unset the HBA device initialization steps for 9900 * a device with SLI-3 interface spec. 9901 **/ 9902 static void 9903 lpfc_unset_hba(struct lpfc_hba *phba) 9904 { 9905 struct lpfc_vport *vport = phba->pport; 9906 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9907 9908 spin_lock_irq(shost->host_lock); 9909 vport->load_flag |= FC_UNLOADING; 9910 spin_unlock_irq(shost->host_lock); 9911 9912 kfree(phba->vpi_bmask); 9913 kfree(phba->vpi_ids); 9914 9915 lpfc_stop_hba_timers(phba); 9916 9917 phba->pport->work_port_events = 0; 9918 9919 lpfc_sli_hba_down(phba); 9920 9921 lpfc_sli_brdrestart(phba); 9922 9923 lpfc_sli_disable_intr(phba); 9924 9925 return; 9926 } 9927 9928 /** 9929 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 9930 * @phba: Pointer to HBA context object. 9931 * 9932 * This function is called in the SLI4 code path to wait for the device's 9933 * outstanding XRI exchange busy conditions to complete. It checks the XRI 9934 * exchange busy state of outstanding FCP and ELS I/Os every 10 ms for up to 9935 * 10 seconds; after that, it rechecks every 30 seconds, logs an error 9936 * message, and waits indefinitely. Only when all XRI exchange busy 9937 * conditions have completed does the driver unload proceed with issuing the 9938 * function reset mailbox command to the CNA and with the rest of the driver 9939 * unload resource release.
9940 **/ 9941 static void 9942 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 9943 { 9944 int wait_time = 0; 9945 int nvme_xri_cmpl = 1; 9946 int nvmet_xri_cmpl = 1; 9947 int fcp_xri_cmpl = 1; 9948 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 9949 9950 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) 9951 fcp_xri_cmpl = 9952 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 9953 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9954 nvme_xri_cmpl = 9955 list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 9956 nvmet_xri_cmpl = 9957 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 9958 } 9959 9960 while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || 9961 !nvmet_xri_cmpl) { 9962 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 9963 if (!nvme_xri_cmpl) 9964 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9965 "6100 NVME XRI exchange busy " 9966 "wait time: %d seconds.\n", 9967 wait_time/1000); 9968 if (!fcp_xri_cmpl) 9969 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9970 "2877 FCP XRI exchange busy " 9971 "wait time: %d seconds.\n", 9972 wait_time/1000); 9973 if (!els_xri_cmpl) 9974 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9975 "2878 ELS XRI exchange busy " 9976 "wait time: %d seconds.\n", 9977 wait_time/1000); 9978 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 9979 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 9980 } else { 9981 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 9982 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 9983 } 9984 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9985 nvme_xri_cmpl = list_empty( 9986 &phba->sli4_hba.lpfc_abts_nvme_buf_list); 9987 nvmet_xri_cmpl = list_empty( 9988 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 9989 } 9990 9991 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) 9992 fcp_xri_cmpl = list_empty( 9993 &phba->sli4_hba.lpfc_abts_scsi_buf_list); 9994 9995 els_xri_cmpl = 9996 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 9997 9998 } 9999 } 10000 10001 /** 10002 * lpfc_sli4_hba_unset - Unset the fcoe hba 10003 * @phba: Pointer to HBA context object. 10004 * 10005 * This function is called in the SLI4 code path to reset the HBA's FCoE 10006 * function. The caller is not required to hold any lock. This routine 10007 * issues PCI function reset mailbox command to reset the FCoE function. 10008 * At the end of the function, it calls lpfc_hba_down_post function to 10009 * free any pending commands. 10010 **/ 10011 static void 10012 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 10013 { 10014 int wait_cnt = 0; 10015 LPFC_MBOXQ_t *mboxq; 10016 struct pci_dev *pdev = phba->pcidev; 10017 10018 lpfc_stop_hba_timers(phba); 10019 phba->sli4_hba.intr_enable = 0; 10020 10021 /* 10022 * Gracefully wait out the potential current outstanding asynchronous 10023 * mailbox command. 
10024 */ 10025 10026 /* First, block any pending async mailbox command from posted */ 10027 spin_lock_irq(&phba->hbalock); 10028 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 10029 spin_unlock_irq(&phba->hbalock); 10030 /* Now, trying to wait it out if we can */ 10031 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 10032 msleep(10); 10033 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 10034 break; 10035 } 10036 /* Forcefully release the outstanding mailbox command if timed out */ 10037 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 10038 spin_lock_irq(&phba->hbalock); 10039 mboxq = phba->sli.mbox_active; 10040 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 10041 __lpfc_mbox_cmpl_put(phba, mboxq); 10042 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10043 phba->sli.mbox_active = NULL; 10044 spin_unlock_irq(&phba->hbalock); 10045 } 10046 10047 /* Abort all iocbs associated with the hba */ 10048 lpfc_sli_hba_iocb_abort(phba); 10049 10050 /* Wait for completion of device XRI exchange busy */ 10051 lpfc_sli4_xri_exchange_busy_wait(phba); 10052 10053 /* Disable PCI subsystem interrupt */ 10054 lpfc_sli4_disable_intr(phba); 10055 10056 /* Disable SR-IOV if enabled */ 10057 if (phba->cfg_sriov_nr_virtfn) 10058 pci_disable_sriov(pdev); 10059 10060 /* Stop kthread signal shall trigger work_done one more time */ 10061 kthread_stop(phba->worker_thread); 10062 10063 /* Unset the queues shared with the hardware then release all 10064 * allocated resources. 10065 */ 10066 lpfc_sli4_queue_unset(phba); 10067 lpfc_sli4_queue_destroy(phba); 10068 10069 /* Reset SLI4 HBA FCoE function */ 10070 lpfc_pci_function_reset(phba); 10071 10072 /* Stop the SLI4 device port */ 10073 phba->pport->work_port_events = 0; 10074 } 10075 10076 /** 10077 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 10078 * @phba: Pointer to HBA context object. 10079 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 10080 * 10081 * This function is called in the SLI4 code path to read the port's 10082 * sli4 capabilities. 10083 * 10084 * This function may be be called from any context that can block-wait 10085 * for the completion. The expectation is that this routine is called 10086 * typically from probe_one or from the online routine. 
10087 **/ 10088 int 10089 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 10090 { 10091 int rc; 10092 struct lpfc_mqe *mqe; 10093 struct lpfc_pc_sli4_params *sli4_params; 10094 uint32_t mbox_tmo; 10095 10096 rc = 0; 10097 mqe = &mboxq->u.mqe; 10098 10099 /* Read the port's SLI4 Parameters port capabilities */ 10100 lpfc_pc_sli4_params(mboxq); 10101 if (!phba->sli4_hba.intr_enable) 10102 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10103 else { 10104 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 10105 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 10106 } 10107 10108 if (unlikely(rc)) 10109 return 1; 10110 10111 sli4_params = &phba->sli4_hba.pc_sli4_params; 10112 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 10113 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 10114 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 10115 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 10116 &mqe->un.sli4_params); 10117 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 10118 &mqe->un.sli4_params); 10119 sli4_params->proto_types = mqe->un.sli4_params.word3; 10120 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 10121 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 10122 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 10123 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 10124 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 10125 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 10126 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 10127 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 10128 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 10129 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 10130 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 10131 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 10132 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 10133 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 10134 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 10135 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 10136 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 10137 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 10138 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 10139 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 10140 10141 /* Make sure that sge_supp_len can be handled by the driver */ 10142 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 10143 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 10144 10145 return rc; 10146 } 10147 10148 /** 10149 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 10150 * @phba: Pointer to HBA context object. 10151 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 10152 * 10153 * This function is called in the SLI4 code path to read the port's 10154 * sli4 capabilities. 10155 * 10156 * This function may be be called from any context that can block-wait 10157 * for the completion. The expectation is that this routine is called 10158 * typically from probe_one or from the online routine. 
10159 **/ 10160 int 10161 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 10162 { 10163 int rc; 10164 struct lpfc_mqe *mqe = &mboxq->u.mqe; 10165 struct lpfc_pc_sli4_params *sli4_params; 10166 uint32_t mbox_tmo; 10167 int length; 10168 struct lpfc_sli4_parameters *mbx_sli4_parameters; 10169 10170 /* 10171 * By default, the driver assumes the SLI4 port requires RPI 10172 * header postings. The SLI4_PARAM response will correct this 10173 * assumption. 10174 */ 10175 phba->sli4_hba.rpi_hdrs_in_use = 1; 10176 10177 /* Read the port's SLI4 Config Parameters */ 10178 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 10179 sizeof(struct lpfc_sli4_cfg_mhdr)); 10180 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10181 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 10182 length, LPFC_SLI4_MBX_EMBED); 10183 if (!phba->sli4_hba.intr_enable) 10184 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10185 else { 10186 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 10187 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 10188 } 10189 if (unlikely(rc)) 10190 return rc; 10191 sli4_params = &phba->sli4_hba.pc_sli4_params; 10192 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 10193 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 10194 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 10195 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 10196 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 10197 mbx_sli4_parameters); 10198 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 10199 mbx_sli4_parameters); 10200 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 10201 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 10202 else 10203 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 10204 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 10205 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 10206 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 10207 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 10208 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 10209 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 10210 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 10211 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 10212 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 10213 mbx_sli4_parameters); 10214 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 10215 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 10216 mbx_sli4_parameters); 10217 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 10218 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 10219 phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) && 10220 bf_get(cfg_xib, mbx_sli4_parameters)); 10221 10222 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) || 10223 !phba->nvme_support) { 10224 phba->nvme_support = 0; 10225 phba->nvmet_support = 0; 10226 phba->cfg_nvmet_mrq = 0; 10227 phba->cfg_nvme_io_channel = 0; 10228 phba->io_channel_irqs = phba->cfg_fcp_io_channel; 10229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 10230 "6101 Disabling NVME support: " 10231 "Not supported by firmware: %d %d\n", 10232 bf_get(cfg_nvme, mbx_sli4_parameters), 10233 bf_get(cfg_xib, mbx_sli4_parameters)); 10234 10235 /* If firmware doesn't support NVME, just use SCSI support */ 10236 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 10237 return -ENODEV; 10238 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 10239 
} 10240 10241 if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp) 10242 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 10243 10244 /* Make sure that sge_supp_len can be handled by the driver */ 10245 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 10246 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 10247 10248 /* 10249 * Issue IOs with CDB embedded in WQE to minimized the number 10250 * of DMAs the firmware has to do. Setting this to 1 also forces 10251 * the driver to use 128 bytes WQEs for FCP IOs. 10252 */ 10253 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 10254 phba->fcp_embed_io = 1; 10255 else 10256 phba->fcp_embed_io = 0; 10257 10258 /* 10259 * Check if the SLI port supports MDS Diagnostics 10260 */ 10261 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 10262 phba->mds_diags_support = 1; 10263 else 10264 phba->mds_diags_support = 0; 10265 return 0; 10266 } 10267 10268 /** 10269 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 10270 * @pdev: pointer to PCI device 10271 * @pid: pointer to PCI device identifier 10272 * 10273 * This routine is to be called to attach a device with SLI-3 interface spec 10274 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 10275 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 10276 * information of the device and driver to see if the driver state that it can 10277 * support this kind of device. If the match is successful, the driver core 10278 * invokes this routine. If this routine determines it can claim the HBA, it 10279 * does all the initialization that it needs to do to handle the HBA properly. 10280 * 10281 * Return code 10282 * 0 - driver can claim the device 10283 * negative value - driver can not claim the device 10284 **/ 10285 static int 10286 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 10287 { 10288 struct lpfc_hba *phba; 10289 struct lpfc_vport *vport = NULL; 10290 struct Scsi_Host *shost = NULL; 10291 int error; 10292 uint32_t cfg_mode, intr_mode; 10293 10294 /* Allocate memory for HBA structure */ 10295 phba = lpfc_hba_alloc(pdev); 10296 if (!phba) 10297 return -ENOMEM; 10298 10299 /* Perform generic PCI device enabling operation */ 10300 error = lpfc_enable_pci_dev(phba); 10301 if (error) 10302 goto out_free_phba; 10303 10304 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 10305 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 10306 if (error) 10307 goto out_disable_pci_dev; 10308 10309 /* Set up SLI-3 specific device PCI memory space */ 10310 error = lpfc_sli_pci_mem_setup(phba); 10311 if (error) { 10312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10313 "1402 Failed to set up pci memory space.\n"); 10314 goto out_disable_pci_dev; 10315 } 10316 10317 /* Set up SLI-3 specific device driver resources */ 10318 error = lpfc_sli_driver_resource_setup(phba); 10319 if (error) { 10320 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10321 "1404 Failed to set up driver resource.\n"); 10322 goto out_unset_pci_mem_s3; 10323 } 10324 10325 /* Initialize and populate the iocb list per host */ 10326 10327 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 10328 if (error) { 10329 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10330 "1405 Failed to initialize iocb list.\n"); 10331 goto out_unset_driver_resource_s3; 10332 } 10333 10334 /* Set up common device driver resources */ 10335 error = lpfc_setup_driver_resource_phase2(phba); 10336 if (error) { 10337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10338 
"1406 Failed to set up driver resource.\n"); 10339 goto out_free_iocb_list; 10340 } 10341 10342 /* Get the default values for Model Name and Description */ 10343 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 10344 10345 /* Create SCSI host to the physical port */ 10346 error = lpfc_create_shost(phba); 10347 if (error) { 10348 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10349 "1407 Failed to create scsi host.\n"); 10350 goto out_unset_driver_resource; 10351 } 10352 10353 /* Configure sysfs attributes */ 10354 vport = phba->pport; 10355 error = lpfc_alloc_sysfs_attr(vport); 10356 if (error) { 10357 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10358 "1476 Failed to allocate sysfs attr\n"); 10359 goto out_destroy_shost; 10360 } 10361 10362 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 10363 /* Now, trying to enable interrupt and bring up the device */ 10364 cfg_mode = phba->cfg_use_msi; 10365 while (true) { 10366 /* Put device to a known state before enabling interrupt */ 10367 lpfc_stop_port(phba); 10368 /* Configure and enable interrupt */ 10369 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 10370 if (intr_mode == LPFC_INTR_ERROR) { 10371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10372 "0431 Failed to enable interrupt.\n"); 10373 error = -ENODEV; 10374 goto out_free_sysfs_attr; 10375 } 10376 /* SLI-3 HBA setup */ 10377 if (lpfc_sli_hba_setup(phba)) { 10378 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10379 "1477 Failed to set up hba\n"); 10380 error = -ENODEV; 10381 goto out_remove_device; 10382 } 10383 10384 /* Wait 50ms for the interrupts of previous mailbox commands */ 10385 msleep(50); 10386 /* Check active interrupts on message signaled interrupts */ 10387 if (intr_mode == 0 || 10388 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 10389 /* Log the current active interrupt mode */ 10390 phba->intr_mode = intr_mode; 10391 lpfc_log_intr_mode(phba, intr_mode); 10392 break; 10393 } else { 10394 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10395 "0447 Configure interrupt mode (%d) " 10396 "failed active interrupt test.\n", 10397 intr_mode); 10398 /* Disable the current interrupt mode */ 10399 lpfc_sli_disable_intr(phba); 10400 /* Try next level of interrupt mode */ 10401 cfg_mode = --intr_mode; 10402 } 10403 } 10404 10405 /* Perform post initialization setup */ 10406 lpfc_post_init_setup(phba); 10407 10408 /* Check if there are static vports to be created. */ 10409 lpfc_create_static_vport(phba); 10410 10411 return 0; 10412 10413 out_remove_device: 10414 lpfc_unset_hba(phba); 10415 out_free_sysfs_attr: 10416 lpfc_free_sysfs_attr(vport); 10417 out_destroy_shost: 10418 lpfc_destroy_shost(phba); 10419 out_unset_driver_resource: 10420 lpfc_unset_driver_resource_phase2(phba); 10421 out_free_iocb_list: 10422 lpfc_free_iocb_list(phba); 10423 out_unset_driver_resource_s3: 10424 lpfc_sli_driver_resource_unset(phba); 10425 out_unset_pci_mem_s3: 10426 lpfc_sli_pci_mem_unset(phba); 10427 out_disable_pci_dev: 10428 lpfc_disable_pci_dev(phba); 10429 if (shost) 10430 scsi_host_put(shost); 10431 out_free_phba: 10432 lpfc_hba_free(phba); 10433 return error; 10434 } 10435 10436 /** 10437 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 10438 * @pdev: pointer to PCI device 10439 * 10440 * This routine is to be called to disattach a device with SLI-3 interface 10441 * spec from PCI subsystem. 
When an Emulex HBA with SLI-3 interface spec is 10442 * removed from PCI bus, it performs all the necessary cleanup for the HBA 10443 * device to be removed from the PCI subsystem properly. 10444 **/ 10445 static void 10446 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 10447 { 10448 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10449 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 10450 struct lpfc_vport **vports; 10451 struct lpfc_hba *phba = vport->phba; 10452 int i; 10453 10454 spin_lock_irq(&phba->hbalock); 10455 vport->load_flag |= FC_UNLOADING; 10456 spin_unlock_irq(&phba->hbalock); 10457 10458 lpfc_free_sysfs_attr(vport); 10459 10460 /* Release all the vports against this physical port */ 10461 vports = lpfc_create_vport_work_array(phba); 10462 if (vports != NULL) 10463 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10464 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 10465 continue; 10466 fc_vport_terminate(vports[i]->fc_vport); 10467 } 10468 lpfc_destroy_vport_work_array(phba, vports); 10469 10470 /* Remove FC host and then SCSI host with the physical port */ 10471 fc_remove_host(shost); 10472 scsi_remove_host(shost); 10473 10474 lpfc_cleanup(vport); 10475 10476 /* 10477 * Bring down the SLI Layer. This step disable all interrupts, 10478 * clears the rings, discards all mailbox commands, and resets 10479 * the HBA. 10480 */ 10481 10482 /* HBA interrupt will be disabled after this call */ 10483 lpfc_sli_hba_down(phba); 10484 /* Stop kthread signal shall trigger work_done one more time */ 10485 kthread_stop(phba->worker_thread); 10486 /* Final cleanup of txcmplq and reset the HBA */ 10487 lpfc_sli_brdrestart(phba); 10488 10489 kfree(phba->vpi_bmask); 10490 kfree(phba->vpi_ids); 10491 10492 lpfc_stop_hba_timers(phba); 10493 spin_lock_irq(&phba->hbalock); 10494 list_del_init(&vport->listentry); 10495 spin_unlock_irq(&phba->hbalock); 10496 10497 lpfc_debugfs_terminate(vport); 10498 10499 /* Disable SR-IOV if enabled */ 10500 if (phba->cfg_sriov_nr_virtfn) 10501 pci_disable_sriov(pdev); 10502 10503 /* Disable interrupt */ 10504 lpfc_sli_disable_intr(phba); 10505 10506 scsi_host_put(shost); 10507 10508 /* 10509 * Call scsi_free before mem_free since scsi bufs are released to their 10510 * corresponding pools here. 10511 */ 10512 lpfc_scsi_free(phba); 10513 lpfc_mem_free_all(phba); 10514 10515 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 10516 phba->hbqslimp.virt, phba->hbqslimp.phys); 10517 10518 /* Free resources associated with SLI2 interface */ 10519 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 10520 phba->slim2p.virt, phba->slim2p.phys); 10521 10522 /* unmap adapter SLIM and Control Registers */ 10523 iounmap(phba->ctrl_regs_memmap_p); 10524 iounmap(phba->slim_memmap_p); 10525 10526 lpfc_hba_free(phba); 10527 10528 pci_release_mem_regions(pdev); 10529 pci_disable_device(pdev); 10530 } 10531 10532 /** 10533 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 10534 * @pdev: pointer to PCI device 10535 * @msg: power management message 10536 * 10537 * This routine is to be called from the kernel's PCI subsystem to support 10538 * system Power Management (PM) to device with SLI-3 interface spec. When 10539 * PM invokes this method, it quiesces the device by stopping the driver's 10540 * worker thread for the device, turning off device's interrupt and DMA, 10541 * and bring the device offline. 
Note that as the driver implements the 10542 * minimum PM requirements to a power-aware driver's PM support for the 10543 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 10544 * to the suspend() method call will be treated as SUSPEND and the driver will 10545 * fully reinitialize its device during resume() method call, the driver will 10546 * set device to PCI_D3hot state in PCI config space instead of setting it 10547 * according to the @msg provided by the PM. 10548 * 10549 * Return code 10550 * 0 - driver suspended the device 10551 * Error otherwise 10552 **/ 10553 static int 10554 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 10555 { 10556 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10557 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10558 10559 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10560 "0473 PCI device Power Management suspend.\n"); 10561 10562 /* Bring down the device */ 10563 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 10564 lpfc_offline(phba); 10565 kthread_stop(phba->worker_thread); 10566 10567 /* Disable interrupt from device */ 10568 lpfc_sli_disable_intr(phba); 10569 10570 /* Save device state to PCI config space */ 10571 pci_save_state(pdev); 10572 pci_set_power_state(pdev, PCI_D3hot); 10573 10574 return 0; 10575 } 10576 10577 /** 10578 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 10579 * @pdev: pointer to PCI device 10580 * 10581 * This routine is to be called from the kernel's PCI subsystem to support 10582 * system Power Management (PM) to device with SLI-3 interface spec. When PM 10583 * invokes this method, it restores the device's PCI config space state and 10584 * fully reinitializes the device and brings it online. Note that as the 10585 * driver implements the minimum PM requirements to a power-aware driver's 10586 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 10587 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 10588 * driver will fully reinitialize its device during resume() method call, 10589 * the device will be set to PCI_D0 directly in PCI config space before 10590 * restoring the state. 10591 * 10592 * Return code 10593 * 0 - driver suspended the device 10594 * Error otherwise 10595 **/ 10596 static int 10597 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 10598 { 10599 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10600 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10601 uint32_t intr_mode; 10602 int error; 10603 10604 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10605 "0452 PCI device Power Management resume.\n"); 10606 10607 /* Restore device state from PCI config space */ 10608 pci_set_power_state(pdev, PCI_D0); 10609 pci_restore_state(pdev); 10610 10611 /* 10612 * As the new kernel behavior of pci_restore_state() API call clears 10613 * device saved_state flag, need to save the restored state again. 10614 */ 10615 pci_save_state(pdev); 10616 10617 if (pdev->is_busmaster) 10618 pci_set_master(pdev); 10619 10620 /* Startup the kernel thread for this host adapter. 
*/ 10621 phba->worker_thread = kthread_run(lpfc_do_work, phba, 10622 "lpfc_worker_%d", phba->brd_no); 10623 if (IS_ERR(phba->worker_thread)) { 10624 error = PTR_ERR(phba->worker_thread); 10625 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10626 "0434 PM resume failed to start worker " 10627 "thread: error=x%x.\n", error); 10628 return error; 10629 } 10630 10631 /* Configure and enable interrupt */ 10632 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 10633 if (intr_mode == LPFC_INTR_ERROR) { 10634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10635 "0430 PM resume Failed to enable interrupt\n"); 10636 return -EIO; 10637 } else 10638 phba->intr_mode = intr_mode; 10639 10640 /* Restart HBA and bring it online */ 10641 lpfc_sli_brdrestart(phba); 10642 lpfc_online(phba); 10643 10644 /* Log the current active interrupt mode */ 10645 lpfc_log_intr_mode(phba, phba->intr_mode); 10646 10647 return 0; 10648 } 10649 10650 /** 10651 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 10652 * @phba: pointer to lpfc hba data structure. 10653 * 10654 * This routine is called to prepare the SLI3 device for PCI slot recover. It 10655 * aborts all the outstanding SCSI I/Os to the pci device. 10656 **/ 10657 static void 10658 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 10659 { 10660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10661 "2723 PCI channel I/O abort preparing for recovery\n"); 10662 10663 /* 10664 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 10665 * and let the SCSI mid-layer to retry them to recover. 10666 */ 10667 lpfc_sli_abort_fcp_rings(phba); 10668 } 10669 10670 /** 10671 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 10672 * @phba: pointer to lpfc hba data structure. 10673 * 10674 * This routine is called to prepare the SLI3 device for PCI slot reset. It 10675 * disables the device interrupt and pci device, and aborts the internal FCP 10676 * pending I/Os. 10677 **/ 10678 static void 10679 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 10680 { 10681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10682 "2710 PCI channel disable preparing for reset\n"); 10683 10684 /* Block any management I/Os to the device */ 10685 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 10686 10687 /* Block all SCSI devices' I/Os on the host */ 10688 lpfc_scsi_dev_block(phba); 10689 10690 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 10691 lpfc_sli_flush_fcp_rings(phba); 10692 10693 /* stop all timers */ 10694 lpfc_stop_hba_timers(phba); 10695 10696 /* Disable interrupt and pci device */ 10697 lpfc_sli_disable_intr(phba); 10698 pci_disable_device(phba->pcidev); 10699 } 10700 10701 /** 10702 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 10703 * @phba: pointer to lpfc hba data structure. 10704 * 10705 * This routine is called to prepare the SLI3 device for PCI slot permanently 10706 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 10707 * pending I/Os. 
10708 **/ 10709 static void 10710 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 10711 { 10712 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10713 "2711 PCI channel permanent disable for failure\n"); 10714 /* Block all SCSI devices' I/Os on the host */ 10715 lpfc_scsi_dev_block(phba); 10716 10717 /* stop all timers */ 10718 lpfc_stop_hba_timers(phba); 10719 10720 /* Clean up all driver's outstanding SCSI I/Os */ 10721 lpfc_sli_flush_fcp_rings(phba); 10722 } 10723 10724 /** 10725 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 10726 * @pdev: pointer to PCI device. 10727 * @state: the current PCI connection state. 10728 * 10729 * This routine is called from the PCI subsystem for I/O error handling to 10730 * device with SLI-3 interface spec. This function is called by the PCI 10731 * subsystem after a PCI bus error affecting this device has been detected. 10732 * When this function is invoked, it will need to stop all the I/Os and 10733 * interrupt(s) to the device. Once that is done, it will return 10734 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 10735 * as desired. 10736 * 10737 * Return codes 10738 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 10739 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 10740 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10741 **/ 10742 static pci_ers_result_t 10743 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 10744 { 10745 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10746 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10747 10748 switch (state) { 10749 case pci_channel_io_normal: 10750 /* Non-fatal error, prepare for recovery */ 10751 lpfc_sli_prep_dev_for_recover(phba); 10752 return PCI_ERS_RESULT_CAN_RECOVER; 10753 case pci_channel_io_frozen: 10754 /* Fatal error, prepare for slot reset */ 10755 lpfc_sli_prep_dev_for_reset(phba); 10756 return PCI_ERS_RESULT_NEED_RESET; 10757 case pci_channel_io_perm_failure: 10758 /* Permanent failure, prepare for device down */ 10759 lpfc_sli_prep_dev_for_perm_failure(phba); 10760 return PCI_ERS_RESULT_DISCONNECT; 10761 default: 10762 /* Unknown state, prepare and request slot reset */ 10763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10764 "0472 Unknown PCI error state: x%x\n", state); 10765 lpfc_sli_prep_dev_for_reset(phba); 10766 return PCI_ERS_RESULT_NEED_RESET; 10767 } 10768 } 10769 10770 /** 10771 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 10772 * @pdev: pointer to PCI device. 10773 * 10774 * This routine is called from the PCI subsystem for error handling to 10775 * device with SLI-3 interface spec. This is called after PCI bus has been 10776 * reset to restart the PCI card from scratch, as if from a cold-boot. 10777 * During the PCI subsystem error recovery, after driver returns 10778 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 10779 * recovery and then call this routine before calling the .resume method 10780 * to recover the device. This function will initialize the HBA device, 10781 * enable the interrupt, but it will just put the HBA to offline state 10782 * without passing any I/O traffic. 
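 *
 * In the overall PCI error-recovery flow these handlers are typically
 * invoked in the order error_detected() -> slot_reset() -> resume(); for
 * lpfc they are reached through the generic lpfc_io_* dispatchers wired
 * into the lpfc_err_handler structure at the bottom of this file.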
10783 * 10784 * Return codes 10785 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 10786 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10787 */ 10788 static pci_ers_result_t 10789 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 10790 { 10791 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10792 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10793 struct lpfc_sli *psli = &phba->sli; 10794 uint32_t intr_mode; 10795 10796 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 10797 if (pci_enable_device_mem(pdev)) { 10798 printk(KERN_ERR "lpfc: Cannot re-enable " 10799 "PCI device after reset.\n"); 10800 return PCI_ERS_RESULT_DISCONNECT; 10801 } 10802 10803 pci_restore_state(pdev); 10804 10805 /* 10806 * As the new kernel behavior of pci_restore_state() API call clears 10807 * device saved_state flag, need to save the restored state again. 10808 */ 10809 pci_save_state(pdev); 10810 10811 if (pdev->is_busmaster) 10812 pci_set_master(pdev); 10813 10814 spin_lock_irq(&phba->hbalock); 10815 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 10816 spin_unlock_irq(&phba->hbalock); 10817 10818 /* Configure and enable interrupt */ 10819 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 10820 if (intr_mode == LPFC_INTR_ERROR) { 10821 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10822 "0427 Cannot re-enable interrupt after " 10823 "slot reset.\n"); 10824 return PCI_ERS_RESULT_DISCONNECT; 10825 } else 10826 phba->intr_mode = intr_mode; 10827 10828 /* Take device offline, it will perform cleanup */ 10829 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 10830 lpfc_offline(phba); 10831 lpfc_sli_brdrestart(phba); 10832 10833 /* Log the current active interrupt mode */ 10834 lpfc_log_intr_mode(phba, phba->intr_mode); 10835 10836 return PCI_ERS_RESULT_RECOVERED; 10837 } 10838 10839 /** 10840 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 10841 * @pdev: pointer to PCI device 10842 * 10843 * This routine is called from the PCI subsystem for error handling to device 10844 * with SLI-3 interface spec. It is called when kernel error recovery tells 10845 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 10846 * error recovery. After this call, traffic can start to flow from this device 10847 * again. 10848 */ 10849 static void 10850 lpfc_io_resume_s3(struct pci_dev *pdev) 10851 { 10852 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10853 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10854 10855 /* Bring device online, it will be no-op for non-fatal error resume */ 10856 lpfc_online(phba); 10857 10858 /* Clean up Advanced Error Reporting (AER) if needed */ 10859 if (phba->hba_flag & HBA_AER_ENABLED) 10860 pci_cleanup_aer_uncorrect_error_status(pdev); 10861 } 10862 10863 /** 10864 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 10865 * @phba: pointer to lpfc hba data structure. 
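 *
 * The reservation scales with the configured max_xri roughly as follows:
 * up to 100 XRIs reserve 10 IOCBs, up to 256 reserve 25, up to 512 reserve
 * 50, up to 1024 reserve 100, up to 1536 reserve 150, up to 2048 reserve
 * 200, and anything larger reserves 250; non-SLI4 ports reserve none.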
10866 * 10867 * returns the number of ELS/CT IOCBs to reserve 10868 **/ 10869 int 10870 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 10871 { 10872 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 10873 10874 if (phba->sli_rev == LPFC_SLI_REV4) { 10875 if (max_xri <= 100) 10876 return 10; 10877 else if (max_xri <= 256) 10878 return 25; 10879 else if (max_xri <= 512) 10880 return 50; 10881 else if (max_xri <= 1024) 10882 return 100; 10883 else if (max_xri <= 1536) 10884 return 150; 10885 else if (max_xri <= 2048) 10886 return 200; 10887 else 10888 return 250; 10889 } else 10890 return 0; 10891 } 10892 10893 /** 10894 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 10895 * @phba: pointer to lpfc hba data structure. 10896 * 10897 * returns the number of ELS/CT + NVMET IOCBs to reserve 10898 **/ 10899 int 10900 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 10901 { 10902 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 10903 10904 if (phba->nvmet_support) 10905 max_xri += LPFC_NVMET_BUF_POST; 10906 return max_xri; 10907 } 10908 10909 10910 /** 10911 * lpfc_write_firmware - attempt to write a firmware image to the port 10912 * @fw: pointer to firmware image returned from request_firmware. 10913 * @phba: pointer to lpfc hba data structure. 10914 * 10915 **/ 10916 static void 10917 lpfc_write_firmware(const struct firmware *fw, void *context) 10918 { 10919 struct lpfc_hba *phba = (struct lpfc_hba *)context; 10920 char fwrev[FW_REV_STR_SIZE]; 10921 struct lpfc_grp_hdr *image; 10922 struct list_head dma_buffer_list; 10923 int i, rc = 0; 10924 struct lpfc_dmabuf *dmabuf, *next; 10925 uint32_t offset = 0, temp_offset = 0; 10926 uint32_t magic_number, ftype, fid, fsize; 10927 10928 /* It can be null in no-wait mode, sanity check */ 10929 if (!fw) { 10930 rc = -ENXIO; 10931 goto out; 10932 } 10933 image = (struct lpfc_grp_hdr *)fw->data; 10934 10935 magic_number = be32_to_cpu(image->magic_number); 10936 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 10937 fid = bf_get_be32(lpfc_grp_hdr_id, image), 10938 fsize = be32_to_cpu(image->size); 10939 10940 INIT_LIST_HEAD(&dma_buffer_list); 10941 if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 && 10942 magic_number != LPFC_GROUP_OJECT_MAGIC_G6) || 10943 ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) { 10944 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10945 "3022 Invalid FW image found. 
" 10946 "Magic:%x Type:%x ID:%x Size %d %zd\n", 10947 magic_number, ftype, fid, fsize, fw->size); 10948 rc = -EINVAL; 10949 goto release_out; 10950 } 10951 lpfc_decode_firmware_rev(phba, fwrev, 1); 10952 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 10953 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10954 "3023 Updating Firmware, Current Version:%s " 10955 "New Version:%s\n", 10956 fwrev, image->revision); 10957 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 10958 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 10959 GFP_KERNEL); 10960 if (!dmabuf) { 10961 rc = -ENOMEM; 10962 goto release_out; 10963 } 10964 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 10965 SLI4_PAGE_SIZE, 10966 &dmabuf->phys, 10967 GFP_KERNEL); 10968 if (!dmabuf->virt) { 10969 kfree(dmabuf); 10970 rc = -ENOMEM; 10971 goto release_out; 10972 } 10973 list_add_tail(&dmabuf->list, &dma_buffer_list); 10974 } 10975 while (offset < fw->size) { 10976 temp_offset = offset; 10977 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 10978 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 10979 memcpy(dmabuf->virt, 10980 fw->data + temp_offset, 10981 fw->size - temp_offset); 10982 temp_offset = fw->size; 10983 break; 10984 } 10985 memcpy(dmabuf->virt, fw->data + temp_offset, 10986 SLI4_PAGE_SIZE); 10987 temp_offset += SLI4_PAGE_SIZE; 10988 } 10989 rc = lpfc_wr_object(phba, &dma_buffer_list, 10990 (fw->size - offset), &offset); 10991 if (rc) 10992 goto release_out; 10993 } 10994 rc = offset; 10995 } 10996 10997 release_out: 10998 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 10999 list_del(&dmabuf->list); 11000 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 11001 dmabuf->virt, dmabuf->phys); 11002 kfree(dmabuf); 11003 } 11004 release_firmware(fw); 11005 out: 11006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11007 "3024 Firmware update done: %d.\n", rc); 11008 return; 11009 } 11010 11011 /** 11012 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 11013 * @phba: pointer to lpfc hba data structure. 11014 * 11015 * This routine is called to perform Linux generic firmware upgrade on device 11016 * that supports such feature. 11017 **/ 11018 int 11019 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 11020 { 11021 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 11022 int ret; 11023 const struct firmware *fw; 11024 11025 /* Only supported on SLI4 interface type 2 for now */ 11026 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 11027 LPFC_SLI_INTF_IF_TYPE_2) 11028 return -EPERM; 11029 11030 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 11031 11032 if (fw_upgrade == INT_FW_UPGRADE) { 11033 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 11034 file_name, &phba->pcidev->dev, 11035 GFP_KERNEL, (void *)phba, 11036 lpfc_write_firmware); 11037 } else if (fw_upgrade == RUN_FW_UPGRADE) { 11038 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 11039 if (!ret) 11040 lpfc_write_firmware(fw, (void *)phba); 11041 } else { 11042 ret = -EINVAL; 11043 } 11044 11045 return ret; 11046 } 11047 11048 /** 11049 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 11050 * @pdev: pointer to PCI device 11051 * @pid: pointer to PCI device identifier 11052 * 11053 * This routine is called from the kernel's PCI subsystem to device with 11054 * SLI-4 interface spec. 
When an Emulex HBA with SLI-4 interface spec is
11055 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
11056 * information of the device and driver to see whether the driver can
11057 * support this kind of device. If the match is successful, the driver
11058 * core invokes this routine. If this routine determines it can claim the HBA,
11059 * it does all the initialization that it needs to do to handle the HBA
11060 * properly.
11061 *
11062 * Return code
11063 * 0 - driver can claim the device
11064 * negative value - driver cannot claim the device
11065 **/
11066 static int
11067 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11068 {
11069 struct lpfc_hba *phba;
11070 struct lpfc_vport *vport = NULL;
11071 struct Scsi_Host *shost = NULL;
11072 int error;
11073 uint32_t cfg_mode, intr_mode;
11074
11075 /* Allocate memory for HBA structure */
11076 phba = lpfc_hba_alloc(pdev);
11077 if (!phba)
11078 return -ENOMEM;
11079
11080 /* Perform generic PCI device enabling operation */
11081 error = lpfc_enable_pci_dev(phba);
11082 if (error)
11083 goto out_free_phba;
11084
11085 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
11086 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
11087 if (error)
11088 goto out_disable_pci_dev;
11089
11090 /* Set up SLI-4 specific device PCI memory space */
11091 error = lpfc_sli4_pci_mem_setup(phba);
11092 if (error) {
11093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11094 "1410 Failed to set up pci memory space.\n");
11095 goto out_disable_pci_dev;
11096 }
11097
11098 /* Set up SLI-4 Specific device driver resources */
11099 error = lpfc_sli4_driver_resource_setup(phba);
11100 if (error) {
11101 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11102 "1412 Failed to set up driver resource.\n");
11103 goto out_unset_pci_mem_s4;
11104 }
11105
11106 INIT_LIST_HEAD(&phba->active_rrq_list);
11107 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
11108
11109 /* Set up common device driver resources */
11110 error = lpfc_setup_driver_resource_phase2(phba);
11111 if (error) {
11112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11113 "1414 Failed to set up driver resource.\n");
11114 goto out_unset_driver_resource_s4;
11115 }
11116
11117 /* Get the default values for Model Name and Description */
11118 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
11119
11120 /* Create SCSI host to the physical port */
11121 error = lpfc_create_shost(phba);
11122 if (error) {
11123 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11124 "1415 Failed to create scsi host.\n");
11125 goto out_unset_driver_resource;
11126 }
11127
11128 /* Configure sysfs attributes */
11129 vport = phba->pport;
11130 error = lpfc_alloc_sysfs_attr(vport);
11131 if (error) {
11132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11133 "1416 Failed to allocate sysfs attr\n");
11134 goto out_destroy_shost;
11135 }
11136
11137 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
11138 /* Now, trying to enable interrupt and bring up the device */
11139 cfg_mode = phba->cfg_use_msi;
11140
11141 /* Put device to a known state before enabling interrupt */
11142 lpfc_stop_port(phba);
11143
11144 /* Configure and enable interrupt */
11145 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
11146 if (intr_mode == LPFC_INTR_ERROR) {
11147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11148 "0426 Failed to enable interrupt.\n");
11149 error = -ENODEV;
11150 goto out_free_sysfs_attr;
11151 }
11152 /* Default to single EQ for
non-MSI-X */ 11153 if (phba->intr_type != MSIX) { 11154 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) 11155 phba->cfg_fcp_io_channel = 1; 11156 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11157 phba->cfg_nvme_io_channel = 1; 11158 if (phba->nvmet_support) 11159 phba->cfg_nvmet_mrq = 1; 11160 } 11161 phba->io_channel_irqs = 1; 11162 } 11163 11164 /* Set up SLI-4 HBA */ 11165 if (lpfc_sli4_hba_setup(phba)) { 11166 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11167 "1421 Failed to set up hba\n"); 11168 error = -ENODEV; 11169 goto out_disable_intr; 11170 } 11171 11172 /* Log the current active interrupt mode */ 11173 phba->intr_mode = intr_mode; 11174 lpfc_log_intr_mode(phba, intr_mode); 11175 11176 /* Perform post initialization setup */ 11177 lpfc_post_init_setup(phba); 11178 11179 /* NVME support in FW earlier in the driver load corrects the 11180 * FC4 type making a check for nvme_support unnecessary. 11181 */ 11182 if ((phba->nvmet_support == 0) && 11183 (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { 11184 /* Create NVME binding with nvme_fc_transport. This 11185 * ensures the vport is initialized. If the localport 11186 * create fails, it should not unload the driver to 11187 * support field issues. 11188 */ 11189 error = lpfc_nvme_create_localport(vport); 11190 if (error) { 11191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11192 "6004 NVME registration failed, " 11193 "error x%x\n", 11194 error); 11195 } 11196 } 11197 11198 /* check for firmware upgrade or downgrade */ 11199 if (phba->cfg_request_firmware_upgrade) 11200 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 11201 11202 /* Check if there are static vports to be created. */ 11203 lpfc_create_static_vport(phba); 11204 return 0; 11205 11206 out_disable_intr: 11207 lpfc_sli4_disable_intr(phba); 11208 out_free_sysfs_attr: 11209 lpfc_free_sysfs_attr(vport); 11210 out_destroy_shost: 11211 lpfc_destroy_shost(phba); 11212 out_unset_driver_resource: 11213 lpfc_unset_driver_resource_phase2(phba); 11214 out_unset_driver_resource_s4: 11215 lpfc_sli4_driver_resource_unset(phba); 11216 out_unset_pci_mem_s4: 11217 lpfc_sli4_pci_mem_unset(phba); 11218 out_disable_pci_dev: 11219 lpfc_disable_pci_dev(phba); 11220 if (shost) 11221 scsi_host_put(shost); 11222 out_free_phba: 11223 lpfc_hba_free(phba); 11224 return error; 11225 } 11226 11227 /** 11228 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 11229 * @pdev: pointer to PCI device 11230 * 11231 * This routine is called from the kernel's PCI subsystem to device with 11232 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 11233 * removed from PCI bus, it performs all the necessary cleanup for the HBA 11234 * device to be removed from the PCI subsystem properly. 
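 *
 * The teardown roughly mirrors the probe path in reverse: sysfs attributes
 * and child vports are released first, then the FC and SCSI hosts, then the
 * node list and the NVMe local/target ports, and only after the SLI layer
 * has been brought down are the buffer lists, driver resources and PCI
 * resources freed.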
11235 **/ 11236 static void 11237 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 11238 { 11239 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11240 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 11241 struct lpfc_vport **vports; 11242 struct lpfc_hba *phba = vport->phba; 11243 int i; 11244 11245 /* Mark the device unloading flag */ 11246 spin_lock_irq(&phba->hbalock); 11247 vport->load_flag |= FC_UNLOADING; 11248 spin_unlock_irq(&phba->hbalock); 11249 11250 /* Free the HBA sysfs attributes */ 11251 lpfc_free_sysfs_attr(vport); 11252 11253 /* Release all the vports against this physical port */ 11254 vports = lpfc_create_vport_work_array(phba); 11255 if (vports != NULL) 11256 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11257 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 11258 continue; 11259 fc_vport_terminate(vports[i]->fc_vport); 11260 } 11261 lpfc_destroy_vport_work_array(phba, vports); 11262 11263 /* Remove FC host and then SCSI host with the physical port */ 11264 fc_remove_host(shost); 11265 scsi_remove_host(shost); 11266 11267 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 11268 * localports are destroyed after to cleanup all transport memory. 11269 */ 11270 lpfc_cleanup(vport); 11271 lpfc_nvmet_destroy_targetport(phba); 11272 lpfc_nvme_destroy_localport(vport); 11273 11274 /* 11275 * Bring down the SLI Layer. This step disables all interrupts, 11276 * clears the rings, discards all mailbox commands, and resets 11277 * the HBA FCoE function. 11278 */ 11279 lpfc_debugfs_terminate(vport); 11280 lpfc_sli4_hba_unset(phba); 11281 11282 spin_lock_irq(&phba->hbalock); 11283 list_del_init(&vport->listentry); 11284 spin_unlock_irq(&phba->hbalock); 11285 11286 /* Perform scsi free before driver resource_unset since scsi 11287 * buffers are released to their corresponding pools here. 11288 */ 11289 lpfc_scsi_free(phba); 11290 lpfc_nvme_free(phba); 11291 lpfc_free_iocb_list(phba); 11292 11293 lpfc_sli4_driver_resource_unset(phba); 11294 11295 /* Unmap adapter Control and Doorbell registers */ 11296 lpfc_sli4_pci_mem_unset(phba); 11297 11298 /* Release PCI resources and disable device's PCI function */ 11299 scsi_host_put(shost); 11300 lpfc_disable_pci_dev(phba); 11301 11302 /* Finally, free the driver's device data structure */ 11303 lpfc_hba_free(phba); 11304 11305 return; 11306 } 11307 11308 /** 11309 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 11310 * @pdev: pointer to PCI device 11311 * @msg: power management message 11312 * 11313 * This routine is called from the kernel's PCI subsystem to support system 11314 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 11315 * this method, it quiesces the device by stopping the driver's worker 11316 * thread for the device, turning off device's interrupt and DMA, and bring 11317 * the device offline. Note that as the driver implements the minimum PM 11318 * requirements to a power-aware driver's PM support for suspend/resume -- all 11319 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 11320 * method call will be treated as SUSPEND and the driver will fully 11321 * reinitialize its device during resume() method call, the driver will set 11322 * device to PCI_D3hot state in PCI config space instead of setting it 11323 * according to the @msg provided by the PM. 
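 *
 * This callback is not registered with the PCI core directly; the generic
 * lpfc_pci_suspend_one() entry in the lpfc_driver structure at the end of
 * this file dispatches to it based on the HBA's pci_dev_grp.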
11324 *
11325 * Return code
11326 * 0 - driver suspended the device
11327 * Error otherwise
11328 **/
11329 static int
11330 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
11331 {
11332 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11333 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11334
11335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11336 "2843 PCI device Power Management suspend.\n");
11337
11338 /* Bring down the device */
11339 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
11340 lpfc_offline(phba);
11341 kthread_stop(phba->worker_thread);
11342
11343 /* Disable interrupt from device */
11344 lpfc_sli4_disable_intr(phba);
11345 lpfc_sli4_queue_destroy(phba);
11346
11347 /* Save device state to PCI config space */
11348 pci_save_state(pdev);
11349 pci_set_power_state(pdev, PCI_D3hot);
11350
11351 return 0;
11352 }
11353
11354 /**
11355 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
11356 * @pdev: pointer to PCI device
11357 *
11358 * This routine is called from the kernel's PCI subsystem to support system
11359 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
11360 * this method, it restores the device's PCI config space state and fully
11361 * reinitializes the device and brings it online. Note that as the driver
11362 * implements the minimum PM requirements to a power-aware driver's PM for
11363 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
11364 * to the suspend() method call will be treated as SUSPEND and the driver
11365 * will fully reinitialize its device during resume() method call, the device
11366 * will be set to PCI_D0 directly in PCI config space before restoring the
11367 * state.
11368 *
11369 * Return code
11370 * 0 - driver resumed the device
11371 * Error otherwise
11372 **/
11373 static int
11374 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
11375 {
11376 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11377 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11378 uint32_t intr_mode;
11379 int error;
11380
11381 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11382 "0292 PCI device Power Management resume.\n");
11383
11384 /* Restore device state from PCI config space */
11385 pci_set_power_state(pdev, PCI_D0);
11386 pci_restore_state(pdev);
11387
11388 /*
11389 * As the new kernel behavior of pci_restore_state() API call clears
11390 * device saved_state flag, need to save the restored state again.
11391 */
11392 pci_save_state(pdev);
11393
11394 if (pdev->is_busmaster)
11395 pci_set_master(pdev);
11396
11397 /* Startup the kernel thread for this host adapter.
*/ 11398 phba->worker_thread = kthread_run(lpfc_do_work, phba, 11399 "lpfc_worker_%d", phba->brd_no); 11400 if (IS_ERR(phba->worker_thread)) { 11401 error = PTR_ERR(phba->worker_thread); 11402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11403 "0293 PM resume failed to start worker " 11404 "thread: error=x%x.\n", error); 11405 return error; 11406 } 11407 11408 /* Configure and enable interrupt */ 11409 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 11410 if (intr_mode == LPFC_INTR_ERROR) { 11411 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11412 "0294 PM resume Failed to enable interrupt\n"); 11413 return -EIO; 11414 } else 11415 phba->intr_mode = intr_mode; 11416 11417 /* Restart HBA and bring it online */ 11418 lpfc_sli_brdrestart(phba); 11419 lpfc_online(phba); 11420 11421 /* Log the current active interrupt mode */ 11422 lpfc_log_intr_mode(phba, phba->intr_mode); 11423 11424 return 0; 11425 } 11426 11427 /** 11428 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 11429 * @phba: pointer to lpfc hba data structure. 11430 * 11431 * This routine is called to prepare the SLI4 device for PCI slot recover. It 11432 * aborts all the outstanding SCSI I/Os to the pci device. 11433 **/ 11434 static void 11435 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 11436 { 11437 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11438 "2828 PCI channel I/O abort preparing for recovery\n"); 11439 /* 11440 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 11441 * and let the SCSI mid-layer to retry them to recover. 11442 */ 11443 lpfc_sli_abort_fcp_rings(phba); 11444 } 11445 11446 /** 11447 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 11448 * @phba: pointer to lpfc hba data structure. 11449 * 11450 * This routine is called to prepare the SLI4 device for PCI slot reset. It 11451 * disables the device interrupt and pci device, and aborts the internal FCP 11452 * pending I/Os. 11453 **/ 11454 static void 11455 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 11456 { 11457 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11458 "2826 PCI channel disable preparing for reset\n"); 11459 11460 /* Block any management I/Os to the device */ 11461 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 11462 11463 /* Block all SCSI devices' I/Os on the host */ 11464 lpfc_scsi_dev_block(phba); 11465 11466 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 11467 lpfc_sli_flush_fcp_rings(phba); 11468 11469 /* stop all timers */ 11470 lpfc_stop_hba_timers(phba); 11471 11472 /* Disable interrupt and pci device */ 11473 lpfc_sli4_disable_intr(phba); 11474 lpfc_sli4_queue_destroy(phba); 11475 pci_disable_device(phba->pcidev); 11476 } 11477 11478 /** 11479 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 11480 * @phba: pointer to lpfc hba data structure. 11481 * 11482 * This routine is called to prepare the SLI4 device for PCI slot permanently 11483 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 11484 * pending I/Os. 
11485 **/ 11486 static void 11487 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 11488 { 11489 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11490 "2827 PCI channel permanent disable for failure\n"); 11491 11492 /* Block all SCSI devices' I/Os on the host */ 11493 lpfc_scsi_dev_block(phba); 11494 11495 /* stop all timers */ 11496 lpfc_stop_hba_timers(phba); 11497 11498 /* Clean up all driver's outstanding SCSI I/Os */ 11499 lpfc_sli_flush_fcp_rings(phba); 11500 } 11501 11502 /** 11503 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 11504 * @pdev: pointer to PCI device. 11505 * @state: the current PCI connection state. 11506 * 11507 * This routine is called from the PCI subsystem for error handling to device 11508 * with SLI-4 interface spec. This function is called by the PCI subsystem 11509 * after a PCI bus error affecting this device has been detected. When this 11510 * function is invoked, it will need to stop all the I/Os and interrupt(s) 11511 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 11512 * for the PCI subsystem to perform proper recovery as desired. 11513 * 11514 * Return codes 11515 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 11516 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11517 **/ 11518 static pci_ers_result_t 11519 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 11520 { 11521 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11522 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11523 11524 switch (state) { 11525 case pci_channel_io_normal: 11526 /* Non-fatal error, prepare for recovery */ 11527 lpfc_sli4_prep_dev_for_recover(phba); 11528 return PCI_ERS_RESULT_CAN_RECOVER; 11529 case pci_channel_io_frozen: 11530 /* Fatal error, prepare for slot reset */ 11531 lpfc_sli4_prep_dev_for_reset(phba); 11532 return PCI_ERS_RESULT_NEED_RESET; 11533 case pci_channel_io_perm_failure: 11534 /* Permanent failure, prepare for device down */ 11535 lpfc_sli4_prep_dev_for_perm_failure(phba); 11536 return PCI_ERS_RESULT_DISCONNECT; 11537 default: 11538 /* Unknown state, prepare and request slot reset */ 11539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11540 "2825 Unknown PCI error state: x%x\n", state); 11541 lpfc_sli4_prep_dev_for_reset(phba); 11542 return PCI_ERS_RESULT_NEED_RESET; 11543 } 11544 } 11545 11546 /** 11547 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 11548 * @pdev: pointer to PCI device. 11549 * 11550 * This routine is called from the PCI subsystem for error handling to device 11551 * with SLI-4 interface spec. It is called after PCI bus has been reset to 11552 * restart the PCI card from scratch, as if from a cold-boot. During the 11553 * PCI subsystem error recovery, after the driver returns 11554 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 11555 * recovery and then call this routine before calling the .resume method to 11556 * recover the device. This function will initialize the HBA device, enable 11557 * the interrupt, but it will just put the HBA to offline state without 11558 * passing any I/O traffic. 
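 *
 * Unlike the SLI-3 flavor, this routine does not take the port offline and
 * restart it here; that step is deferred to lpfc_io_resume_s4() because the
 * SLI-4 function reset is issued through a mailbox command, which requires
 * DMA to be re-enabled first.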
11559 * 11560 * Return codes 11561 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 11562 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11563 */ 11564 static pci_ers_result_t 11565 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 11566 { 11567 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11568 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11569 struct lpfc_sli *psli = &phba->sli; 11570 uint32_t intr_mode; 11571 11572 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 11573 if (pci_enable_device_mem(pdev)) { 11574 printk(KERN_ERR "lpfc: Cannot re-enable " 11575 "PCI device after reset.\n"); 11576 return PCI_ERS_RESULT_DISCONNECT; 11577 } 11578 11579 pci_restore_state(pdev); 11580 11581 /* 11582 * As the new kernel behavior of pci_restore_state() API call clears 11583 * device saved_state flag, need to save the restored state again. 11584 */ 11585 pci_save_state(pdev); 11586 11587 if (pdev->is_busmaster) 11588 pci_set_master(pdev); 11589 11590 spin_lock_irq(&phba->hbalock); 11591 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 11592 spin_unlock_irq(&phba->hbalock); 11593 11594 /* Configure and enable interrupt */ 11595 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 11596 if (intr_mode == LPFC_INTR_ERROR) { 11597 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11598 "2824 Cannot re-enable interrupt after " 11599 "slot reset.\n"); 11600 return PCI_ERS_RESULT_DISCONNECT; 11601 } else 11602 phba->intr_mode = intr_mode; 11603 11604 /* Log the current active interrupt mode */ 11605 lpfc_log_intr_mode(phba, phba->intr_mode); 11606 11607 return PCI_ERS_RESULT_RECOVERED; 11608 } 11609 11610 /** 11611 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 11612 * @pdev: pointer to PCI device 11613 * 11614 * This routine is called from the PCI subsystem for error handling to device 11615 * with SLI-4 interface spec. It is called when kernel error recovery tells 11616 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 11617 * error recovery. After this call, traffic can start to flow from this device 11618 * again. 11619 **/ 11620 static void 11621 lpfc_io_resume_s4(struct pci_dev *pdev) 11622 { 11623 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11624 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11625 11626 /* 11627 * In case of slot reset, as function reset is performed through 11628 * mailbox command which needs DMA to be enabled, this operation 11629 * has to be moved to the io resume phase. Taking device offline 11630 * will perform the necessary cleanup. 11631 */ 11632 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 11633 /* Perform device reset */ 11634 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11635 lpfc_offline(phba); 11636 lpfc_sli_brdrestart(phba); 11637 /* Bring the device back online */ 11638 lpfc_online(phba); 11639 } 11640 11641 /* Clean up Advanced Error Reporting (AER) if needed */ 11642 if (phba->hba_flag & HBA_AER_ENABLED) 11643 pci_cleanup_aer_uncorrect_error_status(pdev); 11644 } 11645 11646 /** 11647 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 11648 * @pdev: pointer to PCI device 11649 * @pid: pointer to PCI device identifier 11650 * 11651 * This routine is to be registered to the kernel's PCI subsystem. 
When an
11652 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
11653 * at PCI device-specific information of the device and driver to see whether
11654 * the driver can support this kind of device. If the match is
11655 * successful, the driver core invokes this routine. This routine dispatches
11656 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
11657 * do all the initialization that it needs to do to handle the HBA device
11658 * properly.
11659 *
11660 * Return code
11661 * 0 - driver can claim the device
11662 * negative value - driver cannot claim the device
11663 **/
11664 static int
11665 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
11666 {
11667 int rc;
11668 struct lpfc_sli_intf intf;
11669
11670 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
11671 return -ENODEV;
11672
11673 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
11674 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
11675 rc = lpfc_pci_probe_one_s4(pdev, pid);
11676 else
11677 rc = lpfc_pci_probe_one_s3(pdev, pid);
11678
11679 return rc;
11680 }
11681
11682 /**
11683 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
11684 * @pdev: pointer to PCI device
11685 *
11686 * This routine is to be registered to the kernel's PCI subsystem. When an
11687 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
11688 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
11689 * remove routine, which will perform all the necessary cleanup for the
11690 * device to be removed from the PCI subsystem properly.
11691 **/
11692 static void
11693 lpfc_pci_remove_one(struct pci_dev *pdev)
11694 {
11695 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11696 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11697
11698 switch (phba->pci_dev_grp) {
11699 case LPFC_PCI_DEV_LP:
11700 lpfc_pci_remove_one_s3(pdev);
11701 break;
11702 case LPFC_PCI_DEV_OC:
11703 lpfc_pci_remove_one_s4(pdev);
11704 break;
11705 default:
11706 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11707 "1424 Invalid PCI device group: 0x%x\n",
11708 phba->pci_dev_grp);
11709 break;
11710 }
11711 return;
11712 }
11713
11714 /**
11715 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
11716 * @pdev: pointer to PCI device
11717 * @msg: power management message
11718 *
11719 * This routine is to be registered to the kernel's PCI subsystem to support
11720 * system Power Management (PM). When PM invokes this method, it dispatches
11721 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
11722 * suspend the device.
11723 *
11724 * Return code
11725 * 0 - driver suspended the device
11726 * Error otherwise
11727 **/
11728 static int
11729 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
11730 {
11731 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11732 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11733 int rc = -ENODEV;
11734
11735 switch (phba->pci_dev_grp) {
11736 case LPFC_PCI_DEV_LP:
11737 rc = lpfc_pci_suspend_one_s3(pdev, msg);
11738 break;
11739 case LPFC_PCI_DEV_OC:
11740 rc = lpfc_pci_suspend_one_s4(pdev, msg);
11741 break;
11742 default:
11743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11744 "1425 Invalid PCI device group: 0x%x\n",
11745 phba->pci_dev_grp);
11746 break;
11747 }
11748 return rc;
11749 }
11750
11751 /**
11752 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
11753 * @pdev: pointer to PCI device
11754 *
11755 * This routine is to be registered to the kernel's PCI subsystem to support
11756 * system Power Management (PM). When PM invokes this method, it dispatches
11757 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
11758 * resume the device.
11759 *
11760 * Return code
11761 * 0 - driver resumed the device
11762 * Error otherwise
11763 **/
11764 static int
11765 lpfc_pci_resume_one(struct pci_dev *pdev)
11766 {
11767 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11768 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11769 int rc = -ENODEV;
11770
11771 switch (phba->pci_dev_grp) {
11772 case LPFC_PCI_DEV_LP:
11773 rc = lpfc_pci_resume_one_s3(pdev);
11774 break;
11775 case LPFC_PCI_DEV_OC:
11776 rc = lpfc_pci_resume_one_s4(pdev);
11777 break;
11778 default:
11779 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11780 "1426 Invalid PCI device group: 0x%x\n",
11781 phba->pci_dev_grp);
11782 break;
11783 }
11784 return rc;
11785 }
11786
11787 /**
11788 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
11789 * @pdev: pointer to PCI device.
11790 * @state: the current PCI connection state.
11791 *
11792 * This routine is registered to the PCI subsystem for error handling. This
11793 * function is called by the PCI subsystem after a PCI bus error affecting
11794 * this device has been detected. When this routine is invoked, it dispatches
11795 * the action to the proper SLI-3 or SLI-4 device error detected handling
11796 * routine, which will perform the proper error detected operation.
11797 *
11798 * Return codes
11799 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
11800 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11801 **/
11802 static pci_ers_result_t
11803 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
11804 {
11805 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11806 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11807 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
11808
11809 switch (phba->pci_dev_grp) {
11810 case LPFC_PCI_DEV_LP:
11811 rc = lpfc_io_error_detected_s3(pdev, state);
11812 break;
11813 case LPFC_PCI_DEV_OC:
11814 rc = lpfc_io_error_detected_s4(pdev, state);
11815 break;
11816 default:
11817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11818 "1427 Invalid PCI device group: 0x%x\n",
11819 phba->pci_dev_grp);
11820 break;
11821 }
11822 return rc;
11823 }
11824
11825 /**
11826 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
11827 * @pdev: pointer to PCI device.
11828 *
11829 * This routine is registered to the PCI subsystem for error handling.
This 11830 * function is called after PCI bus has been reset to restart the PCI card 11831 * from scratch, as if from a cold-boot. When this routine is invoked, it 11832 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 11833 * routine, which will perform the proper device reset. 11834 * 11835 * Return codes 11836 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 11837 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11838 **/ 11839 static pci_ers_result_t 11840 lpfc_io_slot_reset(struct pci_dev *pdev) 11841 { 11842 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11843 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11844 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 11845 11846 switch (phba->pci_dev_grp) { 11847 case LPFC_PCI_DEV_LP: 11848 rc = lpfc_io_slot_reset_s3(pdev); 11849 break; 11850 case LPFC_PCI_DEV_OC: 11851 rc = lpfc_io_slot_reset_s4(pdev); 11852 break; 11853 default: 11854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11855 "1428 Invalid PCI device group: 0x%x\n", 11856 phba->pci_dev_grp); 11857 break; 11858 } 11859 return rc; 11860 } 11861 11862 /** 11863 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 11864 * @pdev: pointer to PCI device 11865 * 11866 * This routine is registered to the PCI subsystem for error handling. It 11867 * is called when kernel error recovery tells the lpfc driver that it is 11868 * OK to resume normal PCI operation after PCI bus error recovery. When 11869 * this routine is invoked, it dispatches the action to the proper SLI-3 11870 * or SLI-4 device io_resume routine, which will resume the device operation. 11871 **/ 11872 static void 11873 lpfc_io_resume(struct pci_dev *pdev) 11874 { 11875 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11876 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11877 11878 switch (phba->pci_dev_grp) { 11879 case LPFC_PCI_DEV_LP: 11880 lpfc_io_resume_s3(pdev); 11881 break; 11882 case LPFC_PCI_DEV_OC: 11883 lpfc_io_resume_s4(pdev); 11884 break; 11885 default: 11886 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11887 "1429 Invalid PCI device group: 0x%x\n", 11888 phba->pci_dev_grp); 11889 break; 11890 } 11891 return; 11892 } 11893 11894 /** 11895 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 11896 * @phba: pointer to lpfc hba data structure. 11897 * 11898 * This routine checks to see if OAS is supported for this adapter. If 11899 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 11900 * the enable oas flag is cleared and the pool created for OAS device data 11901 * is destroyed. 11902 * 11903 **/ 11904 void 11905 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 11906 { 11907 11908 if (!phba->cfg_EnableXLane) 11909 return; 11910 11911 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 11912 phba->cfg_fof = 1; 11913 } else { 11914 phba->cfg_fof = 0; 11915 if (phba->device_data_mem_pool) 11916 mempool_destroy(phba->device_data_mem_pool); 11917 phba->device_data_mem_pool = NULL; 11918 } 11919 11920 return; 11921 } 11922 11923 /** 11924 * lpfc_fof_queue_setup - Set up all the fof queues 11925 * @phba: pointer to lpfc hba data structure. 11926 * 11927 * This routine is invoked to set up all the fof queues for the FC HBA 11928 * operation. 
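 *
 * The FOF/OAS topology set up here is a single chain: the fof_eq event
 * queue feeds the oas_cq completion queue, which in turn services the
 * oas_wq work queue; the queue structures themselves are allocated
 * beforehand by lpfc_fof_queue_create().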
11929 *
11930 * Return codes
11931 * 0 - successful
11932 * -ENOMEM - No available memory
11933 **/
11934 int
11935 lpfc_fof_queue_setup(struct lpfc_hba *phba)
11936 {
11937 struct lpfc_sli_ring *pring;
11938 int rc;
11939
11940 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
11941 if (rc)
11942 return -ENOMEM;
11943
11944 if (phba->cfg_fof) {
11945
11946 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
11947 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
11948 if (rc)
11949 goto out_oas_cq;
11950
11951 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
11952 phba->sli4_hba.oas_cq, LPFC_FCP);
11953 if (rc)
11954 goto out_oas_wq;
11955
11956 /* Bind this CQ/WQ to the OAS ring */
11957 pring = phba->sli4_hba.oas_wq->pring;
11958 pring->sli.sli4.wqp =
11959 (void *)phba->sli4_hba.oas_wq;
11960 phba->sli4_hba.oas_cq->pring = pring;
11961 }
11962
11963 return 0;
11964
11965 out_oas_wq:
11966 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
11967 out_oas_cq:
11968 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
11969 return rc;
11970
11971 }
11972
11973 /**
11974 * lpfc_fof_queue_create - Create all the fof queues
11975 * @phba: pointer to lpfc hba data structure.
11976 *
11977 * This routine is invoked to allocate all the fof queues for the FC HBA
11978 * operation. For each SLI4 queue type, the parameters such as queue entry
11979 * count (queue depth) shall be taken from the module parameter. For now,
11980 * we just use some constant number as placeholder.
11981 *
11982 * Return codes
11983 * 0 - successful
11984 * -ENOMEM - No available memory
11985 * -EIO - The mailbox failed to complete successfully.
11986 **/
11987 int
11988 lpfc_fof_queue_create(struct lpfc_hba *phba)
11989 {
11990 struct lpfc_queue *qdesc;
11991 uint32_t wqesize;
11992
11993 /* Create FOF EQ */
11994 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
11995 phba->sli4_hba.eq_ecount);
11996 if (!qdesc)
11997 goto out_error;
11998
11999 phba->sli4_hba.fof_eq = qdesc;
12000
12001 if (phba->cfg_fof) {
12002
12003 /* Create OAS CQ */
12004 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
12005 phba->sli4_hba.cq_ecount);
12006 if (!qdesc)
12007 goto out_error;
12008
12009 phba->sli4_hba.oas_cq = qdesc;
12010
12011 /* Create OAS WQ */
12012 wqesize = (phba->fcp_embed_io) ?
12013 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
12014 qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
12015 phba->sli4_hba.wq_ecount);
12016
12017 if (!qdesc)
12018 goto out_error;
12019
12020 phba->sli4_hba.oas_wq = qdesc;
12021 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
12022
12023 }
12024 return 0;
12025
12026 out_error:
12027 lpfc_fof_queue_destroy(phba);
12028 return -ENOMEM;
12029 }
12030
12031 /**
12032 * lpfc_fof_queue_destroy - Destroy all the fof queues
12033 * @phba: pointer to lpfc hba data structure.
12034 *
12035 * This routine is invoked to release all the SLI4 queues for the FC HBA
12036 * operation.
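 *
 * This routine is safe to call on a partially created set of queues (as is
 * done from the lpfc_fof_queue_create() error path) since each queue
 * pointer is checked for NULL before being freed and cleared.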
12037 * 12038 * Return codes 12039 * 0 - successful 12040 **/ 12041 int 12042 lpfc_fof_queue_destroy(struct lpfc_hba *phba) 12043 { 12044 /* Release FOF Event queue */ 12045 if (phba->sli4_hba.fof_eq != NULL) { 12046 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); 12047 phba->sli4_hba.fof_eq = NULL; 12048 } 12049 12050 /* Release OAS Completion queue */ 12051 if (phba->sli4_hba.oas_cq != NULL) { 12052 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); 12053 phba->sli4_hba.oas_cq = NULL; 12054 } 12055 12056 /* Release OAS Work queue */ 12057 if (phba->sli4_hba.oas_wq != NULL) { 12058 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); 12059 phba->sli4_hba.oas_wq = NULL; 12060 } 12061 return 0; 12062 } 12063 12064 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 12065 12066 static const struct pci_error_handlers lpfc_err_handler = { 12067 .error_detected = lpfc_io_error_detected, 12068 .slot_reset = lpfc_io_slot_reset, 12069 .resume = lpfc_io_resume, 12070 }; 12071 12072 static struct pci_driver lpfc_driver = { 12073 .name = LPFC_DRIVER_NAME, 12074 .id_table = lpfc_id_table, 12075 .probe = lpfc_pci_probe_one, 12076 .remove = lpfc_pci_remove_one, 12077 .shutdown = lpfc_pci_remove_one, 12078 .suspend = lpfc_pci_suspend_one, 12079 .resume = lpfc_pci_resume_one, 12080 .err_handler = &lpfc_err_handler, 12081 }; 12082 12083 static const struct file_operations lpfc_mgmt_fop = { 12084 .owner = THIS_MODULE, 12085 }; 12086 12087 static struct miscdevice lpfc_mgmt_dev = { 12088 .minor = MISC_DYNAMIC_MINOR, 12089 .name = "lpfcmgmt", 12090 .fops = &lpfc_mgmt_fop, 12091 }; 12092 12093 /** 12094 * lpfc_init - lpfc module initialization routine 12095 * 12096 * This routine is to be invoked when the lpfc module is loaded into the 12097 * kernel. The special kernel macro module_init() is used to indicate the 12098 * role of this routine to the kernel as lpfc module entry point. 12099 * 12100 * Return codes 12101 * 0 - successful 12102 * -ENOMEM - FC attach transport failed 12103 * all others - failed 12104 */ 12105 static int __init 12106 lpfc_init(void) 12107 { 12108 int error = 0; 12109 12110 printk(LPFC_MODULE_DESC "\n"); 12111 printk(LPFC_COPYRIGHT "\n"); 12112 12113 error = misc_register(&lpfc_mgmt_dev); 12114 if (error) 12115 printk(KERN_ERR "Could not register lpfcmgmt device, " 12116 "misc_register returned with status %d", error); 12117 12118 lpfc_transport_functions.vport_create = lpfc_vport_create; 12119 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 12120 lpfc_transport_template = 12121 fc_attach_transport(&lpfc_transport_functions); 12122 if (lpfc_transport_template == NULL) 12123 return -ENOMEM; 12124 lpfc_vport_transport_template = 12125 fc_attach_transport(&lpfc_vport_transport_functions); 12126 if (lpfc_vport_transport_template == NULL) { 12127 fc_release_transport(lpfc_transport_template); 12128 return -ENOMEM; 12129 } 12130 12131 /* Initialize in case vector mapping is needed */ 12132 lpfc_used_cpu = NULL; 12133 lpfc_present_cpu = num_present_cpus(); 12134 12135 error = pci_register_driver(&lpfc_driver); 12136 if (error) { 12137 fc_release_transport(lpfc_transport_template); 12138 fc_release_transport(lpfc_vport_transport_template); 12139 } 12140 12141 return error; 12142 } 12143 12144 /** 12145 * lpfc_exit - lpfc module removal routine 12146 * 12147 * This routine is invoked when the lpfc module is removed from the kernel. 12148 * The special kernel macro module_exit() is used to indicate the role of 12149 * this routine to the kernel as lpfc module exit point. 
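 *
 * The exit path undoes lpfc_init(): the lpfcmgmt misc device and the PCI
 * driver are unregistered, both FC transport templates are released, and
 * any diagnostic dump buffers, the CPU map and the HBA index IDR allocated
 * at module or attach time are freed last.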
12150 */ 12151 static void __exit 12152 lpfc_exit(void) 12153 { 12154 misc_deregister(&lpfc_mgmt_dev); 12155 pci_unregister_driver(&lpfc_driver); 12156 fc_release_transport(lpfc_transport_template); 12157 fc_release_transport(lpfc_vport_transport_template); 12158 if (_dump_buf_data) { 12159 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 12160 "_dump_buf_data at 0x%p\n", 12161 (1L << _dump_buf_data_order), _dump_buf_data); 12162 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 12163 } 12164 12165 if (_dump_buf_dif) { 12166 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 12167 "_dump_buf_dif at 0x%p\n", 12168 (1L << _dump_buf_dif_order), _dump_buf_dif); 12169 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 12170 } 12171 kfree(lpfc_used_cpu); 12172 idr_destroy(&lpfc_hba_index); 12173 } 12174 12175 module_init(lpfc_init); 12176 module_exit(lpfc_exit); 12177 MODULE_LICENSE("GPL"); 12178 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 12179 MODULE_AUTHOR("Broadcom"); 12180 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 12181