/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/bitops.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static
void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 92 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 93 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 94 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 95 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 96 97 static struct scsi_transport_template *lpfc_transport_template = NULL; 98 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 99 static DEFINE_IDR(lpfc_hba_index); 100 #define LPFC_NVMET_BUF_POST 254 101 102 /** 103 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 104 * @phba: pointer to lpfc hba data structure. 105 * 106 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 107 * mailbox command. It retrieves the revision information from the HBA and 108 * collects the Vital Product Data (VPD) about the HBA for preparing the 109 * configuration of the HBA. 110 * 111 * Return codes: 112 * 0 - success. 113 * -ERESTART - requests the SLI layer to reset the HBA and try again. 114 * Any other value - indicates an error. 115 **/ 116 int 117 lpfc_config_port_prep(struct lpfc_hba *phba) 118 { 119 lpfc_vpd_t *vp = &phba->vpd; 120 int i = 0, rc; 121 LPFC_MBOXQ_t *pmb; 122 MAILBOX_t *mb; 123 char *lpfc_vpd_data = NULL; 124 uint16_t offset = 0; 125 static char licensed[56] = 126 "key unlock for use with gnu public licensed code only\0"; 127 static int init_key = 1; 128 129 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 130 if (!pmb) { 131 phba->link_state = LPFC_HBA_ERROR; 132 return -ENOMEM; 133 } 134 135 mb = &pmb->u.mb; 136 phba->link_state = LPFC_INIT_MBX_CMDS; 137 138 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 139 if (init_key) { 140 uint32_t *ptext = (uint32_t *) licensed; 141 142 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 143 *ptext = cpu_to_be32(*ptext); 144 init_key = 0; 145 } 146 147 lpfc_read_nv(phba, pmb); 148 memset((char*)mb->un.varRDnvp.rsvd3, 0, 149 sizeof (mb->un.varRDnvp.rsvd3)); 150 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 151 sizeof (licensed)); 152 153 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 154 155 if (rc != MBX_SUCCESS) { 156 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 157 "0324 Config Port initialization " 158 "error, mbxCmd x%x READ_NVPARM, " 159 "mbxStatus x%x\n", 160 mb->mbxCommand, mb->mbxStatus); 161 mempool_free(pmb, phba->mbox_mem_pool); 162 return -ERESTART; 163 } 164 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 165 sizeof(phba->wwnn)); 166 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 167 sizeof(phba->wwpn)); 168 } 169 170 phba->sli3_options = 0x0; 171 172 /* Setup and issue mailbox READ REV command */ 173 lpfc_read_rev(phba, pmb); 174 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 175 if (rc != MBX_SUCCESS) { 176 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 177 "0439 Adapter failed to init, mbxCmd x%x " 178 "READ_REV, mbxStatus x%x\n", 179 mb->mbxCommand, mb->mbxStatus); 180 mempool_free( pmb, phba->mbox_mem_pool); 181 return -ERESTART; 182 } 183 184 185 /* 186 * The value of rr must be 1 since the driver set the cv field to 1. 187 * This setting requires the FW to set all revision fields. 
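/*
 * Illustrative sketch (not taken from the driver): the one-time
 * "init_key" loop above treats the 56-byte license string as an array
 * of 32-bit words and converts each word to big-endian with
 * cpu_to_be32() before it is copied into the READ_NVPARM mailbox.
 * A minimal standalone form of that pattern, with an assumed buffer
 * name and length, would be:
 *
 *	void buf_to_be32(char *buf, size_t len)
 *	{
 *		uint32_t *p = (uint32_t *)buf;
 *		size_t i;
 *
 *		for (i = 0; i < len; i += sizeof(uint32_t), p++)
 *			*p = cpu_to_be32(*p);
 *	}
 *
 * On big-endian hosts cpu_to_be32() is a no-op, so the conversion only
 * has a visible effect on little-endian CPUs, and init_key ensures it
 * is done at most once.
 */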
188 */ 189 if (mb->un.varRdRev.rr == 0) { 190 vp->rev.rBit = 0; 191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 192 "0440 Adapter failed to init, READ_REV has " 193 "missing revision information.\n"); 194 mempool_free(pmb, phba->mbox_mem_pool); 195 return -ERESTART; 196 } 197 198 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 199 mempool_free(pmb, phba->mbox_mem_pool); 200 return -EINVAL; 201 } 202 203 /* Save information as VPD data */ 204 vp->rev.rBit = 1; 205 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 206 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 207 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 208 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 209 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 210 vp->rev.biuRev = mb->un.varRdRev.biuRev; 211 vp->rev.smRev = mb->un.varRdRev.smRev; 212 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 213 vp->rev.endecRev = mb->un.varRdRev.endecRev; 214 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 215 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 216 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 217 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 218 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 219 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 220 221 /* If the sli feature level is less then 9, we must 222 * tear down all RPIs and VPIs on link down if NPIV 223 * is enabled. 224 */ 225 if (vp->rev.feaLevelHigh < 9) 226 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 227 228 if (lpfc_is_LC_HBA(phba->pcidev->device)) 229 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 230 sizeof (phba->RandomData)); 231 232 /* Get adapter VPD information */ 233 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 234 if (!lpfc_vpd_data) 235 goto out_free_mbox; 236 do { 237 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 238 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 239 240 if (rc != MBX_SUCCESS) { 241 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 242 "0441 VPD not present on adapter, " 243 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 244 mb->mbxCommand, mb->mbxStatus); 245 mb->un.varDmp.word_cnt = 0; 246 } 247 /* dump mem may return a zero when finished or we got a 248 * mailbox error, either way we are done. 249 */ 250 if (mb->un.varDmp.word_cnt == 0) 251 break; 252 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 253 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 254 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 255 lpfc_vpd_data + offset, 256 mb->un.varDmp.word_cnt); 257 offset += mb->un.varDmp.word_cnt; 258 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 259 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 260 261 kfree(lpfc_vpd_data); 262 out_free_mbox: 263 mempool_free(pmb, phba->mbox_mem_pool); 264 return 0; 265 } 266 267 /** 268 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 269 * @phba: pointer to lpfc hba data structure. 270 * @pmboxq: pointer to the driver internal queue element for mailbox command. 271 * 272 * This is the completion handler for driver's configuring asynchronous event 273 * mailbox command to the device. If the mailbox command returns successfully, 274 * it will set internal async event support flag to 1; otherwise, it will 275 * set internal async event support flag to 0. 
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *	None.
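/*
 * Illustrative sketch (assumptions noted): lpfc_dump_wakeup_param_cmpl()
 * above overlays word 7 of the mailbox response with struct prog_id and
 * formats the fields as "<ver>.<rev><lev>[<dist-char><num>]". The helper
 * below shows only the string-formatting step, with the fields passed in
 * explicitly; the function name and parameters are invented for the
 * example and the bit layout of struct prog_id is not repeated here.
 *
 *	static void fmt_oprom_ver(char *out, size_t sz, int ver, int rev,
 *				  int lev, int dist, int num)
 *	{
 *		char dist_char[] = "nabx";
 *		char d = (dist < 4) ? dist_char[dist] : ' ';
 *
 *		if (dist == 3 && num == 0)
 *			snprintf(out, sz, "%d.%d%d", ver, rev, lev);
 *		else
 *			snprintf(out, sz, "%d.%d%d%c%d", ver, rev, lev, d, num);
 *	}
 *
 * For example, ver=11, rev=4, lev=2, dist=1 ('a'), num=3 would yield
 * "11.42a3", matching the format written into phba->OptionROMVersion.
 */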
340 **/ 341 void 342 lpfc_update_vport_wwn(struct lpfc_vport *vport) 343 { 344 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 345 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 346 347 /* If the soft name exists then update it using the service params */ 348 if (vport->phba->cfg_soft_wwnn) 349 u64_to_wwn(vport->phba->cfg_soft_wwnn, 350 vport->fc_sparam.nodeName.u.wwn); 351 if (vport->phba->cfg_soft_wwpn) 352 u64_to_wwn(vport->phba->cfg_soft_wwpn, 353 vport->fc_sparam.portName.u.wwn); 354 355 /* 356 * If the name is empty or there exists a soft name 357 * then copy the service params name, otherwise use the fc name 358 */ 359 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 360 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 361 sizeof(struct lpfc_name)); 362 else 363 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 364 sizeof(struct lpfc_name)); 365 366 /* 367 * If the port name has changed, then set the Param changes flag 368 * to unreg the login 369 */ 370 if (vport->fc_portname.u.wwn[0] != 0 && 371 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 372 sizeof(struct lpfc_name))) 373 vport->vport_flag |= FAWWPN_PARAM_CHG; 374 375 if (vport->fc_portname.u.wwn[0] == 0 || 376 vport->phba->cfg_soft_wwpn || 377 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 378 vport->vport_flag & FAWWPN_SET) { 379 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 380 sizeof(struct lpfc_name)); 381 vport->vport_flag &= ~FAWWPN_SET; 382 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 383 vport->vport_flag |= FAWWPN_SET; 384 } 385 else 386 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 387 sizeof(struct lpfc_name)); 388 } 389 390 /** 391 * lpfc_config_port_post - Perform lpfc initialization after config port 392 * @phba: pointer to lpfc hba data structure. 393 * 394 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 395 * command call. It performs all internal resource and state setups on the 396 * port: post IOCB buffers, enable appropriate host interrupt attentions, 397 * ELS ring timers, etc. 398 * 399 * Return codes 400 * 0 - success. 401 * Any other value - error. 402 **/ 403 int 404 lpfc_config_port_post(struct lpfc_hba *phba) 405 { 406 struct lpfc_vport *vport = phba->pport; 407 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 408 LPFC_MBOXQ_t *pmb; 409 MAILBOX_t *mb; 410 struct lpfc_dmabuf *mp; 411 struct lpfc_sli *psli = &phba->sli; 412 uint32_t status, timeout; 413 int i, j; 414 int rc; 415 416 spin_lock_irq(&phba->hbalock); 417 /* 418 * If the Config port completed correctly the HBA is not 419 * over heated any more. 420 */ 421 if (phba->over_temp_state == HBA_OVER_TEMP) 422 phba->over_temp_state = HBA_NORMAL_TEMP; 423 spin_unlock_irq(&phba->hbalock); 424 425 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 426 if (!pmb) { 427 phba->link_state = LPFC_HBA_ERROR; 428 return -ENOMEM; 429 } 430 mb = &pmb->u.mb; 431 432 /* Get login parameters for NID. 
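/*
 * Illustrative sketch: lpfc_update_vport_wwn() above converts the soft
 * WWNN/WWPN settings (u64 values) into the 8-byte wire format with
 * u64_to_wwn(), and lpfc_config_port_post() later goes the other way
 * with wwn_to_u64() when populating the fc_host attributes. Both
 * helpers come from scsi_transport_fc.h; an open-coded equivalent of
 * the conversion (byte 0 is the most significant byte) would be:
 *
 *	static void example_u64_to_wwn(u64 in, u8 wwn[8])
 *	{
 *		int i;
 *
 *		for (i = 7; i >= 0; i--) {
 *			wwn[i] = in & 0xff;
 *			in >>= 8;
 *		}
 *	}
 *
 * so a WWPN of 0x10000000c9abcdefULL becomes the byte sequence
 * 10:00:00:00:c9:ab:cd:ef, which is how it appears on the wire and in
 * sysfs.
 */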
*/ 433 rc = lpfc_read_sparam(phba, pmb, 0); 434 if (rc) { 435 mempool_free(pmb, phba->mbox_mem_pool); 436 return -ENOMEM; 437 } 438 439 pmb->vport = vport; 440 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 441 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 442 "0448 Adapter failed init, mbxCmd x%x " 443 "READ_SPARM mbxStatus x%x\n", 444 mb->mbxCommand, mb->mbxStatus); 445 phba->link_state = LPFC_HBA_ERROR; 446 mp = (struct lpfc_dmabuf *) pmb->context1; 447 mempool_free(pmb, phba->mbox_mem_pool); 448 lpfc_mbuf_free(phba, mp->virt, mp->phys); 449 kfree(mp); 450 return -EIO; 451 } 452 453 mp = (struct lpfc_dmabuf *) pmb->context1; 454 455 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 456 lpfc_mbuf_free(phba, mp->virt, mp->phys); 457 kfree(mp); 458 pmb->context1 = NULL; 459 lpfc_update_vport_wwn(vport); 460 461 /* Update the fc_host data structures with new wwn. */ 462 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 463 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 464 fc_host_max_npiv_vports(shost) = phba->max_vpi; 465 466 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 467 /* This should be consolidated into parse_vpd ? - mr */ 468 if (phba->SerialNumber[0] == 0) { 469 uint8_t *outptr; 470 471 outptr = &vport->fc_nodename.u.s.IEEE[0]; 472 for (i = 0; i < 12; i++) { 473 status = *outptr++; 474 j = ((status & 0xf0) >> 4); 475 if (j <= 9) 476 phba->SerialNumber[i] = 477 (char)((uint8_t) 0x30 + (uint8_t) j); 478 else 479 phba->SerialNumber[i] = 480 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 481 i++; 482 j = (status & 0xf); 483 if (j <= 9) 484 phba->SerialNumber[i] = 485 (char)((uint8_t) 0x30 + (uint8_t) j); 486 else 487 phba->SerialNumber[i] = 488 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 489 } 490 } 491 492 lpfc_read_config(phba, pmb); 493 pmb->vport = vport; 494 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 496 "0453 Adapter failed to init, mbxCmd x%x " 497 "READ_CONFIG, mbxStatus x%x\n", 498 mb->mbxCommand, mb->mbxStatus); 499 phba->link_state = LPFC_HBA_ERROR; 500 mempool_free( pmb, phba->mbox_mem_pool); 501 return -EIO; 502 } 503 504 /* Check if the port is disabled */ 505 lpfc_sli_read_link_ste(phba); 506 507 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 508 i = (mb->un.varRdConfig.max_xri + 1); 509 if (phba->cfg_hba_queue_depth > i) { 510 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 511 "3359 HBA queue depth changed from %d to %d\n", 512 phba->cfg_hba_queue_depth, i); 513 phba->cfg_hba_queue_depth = i; 514 } 515 516 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 517 i = (mb->un.varRdConfig.max_xri >> 3); 518 if (phba->pport->cfg_lun_queue_depth > i) { 519 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 520 "3360 LUN queue depth changed from %d to %d\n", 521 phba->pport->cfg_lun_queue_depth, i); 522 phba->pport->cfg_lun_queue_depth = i; 523 } 524 525 phba->lmt = mb->un.varRdConfig.lmt; 526 527 /* Get the default values for Model Name and Description */ 528 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 529 530 phba->link_state = LPFC_LINK_DOWN; 531 532 /* Only process IOCBs on ELS ring till hba_state is READY */ 533 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 534 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 535 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 536 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 537 538 /* Post receive buffers for desired rings */ 539 if 
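/*
 * Illustrative sketch: the serial-number fallback above encodes the six
 * IEEE bytes of the WWNN as twelve lower-case hex characters (0x30 + n
 * for nibbles 0-9, 0x61 + n - 10 for a-f). The same mapping for a
 * single byte, written as a standalone helper with invented names:
 *
 *	static void example_byte_to_hex(u8 byte, char out[2])
 *	{
 *		static const char hex[] = "0123456789abcdef";
 *
 *		out[0] = hex[(byte & 0xf0) >> 4];
 *		out[1] = hex[byte & 0x0f];
 *	}
 *
 * For instance, an IEEE address of c9:ab:cd:01:02:03 would produce the
 * serial number string "c9abcd010203".
 */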
(phba->sli_rev != 3) 540 lpfc_post_rcv_buf(phba); 541 542 /* 543 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 544 */ 545 if (phba->intr_type == MSIX) { 546 rc = lpfc_config_msi(phba, pmb); 547 if (rc) { 548 mempool_free(pmb, phba->mbox_mem_pool); 549 return -EIO; 550 } 551 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 552 if (rc != MBX_SUCCESS) { 553 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 554 "0352 Config MSI mailbox command " 555 "failed, mbxCmd x%x, mbxStatus x%x\n", 556 pmb->u.mb.mbxCommand, 557 pmb->u.mb.mbxStatus); 558 mempool_free(pmb, phba->mbox_mem_pool); 559 return -EIO; 560 } 561 } 562 563 spin_lock_irq(&phba->hbalock); 564 /* Initialize ERATT handling flag */ 565 phba->hba_flag &= ~HBA_ERATT_HANDLED; 566 567 /* Enable appropriate host interrupts */ 568 if (lpfc_readl(phba->HCregaddr, &status)) { 569 spin_unlock_irq(&phba->hbalock); 570 return -EIO; 571 } 572 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 573 if (psli->num_rings > 0) 574 status |= HC_R0INT_ENA; 575 if (psli->num_rings > 1) 576 status |= HC_R1INT_ENA; 577 if (psli->num_rings > 2) 578 status |= HC_R2INT_ENA; 579 if (psli->num_rings > 3) 580 status |= HC_R3INT_ENA; 581 582 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 583 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 584 status &= ~(HC_R0INT_ENA); 585 586 writel(status, phba->HCregaddr); 587 readl(phba->HCregaddr); /* flush */ 588 spin_unlock_irq(&phba->hbalock); 589 590 /* Set up ring-0 (ELS) timer */ 591 timeout = phba->fc_ratov * 2; 592 mod_timer(&vport->els_tmofunc, 593 jiffies + msecs_to_jiffies(1000 * timeout)); 594 /* Set up heart beat (HB) timer */ 595 mod_timer(&phba->hb_tmofunc, 596 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 597 phba->hb_outstanding = 0; 598 phba->last_completion_time = jiffies; 599 /* Set up error attention (ERATT) polling timer */ 600 mod_timer(&phba->eratt_poll, 601 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 602 603 if (phba->hba_flag & LINK_DISABLED) { 604 lpfc_printf_log(phba, 605 KERN_ERR, LOG_INIT, 606 "2598 Adapter Link is disabled.\n"); 607 lpfc_down_link(phba, pmb); 608 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 609 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 610 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 611 lpfc_printf_log(phba, 612 KERN_ERR, LOG_INIT, 613 "2599 Adapter failed to issue DOWN_LINK" 614 " mbox command rc 0x%x\n", rc); 615 616 mempool_free(pmb, phba->mbox_mem_pool); 617 return -EIO; 618 } 619 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 620 mempool_free(pmb, phba->mbox_mem_pool); 621 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 622 if (rc) 623 return rc; 624 } 625 /* MBOX buffer will be freed in mbox compl */ 626 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 627 if (!pmb) { 628 phba->link_state = LPFC_HBA_ERROR; 629 return -ENOMEM; 630 } 631 632 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 633 pmb->mbox_cmpl = lpfc_config_async_cmpl; 634 pmb->vport = phba->pport; 635 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 636 637 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 638 lpfc_printf_log(phba, 639 KERN_ERR, 640 LOG_INIT, 641 "0456 Adapter failed to issue " 642 "ASYNCEVT_ENABLE mbox status x%x\n", 643 rc); 644 mempool_free(pmb, phba->mbox_mem_pool); 645 } 646 647 /* Get Option rom version */ 648 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 649 if (!pmb) { 650 phba->link_state = LPFC_HBA_ERROR; 651 return -ENOMEM; 652 } 653 654 lpfc_dump_wakeup_param(phba, pmb); 655 pmb->mbox_cmpl = 
lpfc_dump_wakeup_param_cmpl; 656 pmb->vport = phba->pport; 657 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 658 659 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 661 "to get Option ROM version status x%x\n", rc); 662 mempool_free(pmb, phba->mbox_mem_pool); 663 } 664 665 return 0; 666 } 667 668 /** 669 * lpfc_hba_init_link - Initialize the FC link 670 * @phba: pointer to lpfc hba data structure. 671 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 672 * 673 * This routine will issue the INIT_LINK mailbox command call. 674 * It is available to other drivers through the lpfc_hba data 675 * structure for use as a delayed link up mechanism with the 676 * module parameter lpfc_suppress_link_up. 677 * 678 * Return code 679 * 0 - success 680 * Any other value - error 681 **/ 682 static int 683 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 684 { 685 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 686 } 687 688 /** 689 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 690 * @phba: pointer to lpfc hba data structure. 691 * @fc_topology: desired fc topology. 692 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 693 * 694 * This routine will issue the INIT_LINK mailbox command call. 695 * It is available to other drivers through the lpfc_hba data 696 * structure for use as a delayed link up mechanism with the 697 * module parameter lpfc_suppress_link_up. 698 * 699 * Return code 700 * 0 - success 701 * Any other value - error 702 **/ 703 int 704 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 705 uint32_t flag) 706 { 707 struct lpfc_vport *vport = phba->pport; 708 LPFC_MBOXQ_t *pmb; 709 MAILBOX_t *mb; 710 int rc; 711 712 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 713 if (!pmb) { 714 phba->link_state = LPFC_HBA_ERROR; 715 return -ENOMEM; 716 } 717 mb = &pmb->u.mb; 718 pmb->vport = vport; 719 720 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 721 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 722 !(phba->lmt & LMT_1Gb)) || 723 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 724 !(phba->lmt & LMT_2Gb)) || 725 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 726 !(phba->lmt & LMT_4Gb)) || 727 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 728 !(phba->lmt & LMT_8Gb)) || 729 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 730 !(phba->lmt & LMT_10Gb)) || 731 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 732 !(phba->lmt & LMT_16Gb)) || 733 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 734 !(phba->lmt & LMT_32Gb)) || 735 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 736 !(phba->lmt & LMT_64Gb))) { 737 /* Reset link speed to auto */ 738 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 739 "1302 Invalid speed for this board:%d " 740 "Reset link speed to auto.\n", 741 phba->cfg_link_speed); 742 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 743 } 744 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 745 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 746 if (phba->sli_rev < LPFC_SLI_REV4) 747 lpfc_set_loopback_flag(phba); 748 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 749 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 751 "0498 Adapter failed to init, mbxCmd x%x " 752 "INIT_LINK, mbxStatus x%x\n", 753 mb->mbxCommand, mb->mbxStatus); 754 if (phba->sli_rev <= LPFC_SLI_REV3) { 755 /* Clear 
all interrupt enable conditions */ 756 writel(0, phba->HCregaddr); 757 readl(phba->HCregaddr); /* flush */ 758 /* Clear all pending interrupts */ 759 writel(0xffffffff, phba->HAregaddr); 760 readl(phba->HAregaddr); /* flush */ 761 } 762 phba->link_state = LPFC_HBA_ERROR; 763 if (rc != MBX_BUSY || flag == MBX_POLL) 764 mempool_free(pmb, phba->mbox_mem_pool); 765 return -EIO; 766 } 767 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 768 if (flag == MBX_POLL) 769 mempool_free(pmb, phba->mbox_mem_pool); 770 771 return 0; 772 } 773 774 /** 775 * lpfc_hba_down_link - this routine downs the FC link 776 * @phba: pointer to lpfc hba data structure. 777 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 778 * 779 * This routine will issue the DOWN_LINK mailbox command call. 780 * It is available to other drivers through the lpfc_hba data 781 * structure for use to stop the link. 782 * 783 * Return code 784 * 0 - success 785 * Any other value - error 786 **/ 787 static int 788 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 789 { 790 LPFC_MBOXQ_t *pmb; 791 int rc; 792 793 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 794 if (!pmb) { 795 phba->link_state = LPFC_HBA_ERROR; 796 return -ENOMEM; 797 } 798 799 lpfc_printf_log(phba, 800 KERN_ERR, LOG_INIT, 801 "0491 Adapter Link is disabled.\n"); 802 lpfc_down_link(phba, pmb); 803 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 804 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 805 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 806 lpfc_printf_log(phba, 807 KERN_ERR, LOG_INIT, 808 "2522 Adapter failed to issue DOWN_LINK" 809 " mbox command rc 0x%x\n", rc); 810 811 mempool_free(pmb, phba->mbox_mem_pool); 812 return -EIO; 813 } 814 if (flag == MBX_POLL) 815 mempool_free(pmb, phba->mbox_mem_pool); 816 817 return 0; 818 } 819 820 /** 821 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 822 * @phba: pointer to lpfc HBA data structure. 823 * 824 * This routine will do LPFC uninitialization before the HBA is reset when 825 * bringing down the SLI Layer. 826 * 827 * Return codes 828 * 0 - success. 829 * Any other value - error. 830 **/ 831 int 832 lpfc_hba_down_prep(struct lpfc_hba *phba) 833 { 834 struct lpfc_vport **vports; 835 int i; 836 837 if (phba->sli_rev <= LPFC_SLI_REV3) { 838 /* Disable interrupts */ 839 writel(0, phba->HCregaddr); 840 readl(phba->HCregaddr); /* flush */ 841 } 842 843 if (phba->pport->load_flag & FC_UNLOADING) 844 lpfc_cleanup_discovery_resources(phba->pport); 845 else { 846 vports = lpfc_create_vport_work_array(phba); 847 if (vports != NULL) 848 for (i = 0; i <= phba->max_vports && 849 vports[i] != NULL; i++) 850 lpfc_cleanup_discovery_resources(vports[i]); 851 lpfc_destroy_vport_work_array(phba, vports); 852 } 853 return 0; 854 } 855 856 /** 857 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 858 * rspiocb which got deferred 859 * 860 * @phba: pointer to lpfc HBA data structure. 861 * 862 * This routine will cleanup completed slow path events after HBA is reset 863 * when bringing down the SLI Layer. 864 * 865 * 866 * Return codes 867 * void. 
868 **/ 869 static void 870 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 871 { 872 struct lpfc_iocbq *rspiocbq; 873 struct hbq_dmabuf *dmabuf; 874 struct lpfc_cq_event *cq_event; 875 876 spin_lock_irq(&phba->hbalock); 877 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 878 spin_unlock_irq(&phba->hbalock); 879 880 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 881 /* Get the response iocb from the head of work queue */ 882 spin_lock_irq(&phba->hbalock); 883 list_remove_head(&phba->sli4_hba.sp_queue_event, 884 cq_event, struct lpfc_cq_event, list); 885 spin_unlock_irq(&phba->hbalock); 886 887 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 888 case CQE_CODE_COMPL_WQE: 889 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 890 cq_event); 891 lpfc_sli_release_iocbq(phba, rspiocbq); 892 break; 893 case CQE_CODE_RECEIVE: 894 case CQE_CODE_RECEIVE_V1: 895 dmabuf = container_of(cq_event, struct hbq_dmabuf, 896 cq_event); 897 lpfc_in_buf_free(phba, &dmabuf->dbuf); 898 } 899 } 900 } 901 902 /** 903 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 904 * @phba: pointer to lpfc HBA data structure. 905 * 906 * This routine will cleanup posted ELS buffers after the HBA is reset 907 * when bringing down the SLI Layer. 908 * 909 * 910 * Return codes 911 * void. 912 **/ 913 static void 914 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 915 { 916 struct lpfc_sli *psli = &phba->sli; 917 struct lpfc_sli_ring *pring; 918 struct lpfc_dmabuf *mp, *next_mp; 919 LIST_HEAD(buflist); 920 int count; 921 922 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 923 lpfc_sli_hbqbuf_free_all(phba); 924 else { 925 /* Cleanup preposted buffers on the ELS ring */ 926 pring = &psli->sli3_ring[LPFC_ELS_RING]; 927 spin_lock_irq(&phba->hbalock); 928 list_splice_init(&pring->postbufq, &buflist); 929 spin_unlock_irq(&phba->hbalock); 930 931 count = 0; 932 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 933 list_del(&mp->list); 934 count++; 935 lpfc_mbuf_free(phba, mp->virt, mp->phys); 936 kfree(mp); 937 } 938 939 spin_lock_irq(&phba->hbalock); 940 pring->postbufq_cnt -= count; 941 spin_unlock_irq(&phba->hbalock); 942 } 943 } 944 945 /** 946 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 947 * @phba: pointer to lpfc HBA data structure. 948 * 949 * This routine will cleanup the txcmplq after the HBA is reset when bringing 950 * down the SLI Layer. 951 * 952 * Return codes 953 * void 954 **/ 955 static void 956 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 957 { 958 struct lpfc_sli *psli = &phba->sli; 959 struct lpfc_queue *qp = NULL; 960 struct lpfc_sli_ring *pring; 961 LIST_HEAD(completions); 962 int i; 963 struct lpfc_iocbq *piocb, *next_iocb; 964 965 if (phba->sli_rev != LPFC_SLI_REV4) { 966 for (i = 0; i < psli->num_rings; i++) { 967 pring = &psli->sli3_ring[i]; 968 spin_lock_irq(&phba->hbalock); 969 /* At this point in time the HBA is either reset or DOA 970 * Nothing should be on txcmplq as it will 971 * NEVER complete. 
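/*
 * Illustrative sketch: lpfc_hba_free_post_buf() above uses a common
 * pattern for draining a driver-owned list - splice the shared list
 * onto a local list head while holding the lock, then walk and free
 * the entries with the lock dropped, so that lpfc_mbuf_free() is never
 * called under hbalock. Reduced to its core (names invented for the
 * example; "lock" and "shared_list" stand in for hbalock/postbufq):
 *
 *	LIST_HEAD(local);
 *
 *	spin_lock_irq(&lock);
 *	list_splice_init(&shared_list, &local);
 *	spin_unlock_irq(&lock);
 *
 *	list_for_each_entry_safe(mp, next_mp, &local, list) {
 *		list_del(&mp->list);
 *		free_entry(mp);
 *	}
 *
 * Only the counter update (postbufq_cnt) has to go back under the lock
 * afterwards, which is exactly what the function does.
 */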
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_els_sgl_list and */
					/* scsi_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
				 &aborts);
		spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
				 &nvme_aborts);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	}

	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		cnt = 0;
		list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
		phba->put_nvme_bufs += cnt;
		list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);

		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
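/*
 * Illustrative sketch: lpfc_hba_down_post() above is one of several thin
 * wrappers in this driver that dispatch through a function pointer
 * installed at probe time, so the same call site works for SLI-3 and
 * SLI-4 parts (lpfc_hba_down_post_s3 vs. _s4). A reduced version of the
 * idea, with invented names:
 *
 *	struct example_hba {
 *		int (*down_post)(struct example_hba *);
 *	};
 *
 *	static int example_down_post_s3(struct example_hba *h) { return 0; }
 *	static int example_down_post_s4(struct example_hba *h) { return 0; }
 *
 *	// at setup time, based on the detected SLI revision:
 *	//	h->down_post = is_sli4 ? example_down_post_s4
 *	//			       : example_down_post_s3;
 *
 *	static int example_down_post(struct example_hba *h)
 *	{
 *		return h->down_post(h);
 *	}
 *
 * In lpfc the pointers themselves are installed by the driver's
 * per-SLI-revision setup code, which is outside this excerpt.
 */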
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
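/*
 * Illustrative sketch: lpfc_hb_timeout() and lpfc_rrq_timeout() above
 * follow the current timer_list conventions - the callback receives the
 * struct timer_list pointer and recovers its containing structure with
 * from_timer(), then only records an event and wakes the worker thread
 * instead of doing real work in timer (softirq) context. A reduced
 * standalone form, with invented names:
 *
 *	struct example_dev {
 *		struct timer_list tmo;
 *		unsigned long events;
 *	};
 *
 *	static void example_timeout(struct timer_list *t)
 *	{
 *		struct example_dev *dev = from_timer(dev, t, tmo);
 *
 *		set_bit(0, &dev->events);	// defer real work to a worker
 *		// wake the worker here; lpfc uses lpfc_worker_wake_up(phba)
 *	}
 *
 *	// setup:    timer_setup(&dev->tmo, example_timeout, 0);
 *	// (re)arm:  mod_timer(&dev->tmo, jiffies + HZ);
 *
 * The heart-beat timer in this file is re-armed the same way, using
 * msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL) as the period.
 */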
1217 **/ 1218 static void 1219 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1220 { 1221 unsigned long drvr_flag; 1222 1223 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1224 phba->hb_outstanding = 0; 1225 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1226 1227 /* Check and reset heart-beat timer is necessary */ 1228 mempool_free(pmboxq, phba->mbox_mem_pool); 1229 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1230 !(phba->link_state == LPFC_HBA_ERROR) && 1231 !(phba->pport->load_flag & FC_UNLOADING)) 1232 mod_timer(&phba->hb_tmofunc, 1233 jiffies + 1234 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1235 return; 1236 } 1237 1238 /** 1239 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1240 * @phba: pointer to lpfc hba data structure. 1241 * 1242 * This is the actual HBA-timer timeout handler to be invoked by the worker 1243 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1244 * handler performs any periodic operations needed for the device. If such 1245 * periodic event has already been attended to either in the interrupt handler 1246 * or by processing slow-ring or fast-ring events within the HBA-timer 1247 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1248 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1249 * is configured and there is no heart-beat mailbox command outstanding, a 1250 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1251 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1252 * to offline. 1253 **/ 1254 void 1255 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1256 { 1257 struct lpfc_vport **vports; 1258 LPFC_MBOXQ_t *pmboxq; 1259 struct lpfc_dmabuf *buf_ptr; 1260 int retval, i; 1261 struct lpfc_sli *psli = &phba->sli; 1262 LIST_HEAD(completions); 1263 struct lpfc_queue *qp; 1264 unsigned long time_elapsed; 1265 uint32_t tick_cqe, max_cqe, val; 1266 uint64_t tot, data1, data2, data3; 1267 struct lpfc_nvmet_tgtport *tgtp; 1268 struct lpfc_register reg_data; 1269 struct nvme_fc_local_port *localport; 1270 struct lpfc_nvme_lport *lport; 1271 struct lpfc_nvme_ctrl_stat *cstat; 1272 void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr; 1273 1274 vports = lpfc_create_vport_work_array(phba); 1275 if (vports != NULL) 1276 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1277 lpfc_rcv_seq_check_edtov(vports[i]); 1278 lpfc_fdmi_num_disc_check(vports[i]); 1279 } 1280 lpfc_destroy_vport_work_array(phba, vports); 1281 1282 if ((phba->link_state == LPFC_HBA_ERROR) || 1283 (phba->pport->load_flag & FC_UNLOADING) || 1284 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1285 return; 1286 1287 if (phba->cfg_auto_imax) { 1288 if (!phba->last_eqdelay_time) { 1289 phba->last_eqdelay_time = jiffies; 1290 goto skip_eqdelay; 1291 } 1292 time_elapsed = jiffies - phba->last_eqdelay_time; 1293 phba->last_eqdelay_time = jiffies; 1294 1295 tot = 0xffff; 1296 /* Check outstanding IO count */ 1297 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1298 if (phba->nvmet_support) { 1299 tgtp = phba->targetport->private; 1300 /* Calculate outstanding IOs */ 1301 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); 1302 tot += atomic_read(&tgtp->xmt_fcp_release); 1303 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; 1304 } else { 1305 localport = phba->pport->localport; 1306 if (!localport || !localport->private) 1307 goto skip_eqdelay; 1308 lport = (struct lpfc_nvme_lport *) 1309 localport->private; 1310 tot = 0; 1311 for (i = 0; 1312 
i < phba->cfg_nvme_io_channel; i++) { 1313 cstat = &lport->cstat[i]; 1314 data1 = atomic_read( 1315 &cstat->fc4NvmeInputRequests); 1316 data2 = atomic_read( 1317 &cstat->fc4NvmeOutputRequests); 1318 data3 = atomic_read( 1319 &cstat->fc4NvmeControlRequests); 1320 tot += (data1 + data2 + data3); 1321 tot -= atomic_read( 1322 &cstat->fc4NvmeIoCmpls); 1323 } 1324 } 1325 } 1326 1327 /* Interrupts per sec per EQ */ 1328 val = phba->cfg_fcp_imax / phba->io_channel_irqs; 1329 tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */ 1330 1331 /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */ 1332 max_cqe = time_elapsed * tick_cqe; 1333 1334 for (i = 0; i < phba->io_channel_irqs; i++) { 1335 /* Fast-path EQ */ 1336 qp = phba->sli4_hba.hba_eq[i]; 1337 if (!qp) 1338 continue; 1339 1340 /* Use no EQ delay if we don't have many outstanding 1341 * IOs, or if we are only processing 1 CQE/ISR or less. 1342 * Otherwise, assume we can process up to lpfc_fcp_imax 1343 * interrupts per HBA. 1344 */ 1345 if (tot < LPFC_NODELAY_MAX_IO || 1346 qp->EQ_cqe_cnt <= max_cqe) 1347 val = 0; 1348 else 1349 val = phba->cfg_fcp_imax; 1350 1351 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { 1352 /* Use EQ Delay Register method */ 1353 1354 /* Convert for EQ Delay register */ 1355 if (val) { 1356 /* First, interrupts per sec per EQ */ 1357 val = phba->cfg_fcp_imax / 1358 phba->io_channel_irqs; 1359 1360 /* us delay between each interrupt */ 1361 val = LPFC_SEC_TO_USEC / val; 1362 } 1363 if (val != qp->q_mode) { 1364 reg_data.word0 = 0; 1365 bf_set(lpfc_sliport_eqdelay_id, 1366 ®_data, qp->queue_id); 1367 bf_set(lpfc_sliport_eqdelay_delay, 1368 ®_data, val); 1369 writel(reg_data.word0, eqdreg); 1370 } 1371 } else { 1372 /* Use mbox command method */ 1373 if (val != qp->q_mode) 1374 lpfc_modify_hba_eq_delay(phba, i, 1375 1, val); 1376 } 1377 1378 /* 1379 * val is cfg_fcp_imax or 0 for mbox delay or us delay 1380 * between interrupts for EQDR. 
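/*
 * Illustrative sketch (assumed numbers): the EQ-delay update above
 * converts the user setting lpfc_fcp_imax (total interrupts per second)
 * into a per-EQ microsecond delay for the EQ Delay register: first
 * divide by the number of IRQ vectors to get interrupts/sec per EQ,
 * then take LPFC_SEC_TO_USEC / rate to get the gap between interrupts.
 * With cfg_fcp_imax = 150000 and io_channel_irqs = 4:
 *
 *	per_eq_rate = 150000 / 4;		// 37500 ints/sec per EQ
 *	usdelay     = 1000000 / per_eq_rate;	// ~26 us between interrupts
 *
 * A delay of 0 (used when outstanding I/O is below LPFC_NODELAY_MAX_IO
 * or the EQ processed at most one CQE per interrupt) disables interrupt
 * coalescing for that EQ entirely.
 */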
1381 */ 1382 qp->q_mode = val; 1383 qp->EQ_cqe_cnt = 0; 1384 } 1385 } 1386 1387 skip_eqdelay: 1388 spin_lock_irq(&phba->pport->work_port_lock); 1389 1390 if (time_after(phba->last_completion_time + 1391 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1392 jiffies)) { 1393 spin_unlock_irq(&phba->pport->work_port_lock); 1394 if (!phba->hb_outstanding) 1395 mod_timer(&phba->hb_tmofunc, 1396 jiffies + 1397 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1398 else 1399 mod_timer(&phba->hb_tmofunc, 1400 jiffies + 1401 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1402 return; 1403 } 1404 spin_unlock_irq(&phba->pport->work_port_lock); 1405 1406 if (phba->elsbuf_cnt && 1407 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1408 spin_lock_irq(&phba->hbalock); 1409 list_splice_init(&phba->elsbuf, &completions); 1410 phba->elsbuf_cnt = 0; 1411 phba->elsbuf_prev_cnt = 0; 1412 spin_unlock_irq(&phba->hbalock); 1413 1414 while (!list_empty(&completions)) { 1415 list_remove_head(&completions, buf_ptr, 1416 struct lpfc_dmabuf, list); 1417 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1418 kfree(buf_ptr); 1419 } 1420 } 1421 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1422 1423 /* If there is no heart beat outstanding, issue a heartbeat command */ 1424 if (phba->cfg_enable_hba_heartbeat) { 1425 if (!phba->hb_outstanding) { 1426 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1427 (list_empty(&psli->mboxq))) { 1428 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1429 GFP_KERNEL); 1430 if (!pmboxq) { 1431 mod_timer(&phba->hb_tmofunc, 1432 jiffies + 1433 msecs_to_jiffies(1000 * 1434 LPFC_HB_MBOX_INTERVAL)); 1435 return; 1436 } 1437 1438 lpfc_heart_beat(phba, pmboxq); 1439 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1440 pmboxq->vport = phba->pport; 1441 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1442 MBX_NOWAIT); 1443 1444 if (retval != MBX_BUSY && 1445 retval != MBX_SUCCESS) { 1446 mempool_free(pmboxq, 1447 phba->mbox_mem_pool); 1448 mod_timer(&phba->hb_tmofunc, 1449 jiffies + 1450 msecs_to_jiffies(1000 * 1451 LPFC_HB_MBOX_INTERVAL)); 1452 return; 1453 } 1454 phba->skipped_hb = 0; 1455 phba->hb_outstanding = 1; 1456 } else if (time_before_eq(phba->last_completion_time, 1457 phba->skipped_hb)) { 1458 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1459 "2857 Last completion time not " 1460 " updated in %d ms\n", 1461 jiffies_to_msecs(jiffies 1462 - phba->last_completion_time)); 1463 } else 1464 phba->skipped_hb = jiffies; 1465 1466 mod_timer(&phba->hb_tmofunc, 1467 jiffies + 1468 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1469 return; 1470 } else { 1471 /* 1472 * If heart beat timeout called with hb_outstanding set 1473 * we need to give the hb mailbox cmd a chance to 1474 * complete or TMO. 1475 */ 1476 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1477 "0459 Adapter heartbeat still out" 1478 "standing:last compl time was %d ms.\n", 1479 jiffies_to_msecs(jiffies 1480 - phba->last_completion_time)); 1481 mod_timer(&phba->hb_tmofunc, 1482 jiffies + 1483 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1484 } 1485 } else { 1486 mod_timer(&phba->hb_tmofunc, 1487 jiffies + 1488 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1489 } 1490 } 1491 1492 /** 1493 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1494 * @phba: pointer to lpfc hba data structure. 1495 * 1496 * This routine is called to bring the HBA offline when HBA hardware error 1497 * other than Port Error 6 has been detected. 
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered erratt. That could cause I/Os to
	 * be dropped by the firmware. Error out the iocbs (I/O) on txcmplq
	 * and let the SCSI layer retry them after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/O) on txcmplq and let the SCSI
		 * layer retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
1706 */ 1707 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1708 lpfc_offline(phba); 1709 lpfc_sli_brdrestart(phba); 1710 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1711 lpfc_unblock_mgmt_io(phba); 1712 return; 1713 } 1714 lpfc_unblock_mgmt_io(phba); 1715 } else if (phba->work_hs & HS_CRIT_TEMP) { 1716 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1717 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1718 temp_event_data.event_code = LPFC_CRIT_TEMP; 1719 temp_event_data.data = (uint32_t)temperature; 1720 1721 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1722 "0406 Adapter maximum temperature exceeded " 1723 "(%ld), taking this port offline " 1724 "Data: x%x x%x x%x\n", 1725 temperature, phba->work_hs, 1726 phba->work_status[0], phba->work_status[1]); 1727 1728 shost = lpfc_shost_from_vport(phba->pport); 1729 fc_host_post_vendor_event(shost, fc_get_event_number(), 1730 sizeof(temp_event_data), 1731 (char *) &temp_event_data, 1732 SCSI_NL_VID_TYPE_PCI 1733 | PCI_VENDOR_ID_EMULEX); 1734 1735 spin_lock_irq(&phba->hbalock); 1736 phba->over_temp_state = HBA_OVER_TEMP; 1737 spin_unlock_irq(&phba->hbalock); 1738 lpfc_offline_eratt(phba); 1739 1740 } else { 1741 /* The if clause above forces this code path when the status 1742 * failure is a value other than FFER6. Do not call the offline 1743 * twice. This is the adapter hardware error path. 1744 */ 1745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1746 "0457 Adapter Hardware Error " 1747 "Data: x%x x%x x%x\n", 1748 phba->work_hs, 1749 phba->work_status[0], phba->work_status[1]); 1750 1751 event_data = FC_REG_DUMP_EVENT; 1752 shost = lpfc_shost_from_vport(vport); 1753 fc_host_post_vendor_event(shost, fc_get_event_number(), 1754 sizeof(event_data), (char *) &event_data, 1755 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1756 1757 lpfc_offline_eratt(phba); 1758 } 1759 return; 1760 } 1761 1762 /** 1763 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1764 * @phba: pointer to lpfc hba data structure. 1765 * @mbx_action: flag for mailbox shutdown action. 1766 * 1767 * This routine is invoked to perform an SLI4 port PCI function reset in 1768 * response to port status register polling attention. It waits for port 1769 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1770 * During this process, interrupt vectors are freed and later requested 1771 * for handling possible port resource change. 1772 **/ 1773 static int 1774 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1775 bool en_rn_msg) 1776 { 1777 int rc; 1778 uint32_t intr_mode; 1779 1780 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1781 LPFC_SLI_INTF_IF_TYPE_2) { 1782 /* 1783 * On error status condition, driver need to wait for port 1784 * ready before performing reset. 
1785 */ 1786 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1787 if (rc) 1788 return rc; 1789 } 1790 1791 /* need reset: attempt for port recovery */ 1792 if (en_rn_msg) 1793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1794 "2887 Reset Needed: Attempting Port " 1795 "Recovery...\n"); 1796 lpfc_offline_prep(phba, mbx_action); 1797 lpfc_offline(phba); 1798 /* release interrupt for possible resource change */ 1799 lpfc_sli4_disable_intr(phba); 1800 lpfc_sli_brdrestart(phba); 1801 /* request and enable interrupt */ 1802 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1803 if (intr_mode == LPFC_INTR_ERROR) { 1804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1805 "3175 Failed to enable interrupt\n"); 1806 return -EIO; 1807 } 1808 phba->intr_mode = intr_mode; 1809 rc = lpfc_online(phba); 1810 if (rc == 0) 1811 lpfc_unblock_mgmt_io(phba); 1812 1813 return rc; 1814 } 1815 1816 /** 1817 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1818 * @phba: pointer to lpfc hba data structure. 1819 * 1820 * This routine is invoked to handle the SLI4 HBA hardware error attention 1821 * conditions. 1822 **/ 1823 static void 1824 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1825 { 1826 struct lpfc_vport *vport = phba->pport; 1827 uint32_t event_data; 1828 struct Scsi_Host *shost; 1829 uint32_t if_type; 1830 struct lpfc_register portstat_reg = {0}; 1831 uint32_t reg_err1, reg_err2; 1832 uint32_t uerrlo_reg, uemasklo_reg; 1833 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1834 bool en_rn_msg = true; 1835 struct temp_event temp_event_data; 1836 struct lpfc_register portsmphr_reg; 1837 int rc, i; 1838 1839 /* If the pci channel is offline, ignore possible errors, since 1840 * we cannot communicate with the pci card anyway. 1841 */ 1842 if (pci_channel_offline(phba->pcidev)) 1843 return; 1844 1845 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1846 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1847 switch (if_type) { 1848 case LPFC_SLI_INTF_IF_TYPE_0: 1849 pci_rd_rc1 = lpfc_readl( 1850 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1851 &uerrlo_reg); 1852 pci_rd_rc2 = lpfc_readl( 1853 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1854 &uemasklo_reg); 1855 /* consider PCI bus read error as pci_channel_offline */ 1856 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1857 return; 1858 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1859 lpfc_sli4_offline_eratt(phba); 1860 return; 1861 } 1862 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1863 "7623 Checking UE recoverable"); 1864 1865 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1866 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1867 &portsmphr_reg.word0)) 1868 continue; 1869 1870 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1871 &portsmphr_reg); 1872 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1873 LPFC_PORT_SEM_UE_RECOVERABLE) 1874 break; 1875 /*Sleep for 1Sec, before checking SEMAPHORE */ 1876 msleep(1000); 1877 } 1878 1879 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1880 "4827 smphr_port_status x%x : Waited %dSec", 1881 smphr_port_status, i); 1882 1883 /* Recoverable UE, reset the HBA device */ 1884 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1885 LPFC_PORT_SEM_UE_RECOVERABLE) { 1886 for (i = 0; i < 20; i++) { 1887 msleep(1000); 1888 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1889 &portsmphr_reg.word0) && 1890 (LPFC_POST_STAGE_PORT_READY == 1891 bf_get(lpfc_port_smphr_port_status, 1892 &portsmphr_reg))) { 1893 rc = lpfc_sli4_port_sta_fn_reset(phba, 1894 LPFC_MBX_NO_WAIT, en_rn_msg); 1895 if (rc == 0) 1896 
return; 1897 lpfc_printf_log(phba, 1898 KERN_ERR, LOG_INIT, 1899 "4215 Failed to recover UE"); 1900 break; 1901 } 1902 } 1903 } 1904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1905 "7624 Firmware not ready: Failing UE recovery," 1906 " waited %dSec", i); 1907 lpfc_sli4_offline_eratt(phba); 1908 break; 1909 1910 case LPFC_SLI_INTF_IF_TYPE_2: 1911 case LPFC_SLI_INTF_IF_TYPE_6: 1912 pci_rd_rc1 = lpfc_readl( 1913 phba->sli4_hba.u.if_type2.STATUSregaddr, 1914 &portstat_reg.word0); 1915 /* consider PCI bus read error as pci_channel_offline */ 1916 if (pci_rd_rc1 == -EIO) { 1917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1918 "3151 PCI bus read access failure: x%x\n", 1919 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1920 return; 1921 } 1922 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1923 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1924 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1925 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1926 "2889 Port Overtemperature event, " 1927 "taking port offline Data: x%x x%x\n", 1928 reg_err1, reg_err2); 1929 1930 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1931 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1932 temp_event_data.event_code = LPFC_CRIT_TEMP; 1933 temp_event_data.data = 0xFFFFFFFF; 1934 1935 shost = lpfc_shost_from_vport(phba->pport); 1936 fc_host_post_vendor_event(shost, fc_get_event_number(), 1937 sizeof(temp_event_data), 1938 (char *)&temp_event_data, 1939 SCSI_NL_VID_TYPE_PCI 1940 | PCI_VENDOR_ID_EMULEX); 1941 1942 spin_lock_irq(&phba->hbalock); 1943 phba->over_temp_state = HBA_OVER_TEMP; 1944 spin_unlock_irq(&phba->hbalock); 1945 lpfc_sli4_offline_eratt(phba); 1946 return; 1947 } 1948 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1949 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 1950 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1951 "3143 Port Down: Firmware Update " 1952 "Detected\n"); 1953 en_rn_msg = false; 1954 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1955 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1957 "3144 Port Down: Debug Dump\n"); 1958 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1959 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1961 "3145 Port Down: Provisioning\n"); 1962 1963 /* If resets are disabled then leave the HBA alone and return */ 1964 if (!phba->cfg_enable_hba_reset) 1965 return; 1966 1967 /* Check port status register for function reset */ 1968 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1969 en_rn_msg); 1970 if (rc == 0) { 1971 /* don't report event on forced debug dump */ 1972 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1973 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1974 return; 1975 else 1976 break; 1977 } 1978 /* fall through for not able to recover */ 1979 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1980 "3152 Unrecoverable error, bring the port " 1981 "offline\n"); 1982 lpfc_sli4_offline_eratt(phba); 1983 break; 1984 case LPFC_SLI_INTF_IF_TYPE_1: 1985 default: 1986 break; 1987 } 1988 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1989 "3123 Report dump event to upper layer\n"); 1990 /* Send an internal error event to mgmt application */ 1991 lpfc_board_errevt_to_mgmt(phba); 1992 1993 event_data = FC_REG_DUMP_EVENT; 1994 shost = lpfc_shost_from_vport(vport); 1995 fc_host_post_vendor_event(shost, fc_get_event_number(), 1996 sizeof(event_data), (char *) &event_data, 1997 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1998 } 1999 2000 /** 2001 * 
lpfc_handle_eratt - Wrapper func for handling hba error attention 2002 * @phba: pointer to lpfc HBA data structure. 2003 * 2004 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2005 * routine from the API jump table function pointer from the lpfc_hba struct. 2006 * 2007 * Return codes 2008 * 0 - success. 2009 * Any other value - error. 2010 **/ 2011 void 2012 lpfc_handle_eratt(struct lpfc_hba *phba) 2013 { 2014 (*phba->lpfc_handle_eratt)(phba); 2015 } 2016 2017 /** 2018 * lpfc_handle_latt - The HBA link event handler 2019 * @phba: pointer to lpfc hba data structure. 2020 * 2021 * This routine is invoked from the worker thread to handle a HBA host 2022 * attention link event. SLI3 only. 2023 **/ 2024 void 2025 lpfc_handle_latt(struct lpfc_hba *phba) 2026 { 2027 struct lpfc_vport *vport = phba->pport; 2028 struct lpfc_sli *psli = &phba->sli; 2029 LPFC_MBOXQ_t *pmb; 2030 volatile uint32_t control; 2031 struct lpfc_dmabuf *mp; 2032 int rc = 0; 2033 2034 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2035 if (!pmb) { 2036 rc = 1; 2037 goto lpfc_handle_latt_err_exit; 2038 } 2039 2040 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2041 if (!mp) { 2042 rc = 2; 2043 goto lpfc_handle_latt_free_pmb; 2044 } 2045 2046 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2047 if (!mp->virt) { 2048 rc = 3; 2049 goto lpfc_handle_latt_free_mp; 2050 } 2051 2052 /* Cleanup any outstanding ELS commands */ 2053 lpfc_els_flush_all_cmd(phba); 2054 2055 psli->slistat.link_event++; 2056 lpfc_read_topology(phba, pmb, mp); 2057 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2058 pmb->vport = vport; 2059 /* Block ELS IOCBs until we have processed this mbox command */ 2060 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2061 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2062 if (rc == MBX_NOT_FINISHED) { 2063 rc = 4; 2064 goto lpfc_handle_latt_free_mbuf; 2065 } 2066 2067 /* Clear Link Attention in HA REG */ 2068 spin_lock_irq(&phba->hbalock); 2069 writel(HA_LATT, phba->HAregaddr); 2070 readl(phba->HAregaddr); /* flush */ 2071 spin_unlock_irq(&phba->hbalock); 2072 2073 return; 2074 2075 lpfc_handle_latt_free_mbuf: 2076 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2077 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2078 lpfc_handle_latt_free_mp: 2079 kfree(mp); 2080 lpfc_handle_latt_free_pmb: 2081 mempool_free(pmb, phba->mbox_mem_pool); 2082 lpfc_handle_latt_err_exit: 2083 /* Enable Link attention interrupts */ 2084 spin_lock_irq(&phba->hbalock); 2085 psli->sli_flag |= LPFC_PROCESS_LA; 2086 control = readl(phba->HCregaddr); 2087 control |= HC_LAINT_ENA; 2088 writel(control, phba->HCregaddr); 2089 readl(phba->HCregaddr); /* flush */ 2090 2091 /* Clear Link Attention in HA REG */ 2092 writel(HA_LATT, phba->HAregaddr); 2093 readl(phba->HAregaddr); /* flush */ 2094 spin_unlock_irq(&phba->hbalock); 2095 lpfc_linkdown(phba); 2096 phba->link_state = LPFC_HBA_ERROR; 2097 2098 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2099 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2100 2101 return; 2102 } 2103 2104 /** 2105 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2106 * @phba: pointer to lpfc hba data structure. 2107 * @vpd: pointer to the vital product data. 2108 * @len: length of the vital product data in bytes. 2109 * 2110 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2111 * an array of characters. In this routine, the ModelName, ProgramType, and 2112 * ModelDesc, etc. 
fields of the phba data structure will be populated. 2113 * 2114 * Return codes 2115 * 0 - pointer to the VPD passed in is NULL 2116 * 1 - success 2117 **/ 2118 int 2119 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2120 { 2121 uint8_t lenlo, lenhi; 2122 int Length; 2123 int i, j; 2124 int finished = 0; 2125 int index = 0; 2126 2127 if (!vpd) 2128 return 0; 2129 2130 /* Vital Product */ 2131 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2132 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2133 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2134 (uint32_t) vpd[3]); 2135 while (!finished && (index < (len - 4))) { 2136 switch (vpd[index]) { 2137 case 0x82: 2138 case 0x91: 2139 index += 1; 2140 lenlo = vpd[index]; 2141 index += 1; 2142 lenhi = vpd[index]; 2143 index += 1; 2144 i = ((((unsigned short)lenhi) << 8) + lenlo); 2145 index += i; 2146 break; 2147 case 0x90: 2148 index += 1; 2149 lenlo = vpd[index]; 2150 index += 1; 2151 lenhi = vpd[index]; 2152 index += 1; 2153 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2154 if (Length > len - index) 2155 Length = len - index; 2156 while (Length > 0) { 2157 /* Look for Serial Number */ 2158 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2159 index += 2; 2160 i = vpd[index]; 2161 index += 1; 2162 j = 0; 2163 Length -= (3+i); 2164 while(i--) { 2165 phba->SerialNumber[j++] = vpd[index++]; 2166 if (j == 31) 2167 break; 2168 } 2169 phba->SerialNumber[j] = 0; 2170 continue; 2171 } 2172 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2173 phba->vpd_flag |= VPD_MODEL_DESC; 2174 index += 2; 2175 i = vpd[index]; 2176 index += 1; 2177 j = 0; 2178 Length -= (3+i); 2179 while(i--) { 2180 phba->ModelDesc[j++] = vpd[index++]; 2181 if (j == 255) 2182 break; 2183 } 2184 phba->ModelDesc[j] = 0; 2185 continue; 2186 } 2187 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2188 phba->vpd_flag |= VPD_MODEL_NAME; 2189 index += 2; 2190 i = vpd[index]; 2191 index += 1; 2192 j = 0; 2193 Length -= (3+i); 2194 while(i--) { 2195 phba->ModelName[j++] = vpd[index++]; 2196 if (j == 79) 2197 break; 2198 } 2199 phba->ModelName[j] = 0; 2200 continue; 2201 } 2202 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2203 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2204 index += 2; 2205 i = vpd[index]; 2206 index += 1; 2207 j = 0; 2208 Length -= (3+i); 2209 while(i--) { 2210 phba->ProgramType[j++] = vpd[index++]; 2211 if (j == 255) 2212 break; 2213 } 2214 phba->ProgramType[j] = 0; 2215 continue; 2216 } 2217 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2218 phba->vpd_flag |= VPD_PORT; 2219 index += 2; 2220 i = vpd[index]; 2221 index += 1; 2222 j = 0; 2223 Length -= (3+i); 2224 while(i--) { 2225 if ((phba->sli_rev == LPFC_SLI_REV4) && 2226 (phba->sli4_hba.pport_name_sta == 2227 LPFC_SLI4_PPNAME_GET)) { 2228 j++; 2229 index++; 2230 } else 2231 phba->Port[j++] = vpd[index++]; 2232 if (j == 19) 2233 break; 2234 } 2235 if ((phba->sli_rev != LPFC_SLI_REV4) || 2236 (phba->sli4_hba.pport_name_sta == 2237 LPFC_SLI4_PPNAME_NON)) 2238 phba->Port[j] = 0; 2239 continue; 2240 } 2241 else { 2242 index += 2; 2243 i = vpd[index]; 2244 index += 1; 2245 index += i; 2246 Length -= (3 + i); 2247 } 2248 } 2249 finished = 0; 2250 break; 2251 case 0x78: 2252 finished = 1; 2253 break; 2254 default: 2255 index ++; 2256 break; 2257 } 2258 } 2259 2260 return(1); 2261 } 2262 2263 /** 2264 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2265 * @phba: pointer to lpfc hba data structure. 
2266 * @mdp: pointer to the data structure to hold the derived model name. 2267 * @descp: pointer to the data structure to hold the derived description. 2268 * 2269 * This routine retrieves HBA's description based on its registered PCI device 2270 * ID. The @descp passed into this function points to an array of 256 chars. It 2271 * shall be returned with the model name, maximum speed, and the host bus type. 2272 * The @mdp passed into this function points to an array of 80 chars. When the 2273 * function returns, the @mdp will be filled with the model name. 2274 **/ 2275 static void 2276 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2277 { 2278 lpfc_vpd_t *vp; 2279 uint16_t dev_id = phba->pcidev->device; 2280 int max_speed; 2281 int GE = 0; 2282 int oneConnect = 0; /* default is not a oneConnect */ 2283 struct { 2284 char *name; 2285 char *bus; 2286 char *function; 2287 } m = {"<Unknown>", "", ""}; 2288 2289 if (mdp && mdp[0] != '\0' 2290 && descp && descp[0] != '\0') 2291 return; 2292 2293 if (phba->lmt & LMT_64Gb) 2294 max_speed = 64; 2295 else if (phba->lmt & LMT_32Gb) 2296 max_speed = 32; 2297 else if (phba->lmt & LMT_16Gb) 2298 max_speed = 16; 2299 else if (phba->lmt & LMT_10Gb) 2300 max_speed = 10; 2301 else if (phba->lmt & LMT_8Gb) 2302 max_speed = 8; 2303 else if (phba->lmt & LMT_4Gb) 2304 max_speed = 4; 2305 else if (phba->lmt & LMT_2Gb) 2306 max_speed = 2; 2307 else if (phba->lmt & LMT_1Gb) 2308 max_speed = 1; 2309 else 2310 max_speed = 0; 2311 2312 vp = &phba->vpd; 2313 2314 switch (dev_id) { 2315 case PCI_DEVICE_ID_FIREFLY: 2316 m = (typeof(m)){"LP6000", "PCI", 2317 "Obsolete, Unsupported Fibre Channel Adapter"}; 2318 break; 2319 case PCI_DEVICE_ID_SUPERFLY: 2320 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2321 m = (typeof(m)){"LP7000", "PCI", ""}; 2322 else 2323 m = (typeof(m)){"LP7000E", "PCI", ""}; 2324 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2325 break; 2326 case PCI_DEVICE_ID_DRAGONFLY: 2327 m = (typeof(m)){"LP8000", "PCI", 2328 "Obsolete, Unsupported Fibre Channel Adapter"}; 2329 break; 2330 case PCI_DEVICE_ID_CENTAUR: 2331 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2332 m = (typeof(m)){"LP9002", "PCI", ""}; 2333 else 2334 m = (typeof(m)){"LP9000", "PCI", ""}; 2335 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2336 break; 2337 case PCI_DEVICE_ID_RFLY: 2338 m = (typeof(m)){"LP952", "PCI", 2339 "Obsolete, Unsupported Fibre Channel Adapter"}; 2340 break; 2341 case PCI_DEVICE_ID_PEGASUS: 2342 m = (typeof(m)){"LP9802", "PCI-X", 2343 "Obsolete, Unsupported Fibre Channel Adapter"}; 2344 break; 2345 case PCI_DEVICE_ID_THOR: 2346 m = (typeof(m)){"LP10000", "PCI-X", 2347 "Obsolete, Unsupported Fibre Channel Adapter"}; 2348 break; 2349 case PCI_DEVICE_ID_VIPER: 2350 m = (typeof(m)){"LPX1000", "PCI-X", 2351 "Obsolete, Unsupported Fibre Channel Adapter"}; 2352 break; 2353 case PCI_DEVICE_ID_PFLY: 2354 m = (typeof(m)){"LP982", "PCI-X", 2355 "Obsolete, Unsupported Fibre Channel Adapter"}; 2356 break; 2357 case PCI_DEVICE_ID_TFLY: 2358 m = (typeof(m)){"LP1050", "PCI-X", 2359 "Obsolete, Unsupported Fibre Channel Adapter"}; 2360 break; 2361 case PCI_DEVICE_ID_HELIOS: 2362 m = (typeof(m)){"LP11000", "PCI-X2", 2363 "Obsolete, Unsupported Fibre Channel Adapter"}; 2364 break; 2365 case PCI_DEVICE_ID_HELIOS_SCSP: 2366 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2367 "Obsolete, Unsupported Fibre Channel Adapter"}; 2368 break; 2369 case PCI_DEVICE_ID_HELIOS_DCSP: 2370 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2371 
"Obsolete, Unsupported Fibre Channel Adapter"}; 2372 break; 2373 case PCI_DEVICE_ID_NEPTUNE: 2374 m = (typeof(m)){"LPe1000", "PCIe", 2375 "Obsolete, Unsupported Fibre Channel Adapter"}; 2376 break; 2377 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2378 m = (typeof(m)){"LPe1000-SP", "PCIe", 2379 "Obsolete, Unsupported Fibre Channel Adapter"}; 2380 break; 2381 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2382 m = (typeof(m)){"LPe1002-SP", "PCIe", 2383 "Obsolete, Unsupported Fibre Channel Adapter"}; 2384 break; 2385 case PCI_DEVICE_ID_BMID: 2386 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2387 break; 2388 case PCI_DEVICE_ID_BSMB: 2389 m = (typeof(m)){"LP111", "PCI-X2", 2390 "Obsolete, Unsupported Fibre Channel Adapter"}; 2391 break; 2392 case PCI_DEVICE_ID_ZEPHYR: 2393 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2394 break; 2395 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2396 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2397 break; 2398 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2399 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2400 GE = 1; 2401 break; 2402 case PCI_DEVICE_ID_ZMID: 2403 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2404 break; 2405 case PCI_DEVICE_ID_ZSMB: 2406 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2407 break; 2408 case PCI_DEVICE_ID_LP101: 2409 m = (typeof(m)){"LP101", "PCI-X", 2410 "Obsolete, Unsupported Fibre Channel Adapter"}; 2411 break; 2412 case PCI_DEVICE_ID_LP10000S: 2413 m = (typeof(m)){"LP10000-S", "PCI", 2414 "Obsolete, Unsupported Fibre Channel Adapter"}; 2415 break; 2416 case PCI_DEVICE_ID_LP11000S: 2417 m = (typeof(m)){"LP11000-S", "PCI-X2", 2418 "Obsolete, Unsupported Fibre Channel Adapter"}; 2419 break; 2420 case PCI_DEVICE_ID_LPE11000S: 2421 m = (typeof(m)){"LPe11000-S", "PCIe", 2422 "Obsolete, Unsupported Fibre Channel Adapter"}; 2423 break; 2424 case PCI_DEVICE_ID_SAT: 2425 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2426 break; 2427 case PCI_DEVICE_ID_SAT_MID: 2428 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2429 break; 2430 case PCI_DEVICE_ID_SAT_SMB: 2431 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2432 break; 2433 case PCI_DEVICE_ID_SAT_DCSP: 2434 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2435 break; 2436 case PCI_DEVICE_ID_SAT_SCSP: 2437 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2438 break; 2439 case PCI_DEVICE_ID_SAT_S: 2440 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2441 break; 2442 case PCI_DEVICE_ID_HORNET: 2443 m = (typeof(m)){"LP21000", "PCIe", 2444 "Obsolete, Unsupported FCoE Adapter"}; 2445 GE = 1; 2446 break; 2447 case PCI_DEVICE_ID_PROTEUS_VF: 2448 m = (typeof(m)){"LPev12000", "PCIe IOV", 2449 "Obsolete, Unsupported Fibre Channel Adapter"}; 2450 break; 2451 case PCI_DEVICE_ID_PROTEUS_PF: 2452 m = (typeof(m)){"LPev12000", "PCIe IOV", 2453 "Obsolete, Unsupported Fibre Channel Adapter"}; 2454 break; 2455 case PCI_DEVICE_ID_PROTEUS_S: 2456 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2457 "Obsolete, Unsupported Fibre Channel Adapter"}; 2458 break; 2459 case PCI_DEVICE_ID_TIGERSHARK: 2460 oneConnect = 1; 2461 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2462 break; 2463 case PCI_DEVICE_ID_TOMCAT: 2464 oneConnect = 1; 2465 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2466 break; 2467 case PCI_DEVICE_ID_FALCON: 2468 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2469 "EmulexSecure Fibre"}; 2470 break; 2471 case PCI_DEVICE_ID_BALIUS: 2472 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 
2473 "Obsolete, Unsupported Fibre Channel Adapter"}; 2474 break; 2475 case PCI_DEVICE_ID_LANCER_FC: 2476 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2477 break; 2478 case PCI_DEVICE_ID_LANCER_FC_VF: 2479 m = (typeof(m)){"LPe16000", "PCIe", 2480 "Obsolete, Unsupported Fibre Channel Adapter"}; 2481 break; 2482 case PCI_DEVICE_ID_LANCER_FCOE: 2483 oneConnect = 1; 2484 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2485 break; 2486 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2487 oneConnect = 1; 2488 m = (typeof(m)){"OCe15100", "PCIe", 2489 "Obsolete, Unsupported FCoE"}; 2490 break; 2491 case PCI_DEVICE_ID_LANCER_G6_FC: 2492 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2493 break; 2494 case PCI_DEVICE_ID_LANCER_G7_FC: 2495 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2496 break; 2497 case PCI_DEVICE_ID_SKYHAWK: 2498 case PCI_DEVICE_ID_SKYHAWK_VF: 2499 oneConnect = 1; 2500 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2501 break; 2502 default: 2503 m = (typeof(m)){"Unknown", "", ""}; 2504 break; 2505 } 2506 2507 if (mdp && mdp[0] == '\0') 2508 snprintf(mdp, 79,"%s", m.name); 2509 /* 2510 * oneConnect hba requires special processing, they are all initiators 2511 * and we put the port number on the end 2512 */ 2513 if (descp && descp[0] == '\0') { 2514 if (oneConnect) 2515 snprintf(descp, 255, 2516 "Emulex OneConnect %s, %s Initiator %s", 2517 m.name, m.function, 2518 phba->Port); 2519 else if (max_speed == 0) 2520 snprintf(descp, 255, 2521 "Emulex %s %s %s", 2522 m.name, m.bus, m.function); 2523 else 2524 snprintf(descp, 255, 2525 "Emulex %s %d%s %s %s", 2526 m.name, max_speed, (GE) ? "GE" : "Gb", 2527 m.bus, m.function); 2528 } 2529 } 2530 2531 /** 2532 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2533 * @phba: pointer to lpfc hba data structure. 2534 * @pring: pointer to a IOCB ring. 2535 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2536 * 2537 * This routine posts a given number of IOCBs with the associated DMA buffer 2538 * descriptors specified by the cnt argument to the given IOCB ring. 2539 * 2540 * Return codes 2541 * The number of IOCBs NOT able to be posted to the IOCB ring. 
2542 **/ 2543 int 2544 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2545 { 2546 IOCB_t *icmd; 2547 struct lpfc_iocbq *iocb; 2548 struct lpfc_dmabuf *mp1, *mp2; 2549 2550 cnt += pring->missbufcnt; 2551 2552 /* While there are buffers to post */ 2553 while (cnt > 0) { 2554 /* Allocate buffer for command iocb */ 2555 iocb = lpfc_sli_get_iocbq(phba); 2556 if (iocb == NULL) { 2557 pring->missbufcnt = cnt; 2558 return cnt; 2559 } 2560 icmd = &iocb->iocb; 2561 2562 /* 2 buffers can be posted per command */ 2563 /* Allocate buffer to post */ 2564 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2565 if (mp1) 2566 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2567 if (!mp1 || !mp1->virt) { 2568 kfree(mp1); 2569 lpfc_sli_release_iocbq(phba, iocb); 2570 pring->missbufcnt = cnt; 2571 return cnt; 2572 } 2573 2574 INIT_LIST_HEAD(&mp1->list); 2575 /* Allocate buffer to post */ 2576 if (cnt > 1) { 2577 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2578 if (mp2) 2579 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2580 &mp2->phys); 2581 if (!mp2 || !mp2->virt) { 2582 kfree(mp2); 2583 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2584 kfree(mp1); 2585 lpfc_sli_release_iocbq(phba, iocb); 2586 pring->missbufcnt = cnt; 2587 return cnt; 2588 } 2589 2590 INIT_LIST_HEAD(&mp2->list); 2591 } else { 2592 mp2 = NULL; 2593 } 2594 2595 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2596 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2597 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2598 icmd->ulpBdeCount = 1; 2599 cnt--; 2600 if (mp2) { 2601 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2602 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2603 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2604 cnt--; 2605 icmd->ulpBdeCount = 2; 2606 } 2607 2608 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2609 icmd->ulpLe = 1; 2610 2611 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2612 IOCB_ERROR) { 2613 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2614 kfree(mp1); 2615 cnt++; 2616 if (mp2) { 2617 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2618 kfree(mp2); 2619 cnt++; 2620 } 2621 lpfc_sli_release_iocbq(phba, iocb); 2622 pring->missbufcnt = cnt; 2623 return cnt; 2624 } 2625 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2626 if (mp2) 2627 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2628 } 2629 pring->missbufcnt = 0; 2630 return 0; 2631 } 2632 2633 /** 2634 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2635 * @phba: pointer to lpfc hba data structure. 2636 * 2637 * This routine posts initial receive IOCB buffers to the ELS ring. The 2638 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2639 * set to 64 IOCBs. SLI3 only. 2640 * 2641 * Return codes 2642 * 0 - success (currently always success) 2643 **/ 2644 static int 2645 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2646 { 2647 struct lpfc_sli *psli = &phba->sli; 2648 2649 /* Ring 0, ELS / CT buffers */ 2650 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2651 /* Ring 2 - FCP no buffers needed */ 2652 2653 return 0; 2654 } 2655 2656 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2657 2658 /** 2659 * lpfc_sha_init - Set up initial array of hash table entries 2660 * @HashResultPointer: pointer to an array as hash table. 2661 * 2662 * This routine sets up the initial values to the array of hash table entries 2663 * for the LC HBAs. 
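 * The five constants written below are the standard SHA-1 initial hash
 * values (H0-H4); lpfc_sha_iterate() then applies the usual 80-round
 * SHA-1 compression (round constants 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC,
 * 0xCA62C1D6) to the working array.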
2664 **/
2665 static void
2666 lpfc_sha_init(uint32_t * HashResultPointer)
2667 {
2668 HashResultPointer[0] = 0x67452301;
2669 HashResultPointer[1] = 0xEFCDAB89;
2670 HashResultPointer[2] = 0x98BADCFE;
2671 HashResultPointer[3] = 0x10325476;
2672 HashResultPointer[4] = 0xC3D2E1F0;
2673 }
2674
2675 /**
2676 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2677 * @HashResultPointer: pointer to an initial/result hash table.
2678 * @HashWorkingPointer: pointer to a working hash table.
2679 *
2680 * This routine iterates an initial hash table pointed to by @HashResultPointer
2681 * with the values from the working hash table pointed to by @HashWorkingPointer.
2682 * The results are put back into the initial hash table and returned through
2683 * @HashResultPointer as the result hash table.
2684 **/
2685 static void
2686 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2687 {
2688 int t;
2689 uint32_t TEMP;
2690 uint32_t A, B, C, D, E;
2691 t = 16;
2692 do {
2693 HashWorkingPointer[t] =
2694 S(1,
2695 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2696 8] ^
2697 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2698 } while (++t <= 79);
2699 t = 0;
2700 A = HashResultPointer[0];
2701 B = HashResultPointer[1];
2702 C = HashResultPointer[2];
2703 D = HashResultPointer[3];
2704 E = HashResultPointer[4];
2705
2706 do {
2707 if (t < 20) {
2708 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2709 } else if (t < 40) {
2710 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2711 } else if (t < 60) {
2712 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2713 } else {
2714 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2715 }
2716 TEMP += S(5, A) + E + HashWorkingPointer[t];
2717 E = D;
2718 D = C;
2719 C = S(30, B);
2720 B = A;
2721 A = TEMP;
2722 } while (++t <= 79);
2723
2724 HashResultPointer[0] += A;
2725 HashResultPointer[1] += B;
2726 HashResultPointer[2] += C;
2727 HashResultPointer[3] += D;
2728 HashResultPointer[4] += E;
2729
2730 }
2731
2732 /**
2733 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2734 * @RandomChallenge: pointer to the entry of host challenge random number array.
2735 * @HashWorking: pointer to the entry of the working hash array.
2736 *
2737 * This routine calculates the working hash array referred to by @HashWorking
2738 * from the challenge random numbers associated with the host, referred to by
2739 * @RandomChallenge. The result is put into the entry of the working hash
2740 * array and returned by reference through @HashWorking.
2741 **/
2742 static void
2743 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2744 {
2745 *HashWorking = (*RandomChallenge ^ *HashWorking);
2746 }
2747
2748 /**
2749 * lpfc_hba_init - Perform special handling for LC HBA initialization
2750 * @phba: pointer to lpfc hba data structure.
2751 * @hbainit: pointer to an array of unsigned 32-bit integers.
2752 *
2753 * This routine performs the special handling for LC HBA initialization.
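 * Roughly, the routine seeds an 80-word working array from the adapter
 * WWNN, XORs the first words with the RandomData challenge through
 * lpfc_challenge_key(), and then runs one SHA-1 style pass
 * (lpfc_sha_init() followed by lpfc_sha_iterate()) so that the five-word
 * result is returned in @hbainit.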
2754 **/ 2755 void 2756 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2757 { 2758 int t; 2759 uint32_t *HashWorking; 2760 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2761 2762 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2763 if (!HashWorking) 2764 return; 2765 2766 HashWorking[0] = HashWorking[78] = *pwwnn++; 2767 HashWorking[1] = HashWorking[79] = *pwwnn; 2768 2769 for (t = 0; t < 7; t++) 2770 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2771 2772 lpfc_sha_init(hbainit); 2773 lpfc_sha_iterate(hbainit, HashWorking); 2774 kfree(HashWorking); 2775 } 2776 2777 /** 2778 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2779 * @vport: pointer to a virtual N_Port data structure. 2780 * 2781 * This routine performs the necessary cleanups before deleting the @vport. 2782 * It invokes the discovery state machine to perform necessary state 2783 * transitions and to release the ndlps associated with the @vport. Note, 2784 * the physical port is treated as @vport 0. 2785 **/ 2786 void 2787 lpfc_cleanup(struct lpfc_vport *vport) 2788 { 2789 struct lpfc_hba *phba = vport->phba; 2790 struct lpfc_nodelist *ndlp, *next_ndlp; 2791 int i = 0; 2792 2793 if (phba->link_state > LPFC_LINK_DOWN) 2794 lpfc_port_link_failure(vport); 2795 2796 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2797 if (!NLP_CHK_NODE_ACT(ndlp)) { 2798 ndlp = lpfc_enable_node(vport, ndlp, 2799 NLP_STE_UNUSED_NODE); 2800 if (!ndlp) 2801 continue; 2802 spin_lock_irq(&phba->ndlp_lock); 2803 NLP_SET_FREE_REQ(ndlp); 2804 spin_unlock_irq(&phba->ndlp_lock); 2805 /* Trigger the release of the ndlp memory */ 2806 lpfc_nlp_put(ndlp); 2807 continue; 2808 } 2809 spin_lock_irq(&phba->ndlp_lock); 2810 if (NLP_CHK_FREE_REQ(ndlp)) { 2811 /* The ndlp should not be in memory free mode already */ 2812 spin_unlock_irq(&phba->ndlp_lock); 2813 continue; 2814 } else 2815 /* Indicate request for freeing ndlp memory */ 2816 NLP_SET_FREE_REQ(ndlp); 2817 spin_unlock_irq(&phba->ndlp_lock); 2818 2819 if (vport->port_type != LPFC_PHYSICAL_PORT && 2820 ndlp->nlp_DID == Fabric_DID) { 2821 /* Just free up ndlp with Fabric_DID for vports */ 2822 lpfc_nlp_put(ndlp); 2823 continue; 2824 } 2825 2826 /* take care of nodes in unused state before the state 2827 * machine taking action. 2828 */ 2829 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2830 lpfc_nlp_put(ndlp); 2831 continue; 2832 } 2833 2834 if (ndlp->nlp_type & NLP_FABRIC) 2835 lpfc_disc_state_machine(vport, ndlp, NULL, 2836 NLP_EVT_DEVICE_RECOVERY); 2837 2838 lpfc_disc_state_machine(vport, ndlp, NULL, 2839 NLP_EVT_DEVICE_RM); 2840 } 2841 2842 /* At this point, ALL ndlp's should be gone 2843 * because of the previous NLP_EVT_DEVICE_RM. 2844 * Lets wait for this to happen, if needed. 2845 */ 2846 while (!list_empty(&vport->fc_nodes)) { 2847 if (i++ > 3000) { 2848 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2849 "0233 Nodelist not empty\n"); 2850 list_for_each_entry_safe(ndlp, next_ndlp, 2851 &vport->fc_nodes, nlp_listp) { 2852 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2853 LOG_NODE, 2854 "0282 did:x%x ndlp:x%p " 2855 "usgmap:x%x refcnt:%d\n", 2856 ndlp->nlp_DID, (void *)ndlp, 2857 ndlp->nlp_usg_map, 2858 kref_read(&ndlp->kref)); 2859 } 2860 break; 2861 } 2862 2863 /* Wait for any activity on ndlps to settle */ 2864 msleep(10); 2865 } 2866 lpfc_cleanup_vports_rrqs(vport, NULL); 2867 } 2868 2869 /** 2870 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2871 * @vport: pointer to a virtual N_Port data structure. 
2872 * 2873 * This routine stops all the timers associated with a @vport. This function 2874 * is invoked before disabling or deleting a @vport. Note that the physical 2875 * port is treated as @vport 0. 2876 **/ 2877 void 2878 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2879 { 2880 del_timer_sync(&vport->els_tmofunc); 2881 del_timer_sync(&vport->delayed_disc_tmo); 2882 lpfc_can_disctmo(vport); 2883 return; 2884 } 2885 2886 /** 2887 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2888 * @phba: pointer to lpfc hba data structure. 2889 * 2890 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2891 * caller of this routine should already hold the host lock. 2892 **/ 2893 void 2894 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2895 { 2896 /* Clear pending FCF rediscovery wait flag */ 2897 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2898 2899 /* Now, try to stop the timer */ 2900 del_timer(&phba->fcf.redisc_wait); 2901 } 2902 2903 /** 2904 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2905 * @phba: pointer to lpfc hba data structure. 2906 * 2907 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2908 * checks whether the FCF rediscovery wait timer is pending with the host 2909 * lock held before proceeding with disabling the timer and clearing the 2910 * wait timer pendig flag. 2911 **/ 2912 void 2913 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2914 { 2915 spin_lock_irq(&phba->hbalock); 2916 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2917 /* FCF rediscovery timer already fired or stopped */ 2918 spin_unlock_irq(&phba->hbalock); 2919 return; 2920 } 2921 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2922 /* Clear failover in progress flags */ 2923 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2924 spin_unlock_irq(&phba->hbalock); 2925 } 2926 2927 /** 2928 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2929 * @phba: pointer to lpfc hba data structure. 2930 * 2931 * This routine stops all the timers associated with a HBA. This function is 2932 * invoked before either putting a HBA offline or unloading the driver. 2933 **/ 2934 void 2935 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2936 { 2937 lpfc_stop_vport_timers(phba->pport); 2938 del_timer_sync(&phba->sli.mbox_tmo); 2939 del_timer_sync(&phba->fabric_block_timer); 2940 del_timer_sync(&phba->eratt_poll); 2941 del_timer_sync(&phba->hb_tmofunc); 2942 if (phba->sli_rev == LPFC_SLI_REV4) { 2943 del_timer_sync(&phba->rrq_tmr); 2944 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2945 } 2946 phba->hb_outstanding = 0; 2947 2948 switch (phba->pci_dev_grp) { 2949 case LPFC_PCI_DEV_LP: 2950 /* Stop any LightPulse device specific driver timers */ 2951 del_timer_sync(&phba->fcp_poll_timer); 2952 break; 2953 case LPFC_PCI_DEV_OC: 2954 /* Stop any OneConnect device sepcific driver timers */ 2955 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2956 break; 2957 default: 2958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2959 "0297 Invalid device group (x%x)\n", 2960 phba->pci_dev_grp); 2961 break; 2962 } 2963 return; 2964 } 2965 2966 /** 2967 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2968 * @phba: pointer to lpfc hba data structure. 2969 * 2970 * This routine marks a HBA's management interface as blocked. Once the HBA's 2971 * management interface is marked as blocked, all the user space access to 2972 * the HBA, whether they are from sysfs interface or libdfc interface will 2973 * all be blocked. 
The HBA is set to block the management interface when the 2974 * driver prepares the HBA interface for online or offline. 2975 **/ 2976 static void 2977 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2978 { 2979 unsigned long iflag; 2980 uint8_t actcmd = MBX_HEARTBEAT; 2981 unsigned long timeout; 2982 2983 spin_lock_irqsave(&phba->hbalock, iflag); 2984 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2985 spin_unlock_irqrestore(&phba->hbalock, iflag); 2986 if (mbx_action == LPFC_MBX_NO_WAIT) 2987 return; 2988 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2989 spin_lock_irqsave(&phba->hbalock, iflag); 2990 if (phba->sli.mbox_active) { 2991 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2992 /* Determine how long we might wait for the active mailbox 2993 * command to be gracefully completed by firmware. 2994 */ 2995 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2996 phba->sli.mbox_active) * 1000) + jiffies; 2997 } 2998 spin_unlock_irqrestore(&phba->hbalock, iflag); 2999 3000 /* Wait for the outstnading mailbox command to complete */ 3001 while (phba->sli.mbox_active) { 3002 /* Check active mailbox complete status every 2ms */ 3003 msleep(2); 3004 if (time_after(jiffies, timeout)) { 3005 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3006 "2813 Mgmt IO is Blocked %x " 3007 "- mbox cmd %x still active\n", 3008 phba->sli.sli_flag, actcmd); 3009 break; 3010 } 3011 } 3012 } 3013 3014 /** 3015 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3016 * @phba: pointer to lpfc hba data structure. 3017 * 3018 * Allocate RPIs for all active remote nodes. This is needed whenever 3019 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3020 * is to fixup the temporary rpi assignments. 3021 **/ 3022 void 3023 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3024 { 3025 struct lpfc_nodelist *ndlp, *next_ndlp; 3026 struct lpfc_vport **vports; 3027 int i, rpi; 3028 unsigned long flags; 3029 3030 if (phba->sli_rev != LPFC_SLI_REV4) 3031 return; 3032 3033 vports = lpfc_create_vport_work_array(phba); 3034 if (vports == NULL) 3035 return; 3036 3037 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3038 if (vports[i]->load_flag & FC_UNLOADING) 3039 continue; 3040 3041 list_for_each_entry_safe(ndlp, next_ndlp, 3042 &vports[i]->fc_nodes, 3043 nlp_listp) { 3044 if (!NLP_CHK_NODE_ACT(ndlp)) 3045 continue; 3046 rpi = lpfc_sli4_alloc_rpi(phba); 3047 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3048 spin_lock_irqsave(&phba->ndlp_lock, flags); 3049 NLP_CLR_NODE_ACT(ndlp); 3050 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3051 continue; 3052 } 3053 ndlp->nlp_rpi = rpi; 3054 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3055 "0009 rpi:%x DID:%x " 3056 "flg:%x map:%x %p\n", ndlp->nlp_rpi, 3057 ndlp->nlp_DID, ndlp->nlp_flag, 3058 ndlp->nlp_usg_map, ndlp); 3059 } 3060 } 3061 lpfc_destroy_vport_work_array(phba, vports); 3062 } 3063 3064 /** 3065 * lpfc_online - Initialize and bring a HBA online 3066 * @phba: pointer to lpfc hba data structure. 3067 * 3068 * This routine initializes the HBA and brings a HBA online. During this 3069 * process, the management interface is blocked to prevent user space access 3070 * to the HBA interfering with the driver initialization. 
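 *
 * For SLI4 ports the setup path is lpfc_sli4_hba_setup() and, when the
 * NVMe FC4 type is enabled on a non-NVMET port, the NVMe localport is
 * re-created; SLI2/SLI3 ports go through lpfc_sli_queue_init() and
 * lpfc_sli_hba_setup() instead.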
3071 * 3072 * Return codes 3073 * 0 - successful 3074 * 1 - failed 3075 **/ 3076 int 3077 lpfc_online(struct lpfc_hba *phba) 3078 { 3079 struct lpfc_vport *vport; 3080 struct lpfc_vport **vports; 3081 int i, error = 0; 3082 bool vpis_cleared = false; 3083 3084 if (!phba) 3085 return 0; 3086 vport = phba->pport; 3087 3088 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3089 return 0; 3090 3091 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3092 "0458 Bring Adapter online\n"); 3093 3094 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3095 3096 if (phba->sli_rev == LPFC_SLI_REV4) { 3097 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3098 lpfc_unblock_mgmt_io(phba); 3099 return 1; 3100 } 3101 spin_lock_irq(&phba->hbalock); 3102 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3103 vpis_cleared = true; 3104 spin_unlock_irq(&phba->hbalock); 3105 3106 /* Reestablish the local initiator port. 3107 * The offline process destroyed the previous lport. 3108 */ 3109 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3110 !phba->nvmet_support) { 3111 error = lpfc_nvme_create_localport(phba->pport); 3112 if (error) 3113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3114 "6132 NVME restore reg failed " 3115 "on nvmei error x%x\n", error); 3116 } 3117 } else { 3118 lpfc_sli_queue_init(phba); 3119 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3120 lpfc_unblock_mgmt_io(phba); 3121 return 1; 3122 } 3123 } 3124 3125 vports = lpfc_create_vport_work_array(phba); 3126 if (vports != NULL) { 3127 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3128 struct Scsi_Host *shost; 3129 shost = lpfc_shost_from_vport(vports[i]); 3130 spin_lock_irq(shost->host_lock); 3131 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3132 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3133 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3134 if (phba->sli_rev == LPFC_SLI_REV4) { 3135 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3136 if ((vpis_cleared) && 3137 (vports[i]->port_type != 3138 LPFC_PHYSICAL_PORT)) 3139 vports[i]->vpi = 0; 3140 } 3141 spin_unlock_irq(shost->host_lock); 3142 } 3143 } 3144 lpfc_destroy_vport_work_array(phba, vports); 3145 3146 lpfc_unblock_mgmt_io(phba); 3147 return 0; 3148 } 3149 3150 /** 3151 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3152 * @phba: pointer to lpfc hba data structure. 3153 * 3154 * This routine marks a HBA's management interface as not blocked. Once the 3155 * HBA's management interface is marked as not blocked, all the user space 3156 * access to the HBA, whether they are from sysfs interface or libdfc 3157 * interface will be allowed. The HBA is set to block the management interface 3158 * when the driver prepares the HBA interface for online or offline and then 3159 * set to unblock the management interface afterwards. 3160 **/ 3161 void 3162 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3163 { 3164 unsigned long iflag; 3165 3166 spin_lock_irqsave(&phba->hbalock, iflag); 3167 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3168 spin_unlock_irqrestore(&phba->hbalock, iflag); 3169 } 3170 3171 /** 3172 * lpfc_offline_prep - Prepare a HBA to be brought offline 3173 * @phba: pointer to lpfc hba data structure. 3174 * 3175 * This routine is invoked to prepare a HBA to be brought offline. It performs 3176 * unregistration login to all the nodes on all vports and flushes the mailbox 3177 * queue to make it ready to be brought offline. 
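 * On SLI4 ports each node's RPI is also freed here so that a new RPI can
 * be allocated (see lpfc_sli4_node_prep()) when the adapter port comes
 * back online.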
3178 **/ 3179 void 3180 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3181 { 3182 struct lpfc_vport *vport = phba->pport; 3183 struct lpfc_nodelist *ndlp, *next_ndlp; 3184 struct lpfc_vport **vports; 3185 struct Scsi_Host *shost; 3186 int i; 3187 3188 if (vport->fc_flag & FC_OFFLINE_MODE) 3189 return; 3190 3191 lpfc_block_mgmt_io(phba, mbx_action); 3192 3193 lpfc_linkdown(phba); 3194 3195 /* Issue an unreg_login to all nodes on all vports */ 3196 vports = lpfc_create_vport_work_array(phba); 3197 if (vports != NULL) { 3198 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3199 if (vports[i]->load_flag & FC_UNLOADING) 3200 continue; 3201 shost = lpfc_shost_from_vport(vports[i]); 3202 spin_lock_irq(shost->host_lock); 3203 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3204 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3205 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3206 spin_unlock_irq(shost->host_lock); 3207 3208 shost = lpfc_shost_from_vport(vports[i]); 3209 list_for_each_entry_safe(ndlp, next_ndlp, 3210 &vports[i]->fc_nodes, 3211 nlp_listp) { 3212 if (!NLP_CHK_NODE_ACT(ndlp)) 3213 continue; 3214 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3215 continue; 3216 if (ndlp->nlp_type & NLP_FABRIC) { 3217 lpfc_disc_state_machine(vports[i], ndlp, 3218 NULL, NLP_EVT_DEVICE_RECOVERY); 3219 lpfc_disc_state_machine(vports[i], ndlp, 3220 NULL, NLP_EVT_DEVICE_RM); 3221 } 3222 spin_lock_irq(shost->host_lock); 3223 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3224 spin_unlock_irq(shost->host_lock); 3225 /* 3226 * Whenever an SLI4 port goes offline, free the 3227 * RPI. Get a new RPI when the adapter port 3228 * comes back online. 3229 */ 3230 if (phba->sli_rev == LPFC_SLI_REV4) { 3231 lpfc_printf_vlog(ndlp->vport, 3232 KERN_INFO, LOG_NODE, 3233 "0011 lpfc_offline: " 3234 "ndlp:x%p did %x " 3235 "usgmap:x%x rpi:%x\n", 3236 ndlp, ndlp->nlp_DID, 3237 ndlp->nlp_usg_map, 3238 ndlp->nlp_rpi); 3239 3240 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3241 } 3242 lpfc_unreg_rpi(vports[i], ndlp); 3243 } 3244 } 3245 } 3246 lpfc_destroy_vport_work_array(phba, vports); 3247 3248 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3249 3250 if (phba->wq) 3251 flush_workqueue(phba->wq); 3252 } 3253 3254 /** 3255 * lpfc_offline - Bring a HBA offline 3256 * @phba: pointer to lpfc hba data structure. 3257 * 3258 * This routine actually brings a HBA offline. It stops all the timers 3259 * associated with the HBA, brings down the SLI layer, and eventually 3260 * marks the HBA as in offline state for the upper layer protocol. 3261 **/ 3262 void 3263 lpfc_offline(struct lpfc_hba *phba) 3264 { 3265 struct Scsi_Host *shost; 3266 struct lpfc_vport **vports; 3267 int i; 3268 3269 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3270 return; 3271 3272 /* stop port and all timers associated with this hba */ 3273 lpfc_stop_port(phba); 3274 3275 /* Tear down the local and target port registrations. The 3276 * nvme transports need to cleanup. 3277 */ 3278 lpfc_nvmet_destroy_targetport(phba); 3279 lpfc_nvme_destroy_localport(phba->pport); 3280 3281 vports = lpfc_create_vport_work_array(phba); 3282 if (vports != NULL) 3283 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3284 lpfc_stop_vport_timers(vports[i]); 3285 lpfc_destroy_vport_work_array(phba, vports); 3286 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3287 "0460 Bring Adapter offline\n"); 3288 /* Bring down the SLI Layer and cleanup. The HBA is offline 3289 now. 
*/ 3290 lpfc_sli_hba_down(phba); 3291 spin_lock_irq(&phba->hbalock); 3292 phba->work_ha = 0; 3293 spin_unlock_irq(&phba->hbalock); 3294 vports = lpfc_create_vport_work_array(phba); 3295 if (vports != NULL) 3296 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3297 shost = lpfc_shost_from_vport(vports[i]); 3298 spin_lock_irq(shost->host_lock); 3299 vports[i]->work_port_events = 0; 3300 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3301 spin_unlock_irq(shost->host_lock); 3302 } 3303 lpfc_destroy_vport_work_array(phba, vports); 3304 } 3305 3306 /** 3307 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3308 * @phba: pointer to lpfc hba data structure. 3309 * 3310 * This routine is to free all the SCSI buffers and IOCBs from the driver 3311 * list back to kernel. It is called from lpfc_pci_remove_one to free 3312 * the internal resources before the device is removed from the system. 3313 **/ 3314 static void 3315 lpfc_scsi_free(struct lpfc_hba *phba) 3316 { 3317 struct lpfc_scsi_buf *sb, *sb_next; 3318 3319 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3320 return; 3321 3322 spin_lock_irq(&phba->hbalock); 3323 3324 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3325 3326 spin_lock(&phba->scsi_buf_list_put_lock); 3327 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3328 list) { 3329 list_del(&sb->list); 3330 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3331 sb->dma_handle); 3332 kfree(sb); 3333 phba->total_scsi_bufs--; 3334 } 3335 spin_unlock(&phba->scsi_buf_list_put_lock); 3336 3337 spin_lock(&phba->scsi_buf_list_get_lock); 3338 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3339 list) { 3340 list_del(&sb->list); 3341 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3342 sb->dma_handle); 3343 kfree(sb); 3344 phba->total_scsi_bufs--; 3345 } 3346 spin_unlock(&phba->scsi_buf_list_get_lock); 3347 spin_unlock_irq(&phba->hbalock); 3348 } 3349 /** 3350 * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists 3351 * @phba: pointer to lpfc hba data structure. 3352 * 3353 * This routine is to free all the NVME buffers and IOCBs from the driver 3354 * list back to kernel. It is called from lpfc_pci_remove_one to free 3355 * the internal resources before the device is removed from the system. 3356 **/ 3357 static void 3358 lpfc_nvme_free(struct lpfc_hba *phba) 3359 { 3360 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; 3361 3362 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 3363 return; 3364 3365 spin_lock_irq(&phba->hbalock); 3366 3367 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3368 spin_lock(&phba->nvme_buf_list_put_lock); 3369 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3370 &phba->lpfc_nvme_buf_list_put, list) { 3371 list_del(&lpfc_ncmd->list); 3372 phba->put_nvme_bufs--; 3373 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, 3374 lpfc_ncmd->dma_handle); 3375 kfree(lpfc_ncmd); 3376 phba->total_nvme_bufs--; 3377 } 3378 spin_unlock(&phba->nvme_buf_list_put_lock); 3379 3380 spin_lock(&phba->nvme_buf_list_get_lock); 3381 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3382 &phba->lpfc_nvme_buf_list_get, list) { 3383 list_del(&lpfc_ncmd->list); 3384 phba->get_nvme_bufs--; 3385 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, 3386 lpfc_ncmd->dma_handle); 3387 kfree(lpfc_ncmd); 3388 phba->total_nvme_bufs--; 3389 } 3390 spin_unlock(&phba->nvme_buf_list_get_lock); 3391 spin_unlock_irq(&phba->hbalock); 3392 } 3393 /** 3394 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3395 * @phba: pointer to lpfc hba data structure. 3396 * 3397 * This routine first calculates the sizes of the current els and allocated 3398 * scsi sgl lists, and then goes through all sgls to updates the physical 3399 * XRIs assigned due to port function reset. During port initialization, the 3400 * current els and allocated scsi sgl lists are 0s. 3401 * 3402 * Return codes 3403 * 0 - successful (for now, it always returns 0) 3404 **/ 3405 int 3406 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3407 { 3408 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3409 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3410 LIST_HEAD(els_sgl_list); 3411 int rc; 3412 3413 /* 3414 * update on pci function's els xri-sgl list 3415 */ 3416 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3417 3418 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3419 /* els xri-sgl expanded */ 3420 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3421 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3422 "3157 ELS xri-sgl count increased from " 3423 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3424 els_xri_cnt); 3425 /* allocate the additional els sgls */ 3426 for (i = 0; i < xri_cnt; i++) { 3427 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3428 GFP_KERNEL); 3429 if (sglq_entry == NULL) { 3430 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3431 "2562 Failure to allocate an " 3432 "ELS sgl entry:%d\n", i); 3433 rc = -ENOMEM; 3434 goto out_free_mem; 3435 } 3436 sglq_entry->buff_type = GEN_BUFF_TYPE; 3437 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3438 &sglq_entry->phys); 3439 if (sglq_entry->virt == NULL) { 3440 kfree(sglq_entry); 3441 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3442 "2563 Failure to allocate an " 3443 "ELS mbuf:%d\n", i); 3444 rc = -ENOMEM; 3445 goto out_free_mem; 3446 } 3447 sglq_entry->sgl = sglq_entry->virt; 3448 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3449 sglq_entry->state = SGL_FREED; 3450 list_add_tail(&sglq_entry->list, &els_sgl_list); 3451 } 3452 spin_lock_irq(&phba->hbalock); 3453 spin_lock(&phba->sli4_hba.sgl_list_lock); 3454 list_splice_init(&els_sgl_list, 3455 &phba->sli4_hba.lpfc_els_sgl_list); 3456 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3457 spin_unlock_irq(&phba->hbalock); 3458 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3459 /* els xri-sgl shrinked */ 3460 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3461 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3462 "3158 ELS xri-sgl count decreased from " 3463 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3464 els_xri_cnt); 3465 spin_lock_irq(&phba->hbalock); 3466 
spin_lock(&phba->sli4_hba.sgl_list_lock);
3467 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3468 &els_sgl_list);
3469 /* release extra els sgls from list */
3470 for (i = 0; i < xri_cnt; i++) {
3471 list_remove_head(&els_sgl_list,
3472 sglq_entry, struct lpfc_sglq, list);
3473 if (sglq_entry) {
3474 __lpfc_mbuf_free(phba, sglq_entry->virt,
3475 sglq_entry->phys);
3476 kfree(sglq_entry);
3477 }
3478 }
3479 list_splice_init(&els_sgl_list,
3480 &phba->sli4_hba.lpfc_els_sgl_list);
3481 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3482 spin_unlock_irq(&phba->hbalock);
3483 } else
3484 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3485 "3163 ELS xri-sgl count unchanged: %d\n",
3486 els_xri_cnt);
3487 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3488
3489 /* update xris to els sgls on the list */
3490 sglq_entry = NULL;
3491 sglq_entry_next = NULL;
3492 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3493 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3494 lxri = lpfc_sli4_next_xritag(phba);
3495 if (lxri == NO_XRI) {
3496 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3497 "2400 Failed to allocate xri for "
3498 "ELS sgl\n");
3499 rc = -ENOMEM;
3500 goto out_free_mem;
3501 }
3502 sglq_entry->sli4_lxritag = lxri;
3503 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3504 }
3505 return 0;
3506
3507 out_free_mem:
3508 lpfc_free_els_sgl_list(phba);
3509 return rc;
3510 }
3511
3512 /**
3513 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3514 * @phba: pointer to lpfc hba data structure.
3515 *
3516 * This routine first calculates the size of the current nvmet xri-sgl
3517 * list, and then goes through all sgls to update the physical
3518 * XRIs assigned due to port function reset. During port initialization, the
3519 * current nvmet sgl list is empty.
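 *
 * For NVMET, every XRI the port reports beyond the ELS reservation is
 * dedicated to I/O, i.e. nvmet_xri_cnt = max_cfg_param.max_xri -
 * els_xri_cnt; purely as an illustration, a port reporting 1024 XRIs
 * with 64 reserved for ELS would leave 960 NVMET sgls.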
3520 * 3521 * Return codes 3522 * 0 - successful (for now, it always returns 0) 3523 **/ 3524 int 3525 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 3526 { 3527 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3528 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3529 uint16_t nvmet_xri_cnt; 3530 LIST_HEAD(nvmet_sgl_list); 3531 int rc; 3532 3533 /* 3534 * update on pci function's nvmet xri-sgl list 3535 */ 3536 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3537 3538 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 3539 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3540 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3541 /* els xri-sgl expanded */ 3542 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 3543 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3544 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 3545 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 3546 /* allocate the additional nvmet sgls */ 3547 for (i = 0; i < xri_cnt; i++) { 3548 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3549 GFP_KERNEL); 3550 if (sglq_entry == NULL) { 3551 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3552 "6303 Failure to allocate an " 3553 "NVMET sgl entry:%d\n", i); 3554 rc = -ENOMEM; 3555 goto out_free_mem; 3556 } 3557 sglq_entry->buff_type = NVMET_BUFF_TYPE; 3558 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 3559 &sglq_entry->phys); 3560 if (sglq_entry->virt == NULL) { 3561 kfree(sglq_entry); 3562 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3563 "6304 Failure to allocate an " 3564 "NVMET buf:%d\n", i); 3565 rc = -ENOMEM; 3566 goto out_free_mem; 3567 } 3568 sglq_entry->sgl = sglq_entry->virt; 3569 memset(sglq_entry->sgl, 0, 3570 phba->cfg_sg_dma_buf_size); 3571 sglq_entry->state = SGL_FREED; 3572 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3573 } 3574 spin_lock_irq(&phba->hbalock); 3575 spin_lock(&phba->sli4_hba.sgl_list_lock); 3576 list_splice_init(&nvmet_sgl_list, 3577 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3578 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3579 spin_unlock_irq(&phba->hbalock); 3580 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3581 /* nvmet xri-sgl shrunk */ 3582 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3583 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3584 "6305 NVMET xri-sgl count decreased from " 3585 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3586 nvmet_xri_cnt); 3587 spin_lock_irq(&phba->hbalock); 3588 spin_lock(&phba->sli4_hba.sgl_list_lock); 3589 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3590 &nvmet_sgl_list); 3591 /* release extra nvmet sgls from list */ 3592 for (i = 0; i < xri_cnt; i++) { 3593 list_remove_head(&nvmet_sgl_list, 3594 sglq_entry, struct lpfc_sglq, list); 3595 if (sglq_entry) { 3596 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3597 sglq_entry->phys); 3598 kfree(sglq_entry); 3599 } 3600 } 3601 list_splice_init(&nvmet_sgl_list, 3602 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3603 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3604 spin_unlock_irq(&phba->hbalock); 3605 } else 3606 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3607 "6306 NVMET xri-sgl count unchanged: %d\n", 3608 nvmet_xri_cnt); 3609 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3610 3611 /* update xris to nvmet sgls on the list */ 3612 sglq_entry = NULL; 3613 sglq_entry_next = NULL; 3614 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3615 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3616 lxri = lpfc_sli4_next_xritag(phba); 3617 if (lxri == NO_XRI) { 3618 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3619 "6307 Failed 
to allocate xri for " 3620 "NVMET sgl\n"); 3621 rc = -ENOMEM; 3622 goto out_free_mem; 3623 } 3624 sglq_entry->sli4_lxritag = lxri; 3625 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3626 } 3627 return 0; 3628 3629 out_free_mem: 3630 lpfc_free_nvmet_sgl_list(phba); 3631 return rc; 3632 } 3633 3634 /** 3635 * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping 3636 * @phba: pointer to lpfc hba data structure. 3637 * 3638 * This routine first calculates the sizes of the current els and allocated 3639 * scsi sgl lists, and then goes through all sgls to updates the physical 3640 * XRIs assigned due to port function reset. During port initialization, the 3641 * current els and allocated scsi sgl lists are 0s. 3642 * 3643 * Return codes 3644 * 0 - successful (for now, it always returns 0) 3645 **/ 3646 int 3647 lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba) 3648 { 3649 struct lpfc_scsi_buf *psb, *psb_next; 3650 uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt; 3651 LIST_HEAD(scsi_sgl_list); 3652 int rc; 3653 3654 /* 3655 * update on pci function's els xri-sgl list 3656 */ 3657 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3658 phba->total_scsi_bufs = 0; 3659 3660 /* 3661 * update on pci function's allocated scsi xri-sgl list 3662 */ 3663 /* maximum number of xris available for scsi buffers */ 3664 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - 3665 els_xri_cnt; 3666 3667 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3668 return 0; 3669 3670 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3671 phba->sli4_hba.scsi_xri_max = /* Split them up */ 3672 (phba->sli4_hba.scsi_xri_max * 3673 phba->cfg_xri_split) / 100; 3674 3675 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3676 spin_lock(&phba->scsi_buf_list_put_lock); 3677 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); 3678 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); 3679 spin_unlock(&phba->scsi_buf_list_put_lock); 3680 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3681 3682 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3683 "6060 Current allocated SCSI xri-sgl count:%d, " 3684 "maximum SCSI xri count:%d (split:%d)\n", 3685 phba->sli4_hba.scsi_xri_cnt, 3686 phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split); 3687 3688 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { 3689 /* max scsi xri shrinked below the allocated scsi buffers */ 3690 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - 3691 phba->sli4_hba.scsi_xri_max; 3692 /* release the extra allocated scsi buffers */ 3693 for (i = 0; i < scsi_xri_cnt; i++) { 3694 list_remove_head(&scsi_sgl_list, psb, 3695 struct lpfc_scsi_buf, list); 3696 if (psb) { 3697 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3698 psb->data, psb->dma_handle); 3699 kfree(psb); 3700 } 3701 } 3702 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3703 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; 3704 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3705 } 3706 3707 /* update xris associated to remaining allocated scsi buffers */ 3708 psb = NULL; 3709 psb_next = NULL; 3710 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { 3711 lxri = lpfc_sli4_next_xritag(phba); 3712 if (lxri == NO_XRI) { 3713 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3714 "2560 Failed to allocate xri for " 3715 "scsi buffer\n"); 3716 rc = -ENOMEM; 3717 goto out_free_mem; 3718 } 3719 psb->cur_iocbq.sli4_lxritag = lxri; 3720 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3721 } 3722 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3723 
    spin_lock(&phba->scsi_buf_list_put_lock);
    list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
    INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
    spin_unlock(&phba->scsi_buf_list_put_lock);
    spin_unlock_irq(&phba->scsi_buf_list_get_lock);
    return 0;

out_free_mem:
    lpfc_scsi_free(phba);
    return rc;
}

static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
    uint64_t wwn;
    int rc;
    LPFC_MBOXQ_t *mboxq;
    MAILBOX_t *mb;

    mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
                       GFP_KERNEL);
    if (!mboxq)
        return (uint64_t)-1;

    /* First get WWN of HBA instance */
    lpfc_read_nv(phba, mboxq);
    rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
    if (rc != MBX_SUCCESS) {
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                "6019 Mailbox failed, mbxCmd x%x "
                "READ_NV, mbxStatus x%x\n",
                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
        mempool_free(mboxq, phba->mbox_mem_pool);
        return (uint64_t) -1;
    }
    mb = &mboxq->u.mb;
    memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
    /* wwn is WWPN of HBA instance */
    mempool_free(mboxq, phba->mbox_mem_pool);
    if (phba->sli_rev == LPFC_SLI_REV4)
        return be64_to_cpu(wwn);
    else
        return rol64(wwn, 32);
}

/**
 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * nvme sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated nvme sgl lists are 0s.
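 *
 * For illustration only (hypothetical numbers, not taken from any
 * particular HBA): if max_xri is 4096, the els pool reserves 256 XRIs and
 * the scsi split has already claimed 1920, then the nvme pool computed
 * below is limited to 4096 - 256 - 1920 = 1920 XRIs before buffers are
 * grown or trimmed against that ceiling.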
3778 * 3779 * Return codes 3780 * 0 - successful (for now, it always returns 0) 3781 **/ 3782 int 3783 lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba) 3784 { 3785 struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 3786 uint16_t i, lxri, els_xri_cnt; 3787 uint16_t nvme_xri_cnt, nvme_xri_max; 3788 LIST_HEAD(nvme_sgl_list); 3789 int rc, cnt; 3790 3791 phba->total_nvme_bufs = 0; 3792 phba->get_nvme_bufs = 0; 3793 phba->put_nvme_bufs = 0; 3794 3795 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 3796 return 0; 3797 /* 3798 * update on pci function's allocated nvme xri-sgl list 3799 */ 3800 3801 /* maximum number of xris available for nvme buffers */ 3802 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3803 nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3804 phba->sli4_hba.nvme_xri_max = nvme_xri_max; 3805 phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max; 3806 3807 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3808 "6074 Current allocated NVME xri-sgl count:%d, " 3809 "maximum NVME xri count:%d\n", 3810 phba->sli4_hba.nvme_xri_cnt, 3811 phba->sli4_hba.nvme_xri_max); 3812 3813 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3814 spin_lock(&phba->nvme_buf_list_put_lock); 3815 list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list); 3816 list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list); 3817 cnt = phba->get_nvme_bufs + phba->put_nvme_bufs; 3818 phba->get_nvme_bufs = 0; 3819 phba->put_nvme_bufs = 0; 3820 spin_unlock(&phba->nvme_buf_list_put_lock); 3821 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3822 3823 if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) { 3824 /* max nvme xri shrunk below the allocated nvme buffers */ 3825 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3826 nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt - 3827 phba->sli4_hba.nvme_xri_max; 3828 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3829 /* release the extra allocated nvme buffers */ 3830 for (i = 0; i < nvme_xri_cnt; i++) { 3831 list_remove_head(&nvme_sgl_list, lpfc_ncmd, 3832 struct lpfc_nvme_buf, list); 3833 if (lpfc_ncmd) { 3834 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3835 lpfc_ncmd->data, 3836 lpfc_ncmd->dma_handle); 3837 kfree(lpfc_ncmd); 3838 } 3839 } 3840 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3841 phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt; 3842 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3843 } 3844 3845 /* update xris associated to remaining allocated nvme buffers */ 3846 lpfc_ncmd = NULL; 3847 lpfc_ncmd_next = NULL; 3848 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3849 &nvme_sgl_list, list) { 3850 lxri = lpfc_sli4_next_xritag(phba); 3851 if (lxri == NO_XRI) { 3852 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3853 "6075 Failed to allocate xri for " 3854 "nvme buffer\n"); 3855 rc = -ENOMEM; 3856 goto out_free_mem; 3857 } 3858 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 3859 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3860 } 3861 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3862 spin_lock(&phba->nvme_buf_list_put_lock); 3863 list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get); 3864 phba->get_nvme_bufs = cnt; 3865 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); 3866 spin_unlock(&phba->nvme_buf_list_put_lock); 3867 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3868 return 0; 3869 3870 out_free_mem: 3871 lpfc_nvme_free(phba); 3872 return rc; 3873 } 3874 3875 /** 3876 * lpfc_create_port - Create an FC port 3877 * @phba: pointer to lpfc hba data structure. 
3878 * @instance: a unique integer ID to this FC port. 3879 * @dev: pointer to the device data structure. 3880 * 3881 * This routine creates a FC port for the upper layer protocol. The FC port 3882 * can be created on top of either a physical port or a virtual port provided 3883 * by the HBA. This routine also allocates a SCSI host data structure (shost) 3884 * and associates the FC port created before adding the shost into the SCSI 3885 * layer. 3886 * 3887 * Return codes 3888 * @vport - pointer to the virtual N_Port data structure. 3889 * NULL - port create failed. 3890 **/ 3891 struct lpfc_vport * 3892 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 3893 { 3894 struct lpfc_vport *vport; 3895 struct Scsi_Host *shost = NULL; 3896 int error = 0; 3897 int i; 3898 uint64_t wwn; 3899 bool use_no_reset_hba = false; 3900 int rc; 3901 3902 if (lpfc_no_hba_reset_cnt) { 3903 if (phba->sli_rev < LPFC_SLI_REV4 && 3904 dev == &phba->pcidev->dev) { 3905 /* Reset the port first */ 3906 lpfc_sli_brdrestart(phba); 3907 rc = lpfc_sli_chipset_init(phba); 3908 if (rc) 3909 return NULL; 3910 } 3911 wwn = lpfc_get_wwpn(phba); 3912 } 3913 3914 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 3915 if (wwn == lpfc_no_hba_reset[i]) { 3916 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3917 "6020 Setting use_no_reset port=%llx\n", 3918 wwn); 3919 use_no_reset_hba = true; 3920 break; 3921 } 3922 } 3923 3924 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 3925 if (dev != &phba->pcidev->dev) { 3926 shost = scsi_host_alloc(&lpfc_vport_template, 3927 sizeof(struct lpfc_vport)); 3928 } else { 3929 if (!use_no_reset_hba) 3930 shost = scsi_host_alloc(&lpfc_template, 3931 sizeof(struct lpfc_vport)); 3932 else 3933 shost = scsi_host_alloc(&lpfc_template_no_hr, 3934 sizeof(struct lpfc_vport)); 3935 } 3936 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 3937 shost = scsi_host_alloc(&lpfc_template_nvme, 3938 sizeof(struct lpfc_vport)); 3939 } 3940 if (!shost) 3941 goto out; 3942 3943 vport = (struct lpfc_vport *) shost->hostdata; 3944 vport->phba = phba; 3945 vport->load_flag |= FC_LOADING; 3946 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3947 vport->fc_rscn_flush = 0; 3948 lpfc_get_vport_cfgparam(vport); 3949 3950 shost->unique_id = instance; 3951 shost->max_id = LPFC_MAX_TARGET; 3952 shost->max_lun = vport->cfg_max_luns; 3953 shost->this_id = -1; 3954 shost->max_cmd_len = 16; 3955 shost->nr_hw_queues = phba->cfg_fcp_io_channel; 3956 if (phba->sli_rev == LPFC_SLI_REV4) { 3957 shost->dma_boundary = 3958 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 3959 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 3960 } 3961 3962 /* 3963 * Set initial can_queue value since 0 is no longer supported and 3964 * scsi_add_host will fail. This will be adjusted later based on the 3965 * max xri value determined in hba setup. 3966 */ 3967 shost->can_queue = phba->cfg_hba_queue_depth - 10; 3968 if (dev != &phba->pcidev->dev) { 3969 shost->transportt = lpfc_vport_transport_template; 3970 vport->port_type = LPFC_NPIV_PORT; 3971 } else { 3972 shost->transportt = lpfc_transport_template; 3973 vport->port_type = LPFC_PHYSICAL_PORT; 3974 } 3975 3976 /* Initialize all internally managed lists. 
*/ 3977 INIT_LIST_HEAD(&vport->fc_nodes); 3978 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3979 spin_lock_init(&vport->work_port_lock); 3980 3981 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 3982 3983 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 3984 3985 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 3986 3987 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3988 if (error) 3989 goto out_put_shost; 3990 3991 spin_lock_irq(&phba->hbalock); 3992 list_add_tail(&vport->listentry, &phba->port_list); 3993 spin_unlock_irq(&phba->hbalock); 3994 return vport; 3995 3996 out_put_shost: 3997 scsi_host_put(shost); 3998 out: 3999 return NULL; 4000 } 4001 4002 /** 4003 * destroy_port - destroy an FC port 4004 * @vport: pointer to an lpfc virtual N_Port data structure. 4005 * 4006 * This routine destroys a FC port from the upper layer protocol. All the 4007 * resources associated with the port are released. 4008 **/ 4009 void 4010 destroy_port(struct lpfc_vport *vport) 4011 { 4012 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4013 struct lpfc_hba *phba = vport->phba; 4014 4015 lpfc_debugfs_terminate(vport); 4016 fc_remove_host(shost); 4017 scsi_remove_host(shost); 4018 4019 spin_lock_irq(&phba->hbalock); 4020 list_del_init(&vport->listentry); 4021 spin_unlock_irq(&phba->hbalock); 4022 4023 lpfc_cleanup(vport); 4024 return; 4025 } 4026 4027 /** 4028 * lpfc_get_instance - Get a unique integer ID 4029 * 4030 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4031 * uses the kernel idr facility to perform the task. 4032 * 4033 * Return codes: 4034 * instance - a unique integer ID allocated as the new instance. 4035 * -1 - lpfc get instance failed. 4036 **/ 4037 int 4038 lpfc_get_instance(void) 4039 { 4040 int ret; 4041 4042 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4043 return ret < 0 ? -1 : ret; 4044 } 4045 4046 /** 4047 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4048 * @shost: pointer to SCSI host data structure. 4049 * @time: elapsed time of the scan in jiffies. 4050 * 4051 * This routine is called by the SCSI layer with a SCSI host to determine 4052 * whether the scan host is finished. 4053 * 4054 * Note: there is no scan_start function as adapter initialization will have 4055 * asynchronously kicked off the link initialization. 4056 * 4057 * Return codes 4058 * 0 - SCSI host scan is not over yet. 4059 * 1 - SCSI host scan is over. 4060 **/ 4061 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4062 { 4063 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4064 struct lpfc_hba *phba = vport->phba; 4065 int stat = 0; 4066 4067 spin_lock_irq(shost->host_lock); 4068 4069 if (vport->load_flag & FC_UNLOADING) { 4070 stat = 1; 4071 goto finished; 4072 } 4073 if (time >= msecs_to_jiffies(30 * 1000)) { 4074 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4075 "0461 Scanning longer than 30 " 4076 "seconds. Continuing initialization\n"); 4077 stat = 1; 4078 goto finished; 4079 } 4080 if (time >= msecs_to_jiffies(15 * 1000) && 4081 phba->link_state <= LPFC_LINK_DOWN) { 4082 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4083 "0465 Link down longer than 15 " 4084 "seconds. 
Continuing initialization\n"); 4085 stat = 1; 4086 goto finished; 4087 } 4088 4089 if (vport->port_state != LPFC_VPORT_READY) 4090 goto finished; 4091 if (vport->num_disc_nodes || vport->fc_prli_sent) 4092 goto finished; 4093 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4094 goto finished; 4095 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4096 goto finished; 4097 4098 stat = 1; 4099 4100 finished: 4101 spin_unlock_irq(shost->host_lock); 4102 return stat; 4103 } 4104 4105 /** 4106 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4107 * @shost: pointer to SCSI host data structure. 4108 * 4109 * This routine initializes a given SCSI host attributes on a FC port. The 4110 * SCSI host can be either on top of a physical port or a virtual port. 4111 **/ 4112 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4113 { 4114 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4115 struct lpfc_hba *phba = vport->phba; 4116 /* 4117 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 4118 */ 4119 4120 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4121 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4122 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4123 4124 memset(fc_host_supported_fc4s(shost), 0, 4125 sizeof(fc_host_supported_fc4s(shost))); 4126 fc_host_supported_fc4s(shost)[2] = 1; 4127 fc_host_supported_fc4s(shost)[7] = 1; 4128 4129 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4130 sizeof fc_host_symbolic_name(shost)); 4131 4132 fc_host_supported_speeds(shost) = 0; 4133 if (phba->lmt & LMT_64Gb) 4134 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4135 if (phba->lmt & LMT_32Gb) 4136 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4137 if (phba->lmt & LMT_16Gb) 4138 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4139 if (phba->lmt & LMT_10Gb) 4140 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4141 if (phba->lmt & LMT_8Gb) 4142 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4143 if (phba->lmt & LMT_4Gb) 4144 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4145 if (phba->lmt & LMT_2Gb) 4146 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4147 if (phba->lmt & LMT_1Gb) 4148 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4149 4150 fc_host_maxframe_size(shost) = 4151 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4152 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4153 4154 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4155 4156 /* This value is also unchanging */ 4157 memset(fc_host_active_fc4s(shost), 0, 4158 sizeof(fc_host_active_fc4s(shost))); 4159 fc_host_active_fc4s(shost)[2] = 1; 4160 fc_host_active_fc4s(shost)[7] = 1; 4161 4162 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4163 spin_lock_irq(shost->host_lock); 4164 vport->load_flag &= ~FC_LOADING; 4165 spin_unlock_irq(shost->host_lock); 4166 } 4167 4168 /** 4169 * lpfc_stop_port_s3 - Stop SLI3 device port 4170 * @phba: pointer to lpfc hba data structure. 4171 * 4172 * This routine is invoked to stop an SLI3 device port, it stops the device 4173 * from generating interrupts and stops the device driver's timers for the 4174 * device. 
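 *
 * Callers normally reach this through the lpfc_stop_port() wrapper further
 * below, which dispatches to the SLI3 or SLI4 variant via the
 * phba->lpfc_stop_port jump-table entry and then flushes phba->wq.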
4175 **/ 4176 static void 4177 lpfc_stop_port_s3(struct lpfc_hba *phba) 4178 { 4179 /* Clear all interrupt enable conditions */ 4180 writel(0, phba->HCregaddr); 4181 readl(phba->HCregaddr); /* flush */ 4182 /* Clear all pending interrupts */ 4183 writel(0xffffffff, phba->HAregaddr); 4184 readl(phba->HAregaddr); /* flush */ 4185 4186 /* Reset some HBA SLI setup states */ 4187 lpfc_stop_hba_timers(phba); 4188 phba->pport->work_port_events = 0; 4189 } 4190 4191 /** 4192 * lpfc_stop_port_s4 - Stop SLI4 device port 4193 * @phba: pointer to lpfc hba data structure. 4194 * 4195 * This routine is invoked to stop an SLI4 device port, it stops the device 4196 * from generating interrupts and stops the device driver's timers for the 4197 * device. 4198 **/ 4199 static void 4200 lpfc_stop_port_s4(struct lpfc_hba *phba) 4201 { 4202 /* Reset some HBA SLI4 setup states */ 4203 lpfc_stop_hba_timers(phba); 4204 phba->pport->work_port_events = 0; 4205 phba->sli4_hba.intr_enable = 0; 4206 } 4207 4208 /** 4209 * lpfc_stop_port - Wrapper function for stopping hba port 4210 * @phba: Pointer to HBA context object. 4211 * 4212 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4213 * the API jump table function pointer from the lpfc_hba struct. 4214 **/ 4215 void 4216 lpfc_stop_port(struct lpfc_hba *phba) 4217 { 4218 phba->lpfc_stop_port(phba); 4219 4220 if (phba->wq) 4221 flush_workqueue(phba->wq); 4222 } 4223 4224 /** 4225 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4226 * @phba: Pointer to hba for which this call is being executed. 4227 * 4228 * This routine starts the timer waiting for the FCF rediscovery to complete. 4229 **/ 4230 void 4231 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 4232 { 4233 unsigned long fcf_redisc_wait_tmo = 4234 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 4235 /* Start fcf rediscovery wait period timer */ 4236 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 4237 spin_lock_irq(&phba->hbalock); 4238 /* Allow action to new fcf asynchronous event */ 4239 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 4240 /* Mark the FCF rediscovery pending state */ 4241 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 4242 spin_unlock_irq(&phba->hbalock); 4243 } 4244 4245 /** 4246 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 4247 * @ptr: Map to lpfc_hba data structure pointer. 4248 * 4249 * This routine is invoked when waiting for FCF table rediscover has been 4250 * timed out. If new FCF record(s) has (have) been discovered during the 4251 * wait period, a new FCF event shall be added to the FCOE async event 4252 * list, and then worker thread shall be waked up for processing from the 4253 * worker thread context. 
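 *
 * The timer is armed by lpfc_fcf_redisc_wait_start_timer() above with a
 * LPFC_FCF_REDISCOVER_WAIT_TMO delay; when it fires with FCF_REDISC_PEND
 * still set, the handler converts that flag into FCF_REDISC_EVT and wakes
 * the worker thread, which later services the event in
 * lpfc_sli4_fcf_redisc_event_proc().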
4254 **/ 4255 static void 4256 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4257 { 4258 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4259 4260 /* Don't send FCF rediscovery event if timer cancelled */ 4261 spin_lock_irq(&phba->hbalock); 4262 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4263 spin_unlock_irq(&phba->hbalock); 4264 return; 4265 } 4266 /* Clear FCF rediscovery timer pending flag */ 4267 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4268 /* FCF rediscovery event to worker thread */ 4269 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4270 spin_unlock_irq(&phba->hbalock); 4271 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4272 "2776 FCF rediscover quiescent timer expired\n"); 4273 /* wake up worker thread */ 4274 lpfc_worker_wake_up(phba); 4275 } 4276 4277 /** 4278 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4279 * @phba: pointer to lpfc hba data structure. 4280 * @acqe_link: pointer to the async link completion queue entry. 4281 * 4282 * This routine is to parse the SLI4 link-attention link fault code. 4283 **/ 4284 static void 4285 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4286 struct lpfc_acqe_link *acqe_link) 4287 { 4288 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4289 case LPFC_ASYNC_LINK_FAULT_NONE: 4290 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4291 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4292 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 4293 break; 4294 default: 4295 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4296 "0398 Unknown link fault code: x%x\n", 4297 bf_get(lpfc_acqe_link_fault, acqe_link)); 4298 break; 4299 } 4300 } 4301 4302 /** 4303 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4304 * @phba: pointer to lpfc hba data structure. 4305 * @acqe_link: pointer to the async link completion queue entry. 4306 * 4307 * This routine is to parse the SLI4 link attention type and translate it 4308 * into the base driver's link attention type coding. 4309 * 4310 * Return: Link attention type in terms of base driver's coding. 4311 **/ 4312 static uint8_t 4313 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4314 struct lpfc_acqe_link *acqe_link) 4315 { 4316 uint8_t att_type; 4317 4318 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4319 case LPFC_ASYNC_LINK_STATUS_DOWN: 4320 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4321 att_type = LPFC_ATT_LINK_DOWN; 4322 break; 4323 case LPFC_ASYNC_LINK_STATUS_UP: 4324 /* Ignore physical link up events - wait for logical link up */ 4325 att_type = LPFC_ATT_RESERVED; 4326 break; 4327 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4328 att_type = LPFC_ATT_LINK_UP; 4329 break; 4330 default: 4331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4332 "0399 Invalid link attention type: x%x\n", 4333 bf_get(lpfc_acqe_link_status, acqe_link)); 4334 att_type = LPFC_ATT_RESERVED; 4335 break; 4336 } 4337 return att_type; 4338 } 4339 4340 /** 4341 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4342 * @phba: pointer to lpfc hba data structure. 4343 * 4344 * This routine is to get an SLI3 FC port's link speed in Mbps. 4345 * 4346 * Return: link speed in terms of Mbps. 
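 *
 * For example, a port linked at LPFC_LINK_SPEED_8GHZ is reported as 8000.
 * On SLI4 ports the logical link speed is returned when it is non-zero,
 * otherwise the physical link speed is used.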
4347 **/ 4348 uint32_t 4349 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4350 { 4351 uint32_t link_speed; 4352 4353 if (!lpfc_is_link_up(phba)) 4354 return 0; 4355 4356 if (phba->sli_rev <= LPFC_SLI_REV3) { 4357 switch (phba->fc_linkspeed) { 4358 case LPFC_LINK_SPEED_1GHZ: 4359 link_speed = 1000; 4360 break; 4361 case LPFC_LINK_SPEED_2GHZ: 4362 link_speed = 2000; 4363 break; 4364 case LPFC_LINK_SPEED_4GHZ: 4365 link_speed = 4000; 4366 break; 4367 case LPFC_LINK_SPEED_8GHZ: 4368 link_speed = 8000; 4369 break; 4370 case LPFC_LINK_SPEED_10GHZ: 4371 link_speed = 10000; 4372 break; 4373 case LPFC_LINK_SPEED_16GHZ: 4374 link_speed = 16000; 4375 break; 4376 default: 4377 link_speed = 0; 4378 } 4379 } else { 4380 if (phba->sli4_hba.link_state.logical_speed) 4381 link_speed = 4382 phba->sli4_hba.link_state.logical_speed; 4383 else 4384 link_speed = phba->sli4_hba.link_state.speed; 4385 } 4386 return link_speed; 4387 } 4388 4389 /** 4390 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 4391 * @phba: pointer to lpfc hba data structure. 4392 * @evt_code: asynchronous event code. 4393 * @speed_code: asynchronous event link speed code. 4394 * 4395 * This routine is to parse the giving SLI4 async event link speed code into 4396 * value of Mbps for the link speed. 4397 * 4398 * Return: link speed in terms of Mbps. 4399 **/ 4400 static uint32_t 4401 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4402 uint8_t speed_code) 4403 { 4404 uint32_t port_speed; 4405 4406 switch (evt_code) { 4407 case LPFC_TRAILER_CODE_LINK: 4408 switch (speed_code) { 4409 case LPFC_ASYNC_LINK_SPEED_ZERO: 4410 port_speed = 0; 4411 break; 4412 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4413 port_speed = 10; 4414 break; 4415 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4416 port_speed = 100; 4417 break; 4418 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4419 port_speed = 1000; 4420 break; 4421 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4422 port_speed = 10000; 4423 break; 4424 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4425 port_speed = 20000; 4426 break; 4427 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4428 port_speed = 25000; 4429 break; 4430 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4431 port_speed = 40000; 4432 break; 4433 default: 4434 port_speed = 0; 4435 } 4436 break; 4437 case LPFC_TRAILER_CODE_FC: 4438 switch (speed_code) { 4439 case LPFC_FC_LA_SPEED_UNKNOWN: 4440 port_speed = 0; 4441 break; 4442 case LPFC_FC_LA_SPEED_1G: 4443 port_speed = 1000; 4444 break; 4445 case LPFC_FC_LA_SPEED_2G: 4446 port_speed = 2000; 4447 break; 4448 case LPFC_FC_LA_SPEED_4G: 4449 port_speed = 4000; 4450 break; 4451 case LPFC_FC_LA_SPEED_8G: 4452 port_speed = 8000; 4453 break; 4454 case LPFC_FC_LA_SPEED_10G: 4455 port_speed = 10000; 4456 break; 4457 case LPFC_FC_LA_SPEED_16G: 4458 port_speed = 16000; 4459 break; 4460 case LPFC_FC_LA_SPEED_32G: 4461 port_speed = 32000; 4462 break; 4463 case LPFC_FC_LA_SPEED_64G: 4464 port_speed = 64000; 4465 break; 4466 default: 4467 port_speed = 0; 4468 } 4469 break; 4470 default: 4471 port_speed = 0; 4472 } 4473 return port_speed; 4474 } 4475 4476 /** 4477 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4478 * @phba: pointer to lpfc hba data structure. 4479 * @acqe_link: pointer to the async link completion queue entry. 4480 * 4481 * This routine is to handle the SLI4 asynchronous FCoE link event. 
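 *
 * In FC mode a READ_TOPOLOGY mailbox command is built and issued to the
 * port; in FCoE mode the READ_TOPOLOGY result is faked from the ACQE
 * fields and the completion handler is invoked directly, so the rest of
 * the driver follows the same path as on SLI3.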
4482 **/ 4483 static void 4484 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 4485 struct lpfc_acqe_link *acqe_link) 4486 { 4487 struct lpfc_dmabuf *mp; 4488 LPFC_MBOXQ_t *pmb; 4489 MAILBOX_t *mb; 4490 struct lpfc_mbx_read_top *la; 4491 uint8_t att_type; 4492 int rc; 4493 4494 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 4495 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 4496 return; 4497 phba->fcoe_eventtag = acqe_link->event_tag; 4498 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4499 if (!pmb) { 4500 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4501 "0395 The mboxq allocation failed\n"); 4502 return; 4503 } 4504 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4505 if (!mp) { 4506 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4507 "0396 The lpfc_dmabuf allocation failed\n"); 4508 goto out_free_pmb; 4509 } 4510 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4511 if (!mp->virt) { 4512 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4513 "0397 The mbuf allocation failed\n"); 4514 goto out_free_dmabuf; 4515 } 4516 4517 /* Cleanup any outstanding ELS commands */ 4518 lpfc_els_flush_all_cmd(phba); 4519 4520 /* Block ELS IOCBs until we have done process link event */ 4521 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4522 4523 /* Update link event statistics */ 4524 phba->sli.slistat.link_event++; 4525 4526 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4527 lpfc_read_topology(phba, pmb, mp); 4528 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4529 pmb->vport = phba->pport; 4530 4531 /* Keep the link status for extra SLI4 state machine reference */ 4532 phba->sli4_hba.link_state.speed = 4533 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 4534 bf_get(lpfc_acqe_link_speed, acqe_link)); 4535 phba->sli4_hba.link_state.duplex = 4536 bf_get(lpfc_acqe_link_duplex, acqe_link); 4537 phba->sli4_hba.link_state.status = 4538 bf_get(lpfc_acqe_link_status, acqe_link); 4539 phba->sli4_hba.link_state.type = 4540 bf_get(lpfc_acqe_link_type, acqe_link); 4541 phba->sli4_hba.link_state.number = 4542 bf_get(lpfc_acqe_link_number, acqe_link); 4543 phba->sli4_hba.link_state.fault = 4544 bf_get(lpfc_acqe_link_fault, acqe_link); 4545 phba->sli4_hba.link_state.logical_speed = 4546 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 4547 4548 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4549 "2900 Async FC/FCoE Link event - Speed:%dGBit " 4550 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 4551 "Logical speed:%dMbps Fault:%d\n", 4552 phba->sli4_hba.link_state.speed, 4553 phba->sli4_hba.link_state.topology, 4554 phba->sli4_hba.link_state.status, 4555 phba->sli4_hba.link_state.type, 4556 phba->sli4_hba.link_state.number, 4557 phba->sli4_hba.link_state.logical_speed, 4558 phba->sli4_hba.link_state.fault); 4559 /* 4560 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 4561 * topology info. Note: Optional for non FC-AL ports. 4562 */ 4563 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4564 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4565 if (rc == MBX_NOT_FINISHED) 4566 goto out_free_dmabuf; 4567 return; 4568 } 4569 /* 4570 * For FCoE Mode: fill in all the topology information we need and call 4571 * the READ_TOPOLOGY completion routine to continue without actually 4572 * sending the READ_TOPOLOGY mailbox command to the port. 
     */
    /* Initialize completion status */
    mb = &pmb->u.mb;
    mb->mbxStatus = MBX_SUCCESS;

    /* Parse port fault information field */
    lpfc_sli4_parse_latt_fault(phba, acqe_link);

    /* Parse and translate link attention fields */
    la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
    la->eventTag = acqe_link->event_tag;
    bf_set(lpfc_mbx_read_top_att_type, la, att_type);
    bf_set(lpfc_mbx_read_top_link_spd, la,
           (bf_get(lpfc_acqe_link_speed, acqe_link)));

    /* Fake the following irrelevant fields */
    bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
    bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
    bf_set(lpfc_mbx_read_top_il, la, 0);
    bf_set(lpfc_mbx_read_top_pb, la, 0);
    bf_set(lpfc_mbx_read_top_fa, la, 0);
    bf_set(lpfc_mbx_read_top_mm, la, 0);

    /* Invoke the lpfc_handle_latt mailbox command callback function */
    lpfc_mbx_cmpl_read_topology(phba, pmb);

    return;

out_free_dmabuf:
    kfree(mp);
out_free_pmb:
    mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It logs that
 * the event was received, records the link state, and then issues a
 * read_topology mailbox command so that the rest of the driver treats the
 * link the same as on SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
    struct lpfc_dmabuf *mp;
    LPFC_MBOXQ_t *pmb;
    MAILBOX_t *mb;
    struct lpfc_mbx_read_top *la;
    int rc;

    if (bf_get(lpfc_trailer_type, acqe_fc) !=
        LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                "2895 Non FC link Event detected.(%d)\n",
                bf_get(lpfc_trailer_type, acqe_fc));
        return;
    }
    /* Keep the link status for extra SLI4 state machine reference */
    phba->sli4_hba.link_state.speed =
            lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
                    bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
    phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
    phba->sli4_hba.link_state.topology =
            bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
    phba->sli4_hba.link_state.status =
            bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
    phba->sli4_hba.link_state.type =
            bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
    phba->sli4_hba.link_state.number =
            bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
    phba->sli4_hba.link_state.fault =
            bf_get(lpfc_acqe_link_fault, acqe_fc);
    phba->sli4_hba.link_state.logical_speed =
            bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
    lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
            "2896 Async FC event - Speed:%dGBaud Topology:x%x "
            "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
            "%dMbps Fault:%d\n",
            phba->sli4_hba.link_state.speed,
            phba->sli4_hba.link_state.topology,
            phba->sli4_hba.link_state.status,
            phba->sli4_hba.link_state.type,
            phba->sli4_hba.link_state.number,
            phba->sli4_hba.link_state.logical_speed,
            phba->sli4_hba.link_state.fault);
    pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb) {
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                "2897 The mboxq allocation failed\n");
        return;
    }
    mp =
kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4667 if (!mp) { 4668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4669 "2898 The lpfc_dmabuf allocation failed\n"); 4670 goto out_free_pmb; 4671 } 4672 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4673 if (!mp->virt) { 4674 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4675 "2899 The mbuf allocation failed\n"); 4676 goto out_free_dmabuf; 4677 } 4678 4679 /* Cleanup any outstanding ELS commands */ 4680 lpfc_els_flush_all_cmd(phba); 4681 4682 /* Block ELS IOCBs until we have done process link event */ 4683 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4684 4685 /* Update link event statistics */ 4686 phba->sli.slistat.link_event++; 4687 4688 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4689 lpfc_read_topology(phba, pmb, mp); 4690 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4691 pmb->vport = phba->pport; 4692 4693 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 4694 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 4695 4696 switch (phba->sli4_hba.link_state.status) { 4697 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 4698 phba->link_flag |= LS_MDS_LINK_DOWN; 4699 break; 4700 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 4701 phba->link_flag |= LS_MDS_LOOPBACK; 4702 break; 4703 default: 4704 break; 4705 } 4706 4707 /* Initialize completion status */ 4708 mb = &pmb->u.mb; 4709 mb->mbxStatus = MBX_SUCCESS; 4710 4711 /* Parse port fault information field */ 4712 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 4713 4714 /* Parse and translate link attention fields */ 4715 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 4716 la->eventTag = acqe_fc->event_tag; 4717 4718 if (phba->sli4_hba.link_state.status == 4719 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 4720 bf_set(lpfc_mbx_read_top_att_type, la, 4721 LPFC_FC_LA_TYPE_UNEXP_WWPN); 4722 } else { 4723 bf_set(lpfc_mbx_read_top_att_type, la, 4724 LPFC_FC_LA_TYPE_LINK_DOWN); 4725 } 4726 /* Invoke the mailbox command callback function */ 4727 lpfc_mbx_cmpl_read_topology(phba, pmb); 4728 4729 return; 4730 } 4731 4732 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4733 if (rc == MBX_NOT_FINISHED) 4734 goto out_free_dmabuf; 4735 return; 4736 4737 out_free_dmabuf: 4738 kfree(mp); 4739 out_free_pmb: 4740 mempool_free(pmb, phba->mbox_mem_pool); 4741 } 4742 4743 /** 4744 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 4745 * @phba: pointer to lpfc hba data structure. 4746 * @acqe_fc: pointer to the async SLI completion queue entry. 4747 * 4748 * This routine is to handle the SLI4 asynchronous SLI events. 
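 *
 * Event types handled here include over/normal temperature, misconfigured
 * optics and remote D_Port test initiation; any other type is only logged.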
4749 **/ 4750 static void 4751 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 4752 { 4753 char port_name; 4754 char message[128]; 4755 uint8_t status; 4756 uint8_t evt_type; 4757 uint8_t operational = 0; 4758 struct temp_event temp_event_data; 4759 struct lpfc_acqe_misconfigured_event *misconfigured; 4760 struct Scsi_Host *shost; 4761 4762 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 4763 4764 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4765 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 4766 "x%08x SLI Event Type:%d\n", 4767 acqe_sli->event_data1, acqe_sli->event_data2, 4768 evt_type); 4769 4770 port_name = phba->Port[0]; 4771 if (port_name == 0x00) 4772 port_name = '?'; /* get port name is empty */ 4773 4774 switch (evt_type) { 4775 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 4776 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4777 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 4778 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4779 4780 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4781 "3190 Over Temperature:%d Celsius- Port Name %c\n", 4782 acqe_sli->event_data1, port_name); 4783 4784 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 4785 shost = lpfc_shost_from_vport(phba->pport); 4786 fc_host_post_vendor_event(shost, fc_get_event_number(), 4787 sizeof(temp_event_data), 4788 (char *)&temp_event_data, 4789 SCSI_NL_VID_TYPE_PCI 4790 | PCI_VENDOR_ID_EMULEX); 4791 break; 4792 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 4793 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4794 temp_event_data.event_code = LPFC_NORMAL_TEMP; 4795 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4796 4797 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4798 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 4799 acqe_sli->event_data1, port_name); 4800 4801 shost = lpfc_shost_from_vport(phba->pport); 4802 fc_host_post_vendor_event(shost, fc_get_event_number(), 4803 sizeof(temp_event_data), 4804 (char *)&temp_event_data, 4805 SCSI_NL_VID_TYPE_PCI 4806 | PCI_VENDOR_ID_EMULEX); 4807 break; 4808 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 4809 misconfigured = (struct lpfc_acqe_misconfigured_event *) 4810 &acqe_sli->event_data1; 4811 4812 /* fetch the status for this port */ 4813 switch (phba->sli4_hba.lnk_info.lnk_no) { 4814 case LPFC_LINK_NUMBER_0: 4815 status = bf_get(lpfc_sli_misconfigured_port0_state, 4816 &misconfigured->theEvent); 4817 operational = bf_get(lpfc_sli_misconfigured_port0_op, 4818 &misconfigured->theEvent); 4819 break; 4820 case LPFC_LINK_NUMBER_1: 4821 status = bf_get(lpfc_sli_misconfigured_port1_state, 4822 &misconfigured->theEvent); 4823 operational = bf_get(lpfc_sli_misconfigured_port1_op, 4824 &misconfigured->theEvent); 4825 break; 4826 case LPFC_LINK_NUMBER_2: 4827 status = bf_get(lpfc_sli_misconfigured_port2_state, 4828 &misconfigured->theEvent); 4829 operational = bf_get(lpfc_sli_misconfigured_port2_op, 4830 &misconfigured->theEvent); 4831 break; 4832 case LPFC_LINK_NUMBER_3: 4833 status = bf_get(lpfc_sli_misconfigured_port3_state, 4834 &misconfigured->theEvent); 4835 operational = bf_get(lpfc_sli_misconfigured_port3_op, 4836 &misconfigured->theEvent); 4837 break; 4838 default: 4839 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4840 "3296 " 4841 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 4842 "event: Invalid link %d", 4843 phba->sli4_hba.lnk_info.lnk_no); 4844 return; 4845 } 4846 4847 /* Skip if optic state unchanged */ 4848 if (phba->sli4_hba.lnk_info.optic_state == status) 4849 return; 4850 4851 switch (status) { 4852 case 
LPFC_SLI_EVENT_STATUS_VALID: 4853 sprintf(message, "Physical Link is functional"); 4854 break; 4855 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 4856 sprintf(message, "Optics faulted/incorrectly " 4857 "installed/not installed - Reseat optics, " 4858 "if issue not resolved, replace."); 4859 break; 4860 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 4861 sprintf(message, 4862 "Optics of two types installed - Remove one " 4863 "optic or install matching pair of optics."); 4864 break; 4865 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 4866 sprintf(message, "Incompatible optics - Replace with " 4867 "compatible optics for card to function."); 4868 break; 4869 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 4870 sprintf(message, "Unqualified optics - Replace with " 4871 "Avago optics for Warranty and Technical " 4872 "Support - Link is%s operational", 4873 (operational) ? " not" : ""); 4874 break; 4875 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 4876 sprintf(message, "Uncertified optics - Replace with " 4877 "Avago-certified optics to enable link " 4878 "operation - Link is%s operational", 4879 (operational) ? " not" : ""); 4880 break; 4881 default: 4882 /* firmware is reporting a status we don't know about */ 4883 sprintf(message, "Unknown event status x%02x", status); 4884 break; 4885 } 4886 phba->sli4_hba.lnk_info.optic_state = status; 4887 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4888 "3176 Port Name %c %s\n", port_name, message); 4889 break; 4890 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 4891 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4892 "3192 Remote DPort Test Initiated - " 4893 "Event Data1:x%08x Event Data2: x%08x\n", 4894 acqe_sli->event_data1, acqe_sli->event_data2); 4895 break; 4896 default: 4897 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4898 "3193 Async SLI event - Event Data1:x%08x Event Data2:" 4899 "x%08x SLI Event Type:%d\n", 4900 acqe_sli->event_data1, acqe_sli->event_data2, 4901 evt_type); 4902 break; 4903 } 4904 } 4905 4906 /** 4907 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 4908 * @vport: pointer to vport data structure. 4909 * 4910 * This routine is to perform Clear Virtual Link (CVL) on a vport in 4911 * response to a CVL event. 4912 * 4913 * Return the pointer to the ndlp with the vport if successful, otherwise 4914 * return NULL. 
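 *
 * This is typically reached from the FIP CVL event handling below, either
 * for the single vport resolved by lpfc_find_vport_by_vpid() or for every
 * vport via lpfc_sli4_perform_all_vport_cvl().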
4915 **/ 4916 static struct lpfc_nodelist * 4917 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 4918 { 4919 struct lpfc_nodelist *ndlp; 4920 struct Scsi_Host *shost; 4921 struct lpfc_hba *phba; 4922 4923 if (!vport) 4924 return NULL; 4925 phba = vport->phba; 4926 if (!phba) 4927 return NULL; 4928 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4929 if (!ndlp) { 4930 /* Cannot find existing Fabric ndlp, so allocate a new one */ 4931 ndlp = lpfc_nlp_init(vport, Fabric_DID); 4932 if (!ndlp) 4933 return 0; 4934 /* Set the node type */ 4935 ndlp->nlp_type |= NLP_FABRIC; 4936 /* Put ndlp onto node list */ 4937 lpfc_enqueue_node(vport, ndlp); 4938 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 4939 /* re-setup ndlp without removing from node list */ 4940 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 4941 if (!ndlp) 4942 return 0; 4943 } 4944 if ((phba->pport->port_state < LPFC_FLOGI) && 4945 (phba->pport->port_state != LPFC_VPORT_FAILED)) 4946 return NULL; 4947 /* If virtual link is not yet instantiated ignore CVL */ 4948 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 4949 && (vport->port_state != LPFC_VPORT_FAILED)) 4950 return NULL; 4951 shost = lpfc_shost_from_vport(vport); 4952 if (!shost) 4953 return NULL; 4954 lpfc_linkdown_port(vport); 4955 lpfc_cleanup_pending_mbox(vport); 4956 spin_lock_irq(shost->host_lock); 4957 vport->fc_flag |= FC_VPORT_CVL_RCVD; 4958 spin_unlock_irq(shost->host_lock); 4959 4960 return ndlp; 4961 } 4962 4963 /** 4964 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 4965 * @vport: pointer to lpfc hba data structure. 4966 * 4967 * This routine is to perform Clear Virtual Link (CVL) on all vports in 4968 * response to a FCF dead event. 4969 **/ 4970 static void 4971 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 4972 { 4973 struct lpfc_vport **vports; 4974 int i; 4975 4976 vports = lpfc_create_vport_work_array(phba); 4977 if (vports) 4978 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 4979 lpfc_sli4_perform_vport_cvl(vports[i]); 4980 lpfc_destroy_vport_work_array(phba, vports); 4981 } 4982 4983 /** 4984 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 4985 * @phba: pointer to lpfc hba data structure. 4986 * @acqe_link: pointer to the async fcoe completion queue entry. 4987 * 4988 * This routine is to handle the SLI4 asynchronous fcoe event. 
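 *
 * FIP event types handled here are new FCF / FCF parameter modified, FCF
 * table full, FCF dead and Clear Virtual Link; depending on the current
 * FCF state this may start an FCF table scan or a fast FCF failover.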
4989 **/ 4990 static void 4991 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 4992 struct lpfc_acqe_fip *acqe_fip) 4993 { 4994 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 4995 int rc; 4996 struct lpfc_vport *vport; 4997 struct lpfc_nodelist *ndlp; 4998 struct Scsi_Host *shost; 4999 int active_vlink_present; 5000 struct lpfc_vport **vports; 5001 int i; 5002 5003 phba->fc_eventTag = acqe_fip->event_tag; 5004 phba->fcoe_eventtag = acqe_fip->event_tag; 5005 switch (event_type) { 5006 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5007 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5008 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5009 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5010 LOG_DISCOVERY, 5011 "2546 New FCF event, evt_tag:x%x, " 5012 "index:x%x\n", 5013 acqe_fip->event_tag, 5014 acqe_fip->index); 5015 else 5016 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 5017 LOG_DISCOVERY, 5018 "2788 FCF param modified event, " 5019 "evt_tag:x%x, index:x%x\n", 5020 acqe_fip->event_tag, 5021 acqe_fip->index); 5022 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5023 /* 5024 * During period of FCF discovery, read the FCF 5025 * table record indexed by the event to update 5026 * FCF roundrobin failover eligible FCF bmask. 5027 */ 5028 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5029 LOG_DISCOVERY, 5030 "2779 Read FCF (x%x) for updating " 5031 "roundrobin FCF failover bmask\n", 5032 acqe_fip->index); 5033 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5034 } 5035 5036 /* If the FCF discovery is in progress, do nothing. */ 5037 spin_lock_irq(&phba->hbalock); 5038 if (phba->hba_flag & FCF_TS_INPROG) { 5039 spin_unlock_irq(&phba->hbalock); 5040 break; 5041 } 5042 /* If fast FCF failover rescan event is pending, do nothing */ 5043 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 5044 spin_unlock_irq(&phba->hbalock); 5045 break; 5046 } 5047 5048 /* If the FCF has been in discovered state, do nothing. */ 5049 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5050 spin_unlock_irq(&phba->hbalock); 5051 break; 5052 } 5053 spin_unlock_irq(&phba->hbalock); 5054 5055 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5056 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5057 "2770 Start FCF table scan per async FCF " 5058 "event, evt_tag:x%x, index:x%x\n", 5059 acqe_fip->event_tag, acqe_fip->index); 5060 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5061 LPFC_FCOE_FCF_GET_FIRST); 5062 if (rc) 5063 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5064 "2547 Issue FCF scan read FCF mailbox " 5065 "command failed (x%x)\n", rc); 5066 break; 5067 5068 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5069 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5070 "2548 FCF Table full count 0x%x tag 0x%x\n", 5071 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5072 acqe_fip->event_tag); 5073 break; 5074 5075 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5076 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5077 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5078 "2549 FCF (x%x) disconnected from network, " 5079 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 5080 /* 5081 * If we are in the middle of FCF failover process, clear 5082 * the corresponding FCF bit in the roundrobin bitmap. 
5083 */ 5084 spin_lock_irq(&phba->hbalock); 5085 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 5086 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 5087 spin_unlock_irq(&phba->hbalock); 5088 /* Update FLOGI FCF failover eligible FCF bmask */ 5089 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 5090 break; 5091 } 5092 spin_unlock_irq(&phba->hbalock); 5093 5094 /* If the event is not for currently used fcf do nothing */ 5095 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 5096 break; 5097 5098 /* 5099 * Otherwise, request the port to rediscover the entire FCF 5100 * table for a fast recovery from case that the current FCF 5101 * is no longer valid as we are not in the middle of FCF 5102 * failover process already. 5103 */ 5104 spin_lock_irq(&phba->hbalock); 5105 /* Mark the fast failover process in progress */ 5106 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 5107 spin_unlock_irq(&phba->hbalock); 5108 5109 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5110 "2771 Start FCF fast failover process due to " 5111 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 5112 "\n", acqe_fip->event_tag, acqe_fip->index); 5113 rc = lpfc_sli4_redisc_fcf_table(phba); 5114 if (rc) { 5115 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5116 LOG_DISCOVERY, 5117 "2772 Issue FCF rediscover mailbox " 5118 "command failed, fail through to FCF " 5119 "dead event\n"); 5120 spin_lock_irq(&phba->hbalock); 5121 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 5122 spin_unlock_irq(&phba->hbalock); 5123 /* 5124 * Last resort will fail over by treating this 5125 * as a link down to FCF registration. 5126 */ 5127 lpfc_sli4_fcf_dead_failthrough(phba); 5128 } else { 5129 /* Reset FCF roundrobin bmask for new discovery */ 5130 lpfc_sli4_clear_fcf_rr_bmask(phba); 5131 /* 5132 * Handling fast FCF failover to a DEAD FCF event is 5133 * considered equalivant to receiving CVL to all vports. 5134 */ 5135 lpfc_sli4_perform_all_vport_cvl(phba); 5136 } 5137 break; 5138 case LPFC_FIP_EVENT_TYPE_CVL: 5139 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5140 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5141 "2718 Clear Virtual Link Received for VPI 0x%x" 5142 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5143 5144 vport = lpfc_find_vport_by_vpid(phba, 5145 acqe_fip->index); 5146 ndlp = lpfc_sli4_perform_vport_cvl(vport); 5147 if (!ndlp) 5148 break; 5149 active_vlink_present = 0; 5150 5151 vports = lpfc_create_vport_work_array(phba); 5152 if (vports) { 5153 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5154 i++) { 5155 if ((!(vports[i]->fc_flag & 5156 FC_VPORT_CVL_RCVD)) && 5157 (vports[i]->port_state > LPFC_FDISC)) { 5158 active_vlink_present = 1; 5159 break; 5160 } 5161 } 5162 lpfc_destroy_vport_work_array(phba, vports); 5163 } 5164 5165 /* 5166 * Don't re-instantiate if vport is marked for deletion. 5167 * If we are here first then vport_delete is going to wait 5168 * for discovery to complete. 5169 */ 5170 if (!(vport->load_flag & FC_UNLOADING) && 5171 active_vlink_present) { 5172 /* 5173 * If there are other active VLinks present, 5174 * re-instantiate the Vlink using FDISC. 
             */
            mod_timer(&ndlp->nlp_delayfunc,
                  jiffies + msecs_to_jiffies(1000));
            shost = lpfc_shost_from_vport(vport);
            spin_lock_irq(shost->host_lock);
            ndlp->nlp_flag |= NLP_DELAY_TMO;
            spin_unlock_irq(shost->host_lock);
            ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
            vport->port_state = LPFC_FDISC;
        } else {
            /*
             * Otherwise, we request the port to rediscover
             * the entire FCF table for a fast recovery
             * from the possible case that the current FCF
             * is no longer valid, if we are not already
             * in the FCF failover process.
             */
            spin_lock_irq(&phba->hbalock);
            if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
                spin_unlock_irq(&phba->hbalock);
                break;
            }
            /* Mark the fast failover process in progress */
            phba->fcf.fcf_flag |= FCF_ACVL_DISC;
            spin_unlock_irq(&phba->hbalock);
            lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
                    LOG_DISCOVERY,
                    "2773 Start FCF failover per CVL, "
                    "evt_tag:x%x\n", acqe_fip->event_tag);
            rc = lpfc_sli4_redisc_fcf_table(phba);
            if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
                        LOG_DISCOVERY,
                        "2774 Issue FCF rediscover "
                        "mailbox command failed, "
                        "fail through to CVL event\n");
                spin_lock_irq(&phba->hbalock);
                phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
                spin_unlock_irq(&phba->hbalock);
                /*
                 * Last resort will be re-try on the
                 * current registered FCF entry.
                 */
                lpfc_retry_pport_discovery(phba);
            } else
                /*
                 * Reset FCF roundrobin bmask for new
                 * discovery.
                 */
                lpfc_sli4_clear_fcf_rr_bmask(phba);
        }
        break;
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                "0288 Unknown FCoE event type 0x%x event tag "
                "0x%x\n", event_type, acqe_fip->event_tag);
        break;
    }
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
             struct lpfc_acqe_dcbx *acqe_dcbx)
{
    phba->fc_eventTag = acqe_dcbx->event_tag;
    lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
            "0290 The SLI4 DCBX asynchronous event is not "
            "handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
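 *
 * For example, a raw ACQE value of 1000 translates to a logical link
 * speed of 10000 Mbps (10 Gbps) after the *10 scaling applied below
 * (illustrative figure only).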
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
             struct lpfc_acqe_grp5 *acqe_grp5)
{
    uint16_t prev_ll_spd;

    phba->fc_eventTag = acqe_grp5->event_tag;
    phba->fcoe_eventtag = acqe_grp5->event_tag;
    prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
    phba->sli4_hba.link_state.logical_speed =
        (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
    lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
            "2789 GRP5 Async Event: Updating logical link speed "
            "from %dMbps to %dMbps\n", prev_ll_spd,
            phba->sli4_hba.link_state.logical_speed);
}

/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
    struct lpfc_cq_event *cq_event;

    /* First, declare the async event has been handled */
    spin_lock_irq(&phba->hbalock);
    phba->hba_flag &= ~ASYNC_EVENT;
    spin_unlock_irq(&phba->hbalock);
    /* Now, handle all the async events */
    while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
        /* Get the first event from the head of the event queue */
        spin_lock_irq(&phba->hbalock);
        list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
                 cq_event, struct lpfc_cq_event, list);
        spin_unlock_irq(&phba->hbalock);
        /* Process the asynchronous event */
        switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
        case LPFC_TRAILER_CODE_LINK:
            lpfc_sli4_async_link_evt(phba,
                         &cq_event->cqe.acqe_link);
            break;
        case LPFC_TRAILER_CODE_FCOE:
            lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
            break;
        case LPFC_TRAILER_CODE_DCBX:
            lpfc_sli4_async_dcbx_evt(phba,
                         &cq_event->cqe.acqe_dcbx);
            break;
        case LPFC_TRAILER_CODE_GRP5:
            lpfc_sli4_async_grp5_evt(phba,
                         &cq_event->cqe.acqe_grp5);
            break;
        case LPFC_TRAILER_CODE_FC:
            lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
            break;
        case LPFC_TRAILER_CODE_SLI:
            lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
            break;
        default:
            lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                    "1804 Invalid asynchronous event code: "
                    "x%x\n", bf_get(lpfc_trailer_code,
                    &cq_event->cqe.mcqe_cmpl));
            break;
        }
        /* Free the completion event processed to the free pool */
        lpfc_sli4_cq_event_release(phba, cq_event);
    }
}

/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process the FCF table
 * rediscovery pending completion event.
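 *
 * It runs once lpfc_sli4_fcf_redisc_wait_tmo() has posted FCF_REDISC_EVT:
 * the flag is cleared, FCF_REDISC_FOV is set to mark the fast failover,
 * and the FCF table scan is restarted from the first entry.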
5341 **/ 5342 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5343 { 5344 int rc; 5345 5346 spin_lock_irq(&phba->hbalock); 5347 /* Clear FCF rediscovery timeout event */ 5348 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 5349 /* Clear driver fast failover FCF record flag */ 5350 phba->fcf.failover_rec.flag = 0; 5351 /* Set state for FCF fast failover */ 5352 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 5353 spin_unlock_irq(&phba->hbalock); 5354 5355 /* Scan FCF table from the first entry to re-discover SAN */ 5356 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5357 "2777 Start post-quiescent FCF table scan\n"); 5358 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5359 if (rc) 5360 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5361 "2747 Issue FCF scan read FCF mailbox " 5362 "command failed 0x%x\n", rc); 5363 } 5364 5365 /** 5366 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 5367 * @phba: pointer to lpfc hba data structure. 5368 * @dev_grp: The HBA PCI-Device group number. 5369 * 5370 * This routine is invoked to set up the per HBA PCI-Device group function 5371 * API jump table entries. 5372 * 5373 * Return: 0 if success, otherwise -ENODEV 5374 **/ 5375 int 5376 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5377 { 5378 int rc; 5379 5380 /* Set up lpfc PCI-device group */ 5381 phba->pci_dev_grp = dev_grp; 5382 5383 /* The LPFC_PCI_DEV_OC uses SLI4 */ 5384 if (dev_grp == LPFC_PCI_DEV_OC) 5385 phba->sli_rev = LPFC_SLI_REV4; 5386 5387 /* Set up device INIT API function jump table */ 5388 rc = lpfc_init_api_table_setup(phba, dev_grp); 5389 if (rc) 5390 return -ENODEV; 5391 /* Set up SCSI API function jump table */ 5392 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 5393 if (rc) 5394 return -ENODEV; 5395 /* Set up SLI API function jump table */ 5396 rc = lpfc_sli_api_table_setup(phba, dev_grp); 5397 if (rc) 5398 return -ENODEV; 5399 /* Set up MBOX API function jump table */ 5400 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 5401 if (rc) 5402 return -ENODEV; 5403 5404 return 0; 5405 } 5406 5407 /** 5408 * lpfc_log_intr_mode - Log the active interrupt mode 5409 * @phba: pointer to lpfc hba data structure. 5410 * @intr_mode: active interrupt mode adopted. 5411 * 5412 * This routine it invoked to log the currently used active interrupt mode 5413 * to the device. 5414 **/ 5415 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 5416 { 5417 switch (intr_mode) { 5418 case 0: 5419 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5420 "0470 Enable INTx interrupt mode.\n"); 5421 break; 5422 case 1: 5423 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5424 "0481 Enabled MSI interrupt mode.\n"); 5425 break; 5426 case 2: 5427 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5428 "0480 Enabled MSI-X interrupt mode.\n"); 5429 break; 5430 default: 5431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5432 "0482 Illegal interrupt mode.\n"); 5433 break; 5434 } 5435 return; 5436 } 5437 5438 /** 5439 * lpfc_enable_pci_dev - Enable a generic PCI device. 5440 * @phba: pointer to lpfc hba data structure. 5441 * 5442 * This routine is invoked to enable the PCI device that is common to all 5443 * PCI devices. 
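 * The sequence below enables the device memory resources, requests the
 * memory regions under LPFC_DRIVER_NAME, sets bus mastering, tries to
 * enable MWI, saves the PCI state for EEH, and flags PCIe devices as
 * needing a fundamental reset for EEH recovery.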
5444 * 5445 * Return codes 5446 * 0 - successful 5447 * other values - error 5448 **/ 5449 static int 5450 lpfc_enable_pci_dev(struct lpfc_hba *phba) 5451 { 5452 struct pci_dev *pdev; 5453 5454 /* Obtain PCI device reference */ 5455 if (!phba->pcidev) 5456 goto out_error; 5457 else 5458 pdev = phba->pcidev; 5459 /* Enable PCI device */ 5460 if (pci_enable_device_mem(pdev)) 5461 goto out_error; 5462 /* Request PCI resource for the device */ 5463 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 5464 goto out_disable_device; 5465 /* Set up device as PCI master and save state for EEH */ 5466 pci_set_master(pdev); 5467 pci_try_set_mwi(pdev); 5468 pci_save_state(pdev); 5469 5470 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 5471 if (pci_is_pcie(pdev)) 5472 pdev->needs_freset = 1; 5473 5474 return 0; 5475 5476 out_disable_device: 5477 pci_disable_device(pdev); 5478 out_error: 5479 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5480 "1401 Failed to enable pci device\n"); 5481 return -ENODEV; 5482 } 5483 5484 /** 5485 * lpfc_disable_pci_dev - Disable a generic PCI device. 5486 * @phba: pointer to lpfc hba data structure. 5487 * 5488 * This routine is invoked to disable the PCI device that is common to all 5489 * PCI devices. 5490 **/ 5491 static void 5492 lpfc_disable_pci_dev(struct lpfc_hba *phba) 5493 { 5494 struct pci_dev *pdev; 5495 5496 /* Obtain PCI device reference */ 5497 if (!phba->pcidev) 5498 return; 5499 else 5500 pdev = phba->pcidev; 5501 /* Release PCI resource and disable PCI device */ 5502 pci_release_mem_regions(pdev); 5503 pci_disable_device(pdev); 5504 5505 return; 5506 } 5507 5508 /** 5509 * lpfc_reset_hba - Reset a hba 5510 * @phba: pointer to lpfc hba data structure. 5511 * 5512 * This routine is invoked to reset a hba device. It brings the HBA 5513 * offline, performs a board restart, and then brings the board back 5514 * online. The lpfc_offline call invokes lpfc_sli_hba_down, which cleans up 5515 * the outstanding mailbox commands. 5516 **/ 5517 void 5518 lpfc_reset_hba(struct lpfc_hba *phba) 5519 { 5520 /* If resets are disabled then set error state and return. */ 5521 if (!phba->cfg_enable_hba_reset) { 5522 phba->link_state = LPFC_HBA_ERROR; 5523 return; 5524 } 5525 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 5526 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 5527 else 5528 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 5529 lpfc_offline(phba); 5530 lpfc_sli_brdrestart(phba); 5531 lpfc_online(phba); 5532 lpfc_unblock_mgmt_io(phba); 5533 } 5534 5535 /** 5536 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 5537 * @phba: pointer to lpfc hba data structure. 5538 * 5539 * This function reads the SR-IOV extended capability of the attached PCI 5540 * device and returns the TotalVFs value, i.e. the maximum number of virtual 5541 * functions the physical function can support. If the device does not 5542 * expose the SR-IOV capability, 0 is returned so that the caller treats 5543 * SR-IOV as unavailable rather than as an error.
5544 **/ 5545 uint16_t 5546 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 5547 { 5548 struct pci_dev *pdev = phba->pcidev; 5549 uint16_t nr_virtfn; 5550 int pos; 5551 5552 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 5553 if (pos == 0) 5554 return 0; 5555 5556 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 5557 return nr_virtfn; 5558 } 5559 5560 /** 5561 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 5562 * @phba: pointer to lpfc hba data structure. 5563 * @nr_vfn: number of virtual functions to be enabled. 5564 * 5565 * This function enables PCI SR-IOV virtual functions on a physical 5566 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 5567 * enable that number of virtual functions on the physical function. As 5568 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 5569 * API call is not considered an error condition for most devices. 5570 **/ 5571 int 5572 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 5573 { 5574 struct pci_dev *pdev = phba->pcidev; 5575 uint16_t max_nr_vfn; 5576 int rc; 5577 5578 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 5579 if (nr_vfn > max_nr_vfn) { 5580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5581 "3057 Requested vfs (%d) greater than " 5582 "supported vfs (%d)", nr_vfn, max_nr_vfn); 5583 return -EINVAL; 5584 } 5585 5586 rc = pci_enable_sriov(pdev, nr_vfn); 5587 if (rc) { 5588 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5589 "2806 Failed to enable sriov on this device " 5590 "with vfn number nr_vf:%d, rc:%d\n", 5591 nr_vfn, rc); 5592 } else 5593 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5594 "2807 Successfully enabled sriov on this device " 5595 "with vfn number nr_vf:%d\n", nr_vfn); 5596 return rc; 5597 } 5598 5599 /** 5600 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. 5601 * @phba: pointer to lpfc hba data structure. 5602 * 5603 * This routine is invoked to set up the driver internal resources before the 5604 * device specific resource setup to support the HBA device it is attached to. 5605 * 5606 * Return codes 5607 * 0 - successful 5608 * other values - error 5609 **/ 5610 static int 5611 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 5612 { 5613 struct lpfc_sli *psli = &phba->sli; 5614 5615 /* 5616 * Driver resources common to all SLI revisions 5617 */ 5618 atomic_set(&phba->fast_event_count, 0); 5619 spin_lock_init(&phba->hbalock); 5620 5621 /* Initialize ndlp management spinlock */ 5622 spin_lock_init(&phba->ndlp_lock); 5623 5624 INIT_LIST_HEAD(&phba->port_list); 5625 INIT_LIST_HEAD(&phba->work_list); 5626 init_waitqueue_head(&phba->wait_4_mlo_m_q); 5627 5628 /* Initialize the wait queue head for the kernel thread */ 5629 init_waitqueue_head(&phba->work_waitq); 5630 5631 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5632 "1403 Protocols supported %s %s %s\n", 5633 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 5634 "SCSI" : " "), 5635 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 5636 "NVME" : " "), 5637 (phba->nvmet_support ?
"NVMET" : " ")); 5638 5639 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 5640 /* Initialize the scsi buffer list used by driver for scsi IO */ 5641 spin_lock_init(&phba->scsi_buf_list_get_lock); 5642 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 5643 spin_lock_init(&phba->scsi_buf_list_put_lock); 5644 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 5645 } 5646 5647 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 5648 (phba->nvmet_support == 0)) { 5649 /* Initialize the NVME buffer list used by driver for NVME IO */ 5650 spin_lock_init(&phba->nvme_buf_list_get_lock); 5651 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get); 5652 phba->get_nvme_bufs = 0; 5653 spin_lock_init(&phba->nvme_buf_list_put_lock); 5654 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); 5655 phba->put_nvme_bufs = 0; 5656 } 5657 5658 /* Initialize the fabric iocb list */ 5659 INIT_LIST_HEAD(&phba->fabric_iocb_list); 5660 5661 /* Initialize list to save ELS buffers */ 5662 INIT_LIST_HEAD(&phba->elsbuf); 5663 5664 /* Initialize FCF connection rec list */ 5665 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 5666 5667 /* Initialize OAS configuration list */ 5668 spin_lock_init(&phba->devicelock); 5669 INIT_LIST_HEAD(&phba->luns); 5670 5671 /* MBOX heartbeat timer */ 5672 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 5673 /* Fabric block timer */ 5674 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 5675 /* EA polling mode timer */ 5676 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 5677 /* Heartbeat timer */ 5678 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 5679 5680 return 0; 5681 } 5682 5683 /** 5684 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 5685 * @phba: pointer to lpfc hba data structure. 5686 * 5687 * This routine is invoked to set up the driver internal resources specific to 5688 * support the SLI-3 HBA device it attached to. 5689 * 5690 * Return codes 5691 * 0 - successful 5692 * other values - error 5693 **/ 5694 static int 5695 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 5696 { 5697 int rc; 5698 5699 /* 5700 * Initialize timers used by driver 5701 */ 5702 5703 /* FCP polling mode timer */ 5704 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 5705 5706 /* Host attention work mask setup */ 5707 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 5708 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 5709 5710 /* Get all the module params for configuring this host */ 5711 lpfc_get_cfgparam(phba); 5712 /* Set up phase-1 common device driver resources */ 5713 5714 rc = lpfc_setup_driver_resource_phase1(phba); 5715 if (rc) 5716 return -ENODEV; 5717 5718 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 5719 phba->menlo_flag |= HBA_MENLO_SUPPORT; 5720 /* check for menlo minimum sg count */ 5721 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 5722 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 5723 } 5724 5725 if (!phba->sli.sli3_ring) 5726 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 5727 sizeof(struct lpfc_sli_ring), 5728 GFP_KERNEL); 5729 if (!phba->sli.sli3_ring) 5730 return -ENOMEM; 5731 5732 /* 5733 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 5734 * used to create the sg_dma_buf_pool must be dynamically calculated. 5735 */ 5736 5737 /* Initialize the host templates the configured values. 
*/ 5738 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5739 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 5740 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5741 5742 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 5743 if (phba->cfg_enable_bg) { 5744 /* 5745 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 5746 * the FCP rsp, and a BDE for each. Sice we have no control 5747 * over how many protection data segments the SCSI Layer 5748 * will hand us (ie: there could be one for every block 5749 * in the IO), we just allocate enough BDEs to accomidate 5750 * our max amount and we need to limit lpfc_sg_seg_cnt to 5751 * minimize the risk of running out. 5752 */ 5753 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5754 sizeof(struct fcp_rsp) + 5755 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); 5756 5757 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 5758 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 5759 5760 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 5761 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 5762 } else { 5763 /* 5764 * The scsi_buf for a regular I/O will hold the FCP cmnd, 5765 * the FCP rsp, a BDE for each, and a BDE for up to 5766 * cfg_sg_seg_cnt data segments. 5767 */ 5768 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5769 sizeof(struct fcp_rsp) + 5770 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 5771 5772 /* Total BDEs in BPL for scsi_sg_list */ 5773 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5774 } 5775 5776 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5777 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 5778 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5779 phba->cfg_total_seg_cnt); 5780 5781 phba->max_vpi = LPFC_MAX_VPI; 5782 /* This will be set to correct value after config_port mbox */ 5783 phba->max_vports = 0; 5784 5785 /* 5786 * Initialize the SLI Layer to run with lpfc HBAs. 5787 */ 5788 lpfc_sli_setup(phba); 5789 lpfc_sli_queue_init(phba); 5790 5791 /* Allocate device driver memory */ 5792 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 5793 return -ENOMEM; 5794 5795 /* 5796 * Enable sr-iov virtual functions if supported and configured 5797 * through the module parameter. 5798 */ 5799 if (phba->cfg_sriov_nr_virtfn > 0) { 5800 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 5801 phba->cfg_sriov_nr_virtfn); 5802 if (rc) { 5803 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5804 "2808 Requested number of SR-IOV " 5805 "virtual functions (%d) is not " 5806 "supported\n", 5807 phba->cfg_sriov_nr_virtfn); 5808 phba->cfg_sriov_nr_virtfn = 0; 5809 } 5810 } 5811 5812 return 0; 5813 } 5814 5815 /** 5816 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 5817 * @phba: pointer to lpfc hba data structure. 5818 * 5819 * This routine is invoked to unset the driver internal resources set up 5820 * specific for supporting the SLI-3 HBA device it attached to. 5821 **/ 5822 static void 5823 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 5824 { 5825 /* Free device driver memory allocated */ 5826 lpfc_mem_free_all(phba); 5827 5828 return; 5829 } 5830 5831 /** 5832 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 5833 * @phba: pointer to lpfc hba data structure. 5834 * 5835 * This routine is invoked to set up the driver internal resources specific to 5836 * support the SLI-4 HBA device it attached to. 
5837 * 5838 * Return codes 5839 * 0 - successful 5840 * other values - error 5841 **/ 5842 static int 5843 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 5844 { 5845 LPFC_MBOXQ_t *mboxq; 5846 MAILBOX_t *mb; 5847 int rc, i, max_buf_size; 5848 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 5849 struct lpfc_mqe *mqe; 5850 int longs; 5851 int fof_vectors = 0; 5852 int extra; 5853 uint64_t wwn; 5854 u32 if_type; 5855 u32 if_fam; 5856 5857 phba->sli4_hba.num_online_cpu = num_online_cpus(); 5858 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 5859 phba->sli4_hba.curr_disp_cpu = 0; 5860 5861 /* Get all the module params for configuring this host */ 5862 lpfc_get_cfgparam(phba); 5863 5864 /* Set up phase-1 common device driver resources */ 5865 rc = lpfc_setup_driver_resource_phase1(phba); 5866 if (rc) 5867 return -ENODEV; 5868 5869 /* Before proceed, wait for POST done and device ready */ 5870 rc = lpfc_sli4_post_status_check(phba); 5871 if (rc) 5872 return -ENODEV; 5873 5874 /* 5875 * Initialize timers used by driver 5876 */ 5877 5878 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 5879 5880 /* FCF rediscover timer */ 5881 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 5882 5883 /* 5884 * Control structure for handling external multi-buffer mailbox 5885 * command pass-through. 5886 */ 5887 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 5888 sizeof(struct lpfc_mbox_ext_buf_ctx)); 5889 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 5890 5891 phba->max_vpi = LPFC_MAX_VPI; 5892 5893 /* This will be set to correct value after the read_config mbox */ 5894 phba->max_vports = 0; 5895 5896 /* Program the default value of vlan_id and fc_map */ 5897 phba->valid_vlan = 0; 5898 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5899 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5900 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5901 5902 /* 5903 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 5904 * we will associate a new ring, for each EQ/CQ/WQ tuple. 5905 * The WQ create will allocate the ring. 5906 */ 5907 5908 /* 5909 * 1 for cmd, 1 for rsp, NVME adds an extra one 5910 * for boundary conditions in its max_sgl_segment template. 5911 */ 5912 extra = 2; 5913 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 5914 extra++; 5915 5916 /* 5917 * It doesn't matter what family our adapter is in, we are 5918 * limited to 2 Pages, 512 SGEs, for our SGL. 5919 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 5920 */ 5921 max_buf_size = (2 * SLI4_PAGE_SIZE); 5922 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra) 5923 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra; 5924 5925 /* 5926 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 5927 * used to create the sg_dma_buf_pool must be calculated. 5928 */ 5929 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 5930 /* 5931 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 5932 * the FCP rsp, and a SGE. Sice we have no control 5933 * over how many protection segments the SCSI Layer 5934 * will hand us (ie: there could be one for every block 5935 * in the IO), just allocate enough SGEs to accomidate 5936 * our max amount and we need to limit lpfc_sg_seg_cnt 5937 * to minimize the risk of running out. 
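 * With this policy, cfg_sg_dma_buf_size below works out to
 * sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 2 * SLI4_PAGE_SIZE,
 * and cfg_sg_seg_cnt is capped at LPFC_MAX_SG_SLI4_SEG_CNT_DIF.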
5938 */ 5939 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5940 sizeof(struct fcp_rsp) + max_buf_size; 5941 5942 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 5943 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 5944 5945 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) 5946 phba->cfg_sg_seg_cnt = 5947 LPFC_MAX_SG_SLI4_SEG_CNT_DIF; 5948 } else { 5949 /* 5950 * The scsi_buf for a regular I/O holds the FCP cmnd, 5951 * the FCP rsp, a SGE for each, and a SGE for up to 5952 * cfg_sg_seg_cnt data segments. 5953 */ 5954 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5955 sizeof(struct fcp_rsp) + 5956 ((phba->cfg_sg_seg_cnt + extra) * 5957 sizeof(struct sli4_sge)); 5958 5959 /* Total SGEs for scsi_sg_list */ 5960 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 5961 5962 /* 5963 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 5964 * need to post 1 page for the SGL. 5965 */ 5966 } 5967 5968 /* Initialize the host templates with the updated values. */ 5969 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5970 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5971 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 5972 5973 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 5974 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 5975 else 5976 phba->cfg_sg_dma_buf_size = 5977 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 5978 5979 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5980 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", 5981 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5982 phba->cfg_total_seg_cnt); 5983 5984 /* Initialize buffer queue management fields */ 5985 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 5986 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 5987 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 5988 5989 /* 5990 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 5991 */ 5992 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 5993 /* Initialize the Abort scsi buffer list used by driver */ 5994 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 5995 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 5996 } 5997 5998 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 5999 /* Initialize the Abort nvme buffer list used by driver */ 6000 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); 6001 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 6002 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6003 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 6004 } 6005 6006 /* This abort list used by worker thread */ 6007 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 6008 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 6009 6010 /* 6011 * Initialize driver internal slow-path work queues 6012 */ 6013 6014 /* Driver internel slow-path CQ Event pool */ 6015 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 6016 /* Response IOCB work queue list */ 6017 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 6018 /* Asynchronous event CQ Event work queue list */ 6019 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 6020 /* Fast-path XRI aborted CQ Event work queue list */ 6021 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 6022 /* Slow-path XRI aborted CQ Event work queue list */ 6023 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 6024 /* Receive queue CQ Event work queue list */ 6025 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 6026 6027 /* Initialize extent block lists. 
*/ 6028 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 6029 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 6030 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6031 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6032 6033 /* Initialize mboxq lists. If the early init routines fail 6034 * these lists need to be correctly initialized. 6035 */ 6036 INIT_LIST_HEAD(&phba->sli.mboxq); 6037 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6038 6039 /* initialize optic_state to 0xFF */ 6040 phba->sli4_hba.lnk_info.optic_state = 0xff; 6041 6042 /* Allocate device driver memory */ 6043 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6044 if (rc) 6045 return -ENOMEM; 6046 6047 /* IF Type 2 ports get initialized now. */ 6048 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 6049 LPFC_SLI_INTF_IF_TYPE_2) { 6050 rc = lpfc_pci_function_reset(phba); 6051 if (unlikely(rc)) { 6052 rc = -ENODEV; 6053 goto out_free_mem; 6054 } 6055 phba->temp_sensor_support = 1; 6056 } 6057 6058 /* Create the bootstrap mailbox command */ 6059 rc = lpfc_create_bootstrap_mbox(phba); 6060 if (unlikely(rc)) 6061 goto out_free_mem; 6062 6063 /* Set up the host's endian order with the device. */ 6064 rc = lpfc_setup_endian_order(phba); 6065 if (unlikely(rc)) 6066 goto out_free_bsmbx; 6067 6068 /* Set up the hba's configuration parameters. */ 6069 rc = lpfc_sli4_read_config(phba); 6070 if (unlikely(rc)) 6071 goto out_free_bsmbx; 6072 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6073 if (unlikely(rc)) 6074 goto out_free_bsmbx; 6075 6076 /* IF Type 0 ports get initialized now. */ 6077 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6078 LPFC_SLI_INTF_IF_TYPE_0) { 6079 rc = lpfc_pci_function_reset(phba); 6080 if (unlikely(rc)) 6081 goto out_free_bsmbx; 6082 } 6083 6084 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6085 GFP_KERNEL); 6086 if (!mboxq) { 6087 rc = -ENOMEM; 6088 goto out_free_bsmbx; 6089 } 6090 6091 /* Check for NVMET being configured */ 6092 phba->nvmet_support = 0; 6093 if (lpfc_enable_nvmet_cnt) { 6094 6095 /* First get WWN of HBA instance */ 6096 lpfc_read_nv(phba, mboxq); 6097 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6098 if (rc != MBX_SUCCESS) { 6099 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6100 "6016 Mailbox failed , mbxCmd x%x " 6101 "READ_NV, mbxStatus x%x\n", 6102 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6103 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6104 mempool_free(mboxq, phba->mbox_mem_pool); 6105 rc = -EIO; 6106 goto out_free_bsmbx; 6107 } 6108 mb = &mboxq->u.mb; 6109 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6110 sizeof(uint64_t)); 6111 wwn = cpu_to_be64(wwn); 6112 phba->sli4_hba.wwnn.u.name = wwn; 6113 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6114 sizeof(uint64_t)); 6115 /* wwn is WWPN of HBA instance */ 6116 wwn = cpu_to_be64(wwn); 6117 phba->sli4_hba.wwpn.u.name = wwn; 6118 6119 /* Check to see if it matches any module parameter */ 6120 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6121 if (wwn == lpfc_enable_nvmet[i]) { 6122 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6123 if (lpfc_nvmet_mem_alloc(phba)) 6124 break; 6125 6126 phba->nvmet_support = 1; /* a match */ 6127 6128 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6129 "6017 NVME Target %016llx\n", 6130 wwn); 6131 #else 6132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6133 "6021 Can't enable NVME Target." 
6134 " NVME_TARGET_FC infrastructure" 6135 " is not in kernel\n"); 6136 #endif 6137 break; 6138 } 6139 } 6140 } 6141 6142 lpfc_nvme_mod_param_dep(phba); 6143 6144 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 6145 lpfc_supported_pages(mboxq); 6146 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6147 if (!rc) { 6148 mqe = &mboxq->u.mqe; 6149 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 6150 LPFC_MAX_SUPPORTED_PAGES); 6151 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 6152 switch (pn_page[i]) { 6153 case LPFC_SLI4_PARAMETERS: 6154 phba->sli4_hba.pc_sli4_params.supported = 1; 6155 break; 6156 default: 6157 break; 6158 } 6159 } 6160 /* Read the port's SLI4 Parameters capabilities if supported. */ 6161 if (phba->sli4_hba.pc_sli4_params.supported) 6162 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6163 if (rc) { 6164 mempool_free(mboxq, phba->mbox_mem_pool); 6165 rc = -EIO; 6166 goto out_free_bsmbx; 6167 } 6168 } 6169 6170 /* 6171 * Get sli4 parameters that override parameters from Port capabilities. 6172 * If this call fails, it isn't critical unless the SLI4 parameters come 6173 * back in conflict. 6174 */ 6175 rc = lpfc_get_sli4_parameters(phba, mboxq); 6176 if (rc) { 6177 if_type = bf_get(lpfc_sli_intf_if_type, 6178 &phba->sli4_hba.sli_intf); 6179 if_fam = bf_get(lpfc_sli_intf_sli_family, 6180 &phba->sli4_hba.sli_intf); 6181 if (phba->sli4_hba.extents_in_use && 6182 phba->sli4_hba.rpi_hdrs_in_use) { 6183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6184 "2999 Unsupported SLI4 Parameters " 6185 "Extents and RPI headers enabled.\n"); 6186 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6187 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6188 mempool_free(mboxq, phba->mbox_mem_pool); 6189 rc = -EIO; 6190 goto out_free_bsmbx; 6191 } 6192 } 6193 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6194 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 6195 mempool_free(mboxq, phba->mbox_mem_pool); 6196 rc = -EIO; 6197 goto out_free_bsmbx; 6198 } 6199 } 6200 6201 mempool_free(mboxq, phba->mbox_mem_pool); 6202 6203 /* Verify OAS is supported */ 6204 lpfc_sli4_oas_verify(phba); 6205 if (phba->cfg_fof) 6206 fof_vectors = 1; 6207 6208 /* Verify all the SLI4 queues */ 6209 rc = lpfc_sli4_queue_verify(phba); 6210 if (rc) 6211 goto out_free_bsmbx; 6212 6213 /* Create driver internal CQE event pool */ 6214 rc = lpfc_sli4_cq_event_pool_create(phba); 6215 if (rc) 6216 goto out_free_bsmbx; 6217 6218 /* Initialize sgl lists per host */ 6219 lpfc_init_sgl_list(phba); 6220 6221 /* Allocate and initialize active sgl array */ 6222 rc = lpfc_init_active_sgl_array(phba); 6223 if (rc) { 6224 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6225 "1430 Failed to initialize sgl list.\n"); 6226 goto out_destroy_cq_event_pool; 6227 } 6228 rc = lpfc_sli4_init_rpi_hdrs(phba); 6229 if (rc) { 6230 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6231 "1432 Failed to initialize rpi headers.\n"); 6232 goto out_free_active_sgl; 6233 } 6234 6235 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6236 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6237 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 6238 GFP_KERNEL); 6239 if (!phba->fcf.fcf_rr_bmask) { 6240 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6241 "2759 Failed allocate memory for FCF round " 6242 "robin failover bmask\n"); 6243 rc = -ENOMEM; 6244 goto out_remove_rpi_hdrs; 6245 } 6246 6247 phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs, 6248 sizeof(struct lpfc_hba_eq_hdl), 6249 GFP_KERNEL); 6250 if 
(!phba->sli4_hba.hba_eq_hdl) { 6251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6252 "2572 Failed allocate memory for " 6253 "fast-path per-EQ handle array\n"); 6254 rc = -ENOMEM; 6255 goto out_free_fcf_rr_bmask; 6256 } 6257 6258 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu, 6259 sizeof(struct lpfc_vector_map_info), 6260 GFP_KERNEL); 6261 if (!phba->sli4_hba.cpu_map) { 6262 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6263 "3327 Failed allocate memory for msi-x " 6264 "interrupt vector mapping\n"); 6265 rc = -ENOMEM; 6266 goto out_free_hba_eq_hdl; 6267 } 6268 if (lpfc_used_cpu == NULL) { 6269 lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t), 6270 GFP_KERNEL); 6271 if (!lpfc_used_cpu) { 6272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6273 "3335 Failed allocate memory for msi-x " 6274 "interrupt vector mapping\n"); 6275 kfree(phba->sli4_hba.cpu_map); 6276 rc = -ENOMEM; 6277 goto out_free_hba_eq_hdl; 6278 } 6279 for (i = 0; i < lpfc_present_cpu; i++) 6280 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; 6281 } 6282 6283 /* 6284 * Enable sr-iov virtual functions if supported and configured 6285 * through the module parameter. 6286 */ 6287 if (phba->cfg_sriov_nr_virtfn > 0) { 6288 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6289 phba->cfg_sriov_nr_virtfn); 6290 if (rc) { 6291 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6292 "3020 Requested number of SR-IOV " 6293 "virtual functions (%d) is not " 6294 "supported\n", 6295 phba->cfg_sriov_nr_virtfn); 6296 phba->cfg_sriov_nr_virtfn = 0; 6297 } 6298 } 6299 6300 return 0; 6301 6302 out_free_hba_eq_hdl: 6303 kfree(phba->sli4_hba.hba_eq_hdl); 6304 out_free_fcf_rr_bmask: 6305 kfree(phba->fcf.fcf_rr_bmask); 6306 out_remove_rpi_hdrs: 6307 lpfc_sli4_remove_rpi_hdrs(phba); 6308 out_free_active_sgl: 6309 lpfc_free_active_sgl(phba); 6310 out_destroy_cq_event_pool: 6311 lpfc_sli4_cq_event_pool_destroy(phba); 6312 out_free_bsmbx: 6313 lpfc_destroy_bootstrap_mbox(phba); 6314 out_free_mem: 6315 lpfc_mem_free(phba); 6316 return rc; 6317 } 6318 6319 /** 6320 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 6321 * @phba: pointer to lpfc hba data structure. 6322 * 6323 * This routine is invoked to unset the driver internal resources set up 6324 * specific for supporting the SLI-4 HBA device it attached to. 6325 **/ 6326 static void 6327 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 6328 { 6329 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 6330 6331 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 6332 kfree(phba->sli4_hba.cpu_map); 6333 phba->sli4_hba.num_present_cpu = 0; 6334 phba->sli4_hba.num_online_cpu = 0; 6335 phba->sli4_hba.curr_disp_cpu = 0; 6336 6337 /* Free memory allocated for fast-path work queue handles */ 6338 kfree(phba->sli4_hba.hba_eq_hdl); 6339 6340 /* Free the allocated rpi headers. */ 6341 lpfc_sli4_remove_rpi_hdrs(phba); 6342 lpfc_sli4_remove_rpis(phba); 6343 6344 /* Free eligible FCF index bmask */ 6345 kfree(phba->fcf.fcf_rr_bmask); 6346 6347 /* Free the ELS sgl list */ 6348 lpfc_free_active_sgl(phba); 6349 lpfc_free_els_sgl_list(phba); 6350 lpfc_free_nvmet_sgl_list(phba); 6351 6352 /* Free the completion queue EQ event pool */ 6353 lpfc_sli4_cq_event_release_all(phba); 6354 lpfc_sli4_cq_event_pool_destroy(phba); 6355 6356 /* Release resource identifiers. */ 6357 lpfc_sli4_dealloc_resource_identifiers(phba); 6358 6359 /* Free the bsmbx region. 
*/ 6360 lpfc_destroy_bootstrap_mbox(phba); 6361 6362 /* Free the SLI Layer memory with SLI4 HBAs */ 6363 lpfc_mem_free_all(phba); 6364 6365 /* Free the current connect table */ 6366 list_for_each_entry_safe(conn_entry, next_conn_entry, 6367 &phba->fcf_conn_rec_list, list) { 6368 list_del_init(&conn_entry->list); 6369 kfree(conn_entry); 6370 } 6371 6372 return; 6373 } 6374 6375 /** 6376 * lpfc_init_api_table_setup - Set up init api function jump table 6377 * @phba: The hba struct for which this call is being executed. 6378 * @dev_grp: The HBA PCI-Device group number. 6379 * 6380 * This routine sets up the device INIT interface API function jump table 6381 * in @phba struct. 6382 * 6383 * Returns: 0 - success, -ENODEV - failure. 6384 **/ 6385 int 6386 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6387 { 6388 phba->lpfc_hba_init_link = lpfc_hba_init_link; 6389 phba->lpfc_hba_down_link = lpfc_hba_down_link; 6390 phba->lpfc_selective_reset = lpfc_selective_reset; 6391 switch (dev_grp) { 6392 case LPFC_PCI_DEV_LP: 6393 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 6394 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 6395 phba->lpfc_stop_port = lpfc_stop_port_s3; 6396 break; 6397 case LPFC_PCI_DEV_OC: 6398 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 6399 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 6400 phba->lpfc_stop_port = lpfc_stop_port_s4; 6401 break; 6402 default: 6403 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6404 "1431 Invalid HBA PCI-device group: 0x%x\n", 6405 dev_grp); 6406 return -ENODEV; 6407 break; 6408 } 6409 return 0; 6410 } 6411 6412 /** 6413 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 6414 * @phba: pointer to lpfc hba data structure. 6415 * 6416 * This routine is invoked to set up the driver internal resources after the 6417 * device specific resource setup to support the HBA device it attached to. 6418 * 6419 * Return codes 6420 * 0 - successful 6421 * other values - error 6422 **/ 6423 static int 6424 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 6425 { 6426 int error; 6427 6428 /* Startup the kernel thread for this host adapter. */ 6429 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6430 "lpfc_worker_%d", phba->brd_no); 6431 if (IS_ERR(phba->worker_thread)) { 6432 error = PTR_ERR(phba->worker_thread); 6433 return error; 6434 } 6435 6436 /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */ 6437 if (phba->sli_rev == LPFC_SLI_REV4) 6438 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6439 else 6440 phba->wq = NULL; 6441 6442 return 0; 6443 } 6444 6445 /** 6446 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 6447 * @phba: pointer to lpfc hba data structure. 6448 * 6449 * This routine is invoked to unset the driver internal resources set up after 6450 * the device specific resource setup for supporting the HBA device it 6451 * attached to. 6452 **/ 6453 static void 6454 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 6455 { 6456 if (phba->wq) { 6457 flush_workqueue(phba->wq); 6458 destroy_workqueue(phba->wq); 6459 phba->wq = NULL; 6460 } 6461 6462 /* Stop kernel worker thread */ 6463 if (phba->worker_thread) 6464 kthread_stop(phba->worker_thread); 6465 } 6466 6467 /** 6468 * lpfc_free_iocb_list - Free iocb list. 6469 * @phba: pointer to lpfc hba data structure. 6470 * 6471 * This routine is invoked to free the driver's IOCB list and memory. 
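 * The list is walked under the hbalock; each iocbq entry is removed from
 * lpfc_iocb_list and freed, and total_iocbq_bufs is decremented accordingly.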
6472 **/ 6473 void 6474 lpfc_free_iocb_list(struct lpfc_hba *phba) 6475 { 6476 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 6477 6478 spin_lock_irq(&phba->hbalock); 6479 list_for_each_entry_safe(iocbq_entry, iocbq_next, 6480 &phba->lpfc_iocb_list, list) { 6481 list_del(&iocbq_entry->list); 6482 kfree(iocbq_entry); 6483 phba->total_iocbq_bufs--; 6484 } 6485 spin_unlock_irq(&phba->hbalock); 6486 6487 return; 6488 } 6489 6490 /** 6491 * lpfc_init_iocb_list - Allocate and initialize iocb list. 6492 * @phba: pointer to lpfc hba data structure. 6493 * 6494 * This routine is invoked to allocate and initizlize the driver's IOCB 6495 * list and set up the IOCB tag array accordingly. 6496 * 6497 * Return codes 6498 * 0 - successful 6499 * other values - error 6500 **/ 6501 int 6502 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 6503 { 6504 struct lpfc_iocbq *iocbq_entry = NULL; 6505 uint16_t iotag; 6506 int i; 6507 6508 /* Initialize and populate the iocb list per host. */ 6509 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 6510 for (i = 0; i < iocb_count; i++) { 6511 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 6512 if (iocbq_entry == NULL) { 6513 printk(KERN_ERR "%s: only allocated %d iocbs of " 6514 "expected %d count. Unloading driver.\n", 6515 __func__, i, LPFC_IOCB_LIST_CNT); 6516 goto out_free_iocbq; 6517 } 6518 6519 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 6520 if (iotag == 0) { 6521 kfree(iocbq_entry); 6522 printk(KERN_ERR "%s: failed to allocate IOTAG. " 6523 "Unloading driver.\n", __func__); 6524 goto out_free_iocbq; 6525 } 6526 iocbq_entry->sli4_lxritag = NO_XRI; 6527 iocbq_entry->sli4_xritag = NO_XRI; 6528 6529 spin_lock_irq(&phba->hbalock); 6530 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 6531 phba->total_iocbq_bufs++; 6532 spin_unlock_irq(&phba->hbalock); 6533 } 6534 6535 return 0; 6536 6537 out_free_iocbq: 6538 lpfc_free_iocb_list(phba); 6539 6540 return -ENOMEM; 6541 } 6542 6543 /** 6544 * lpfc_free_sgl_list - Free a given sgl list. 6545 * @phba: pointer to lpfc hba data structure. 6546 * @sglq_list: pointer to the head of sgl list. 6547 * 6548 * This routine is invoked to free a give sgl list and memory. 6549 **/ 6550 void 6551 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 6552 { 6553 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6554 6555 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 6556 list_del(&sglq_entry->list); 6557 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 6558 kfree(sglq_entry); 6559 } 6560 } 6561 6562 /** 6563 * lpfc_free_els_sgl_list - Free els sgl list. 6564 * @phba: pointer to lpfc hba data structure. 6565 * 6566 * This routine is invoked to free the driver's els sgl list and memory. 6567 **/ 6568 static void 6569 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 6570 { 6571 LIST_HEAD(sglq_list); 6572 6573 /* Retrieve all els sgls from driver list */ 6574 spin_lock_irq(&phba->hbalock); 6575 spin_lock(&phba->sli4_hba.sgl_list_lock); 6576 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 6577 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6578 spin_unlock_irq(&phba->hbalock); 6579 6580 /* Now free the sgl list */ 6581 lpfc_free_sgl_list(phba, &sglq_list); 6582 } 6583 6584 /** 6585 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 6586 * @phba: pointer to lpfc hba data structure. 6587 * 6588 * This routine is invoked to free the driver's nvmet sgl list and memory. 
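 * The sgls are spliced off lpfc_nvmet_sgl_list under the sgl_list_lock,
 * each buffer is returned with lpfc_nvmet_buf_free(), and nvmet_xri_cnt is
 * reset so the next initialization cycle can repost the sgls.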
6589 **/ 6590 static void 6591 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 6592 { 6593 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6594 LIST_HEAD(sglq_list); 6595 6596 /* Retrieve all nvmet sgls from driver list */ 6597 spin_lock_irq(&phba->hbalock); 6598 spin_lock(&phba->sli4_hba.sgl_list_lock); 6599 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 6600 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6601 spin_unlock_irq(&phba->hbalock); 6602 6603 /* Now free the sgl list */ 6604 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 6605 list_del(&sglq_entry->list); 6606 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 6607 kfree(sglq_entry); 6608 } 6609 6610 /* Update the nvmet_xri_cnt to reflect no current sgls. 6611 * The next initialization cycle sets the count and allocates 6612 * the sgls over again. 6613 */ 6614 phba->sli4_hba.nvmet_xri_cnt = 0; 6615 } 6616 6617 /** 6618 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 6619 * @phba: pointer to lpfc hba data structure. 6620 * 6621 * This routine is invoked to allocate the driver's active sgl memory. 6622 * This array will hold the sglq_entry's for active IOs. 6623 **/ 6624 static int 6625 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 6626 { 6627 int size; 6628 size = sizeof(struct lpfc_sglq *); 6629 size *= phba->sli4_hba.max_cfg_param.max_xri; 6630 6631 phba->sli4_hba.lpfc_sglq_active_list = 6632 kzalloc(size, GFP_KERNEL); 6633 if (!phba->sli4_hba.lpfc_sglq_active_list) 6634 return -ENOMEM; 6635 return 0; 6636 } 6637 6638 /** 6639 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 6640 * @phba: pointer to lpfc hba data structure. 6641 * 6642 * This routine is invoked to walk through the array of active sglq entries 6643 * and free all of the resources. 6644 * This is just a place holder for now. 6645 **/ 6646 static void 6647 lpfc_free_active_sgl(struct lpfc_hba *phba) 6648 { 6649 kfree(phba->sli4_hba.lpfc_sglq_active_list); 6650 } 6651 6652 /** 6653 * lpfc_init_sgl_list - Allocate and initialize sgl list. 6654 * @phba: pointer to lpfc hba data structure. 6655 * 6656 * This routine is invoked to allocate and initizlize the driver's sgl 6657 * list and set up the sgl xritag tag array accordingly. 6658 * 6659 **/ 6660 static void 6661 lpfc_init_sgl_list(struct lpfc_hba *phba) 6662 { 6663 /* Initialize and populate the sglq list per host/VF. */ 6664 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 6665 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 6666 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 6667 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6668 6669 /* els xri-sgl book keeping */ 6670 phba->sli4_hba.els_xri_cnt = 0; 6671 6672 /* scsi xri-buffer book keeping */ 6673 phba->sli4_hba.scsi_xri_cnt = 0; 6674 6675 /* nvme xri-buffer book keeping */ 6676 phba->sli4_hba.nvme_xri_cnt = 0; 6677 } 6678 6679 /** 6680 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 6681 * @phba: pointer to lpfc hba data structure. 6682 * 6683 * This routine is invoked to post rpi header templates to the 6684 * port for those SLI4 ports that do not support extents. This routine 6685 * posts a PAGE_SIZE memory region to the port to hold up to 6686 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 6687 * and should be called only when interrupts are disabled. 6688 * 6689 * Return codes 6690 * 0 - successful 6691 * -ERROR - otherwise. 
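 * (-EIO is returned when the port reports extents in use, and -ENODEV when
 * posting the first rpi header region fails.)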
6692 **/ 6693 int 6694 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 6695 { 6696 int rc = 0; 6697 struct lpfc_rpi_hdr *rpi_hdr; 6698 6699 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 6700 if (!phba->sli4_hba.rpi_hdrs_in_use) 6701 return rc; 6702 if (phba->sli4_hba.extents_in_use) 6703 return -EIO; 6704 6705 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 6706 if (!rpi_hdr) { 6707 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6708 "0391 Error during rpi post operation\n"); 6709 lpfc_sli4_remove_rpis(phba); 6710 rc = -ENODEV; 6711 } 6712 6713 return rc; 6714 } 6715 6716 /** 6717 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 6718 * @phba: pointer to lpfc hba data structure. 6719 * 6720 * This routine is invoked to allocate a single 4KB memory region to 6721 * support rpis and stores them in the phba. This single region 6722 * provides support for up to 64 rpis. The region is used globally 6723 * by the device. 6724 * 6725 * Returns: 6726 * A valid rpi hdr on success. 6727 * A NULL pointer on any failure. 6728 **/ 6729 struct lpfc_rpi_hdr * 6730 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 6731 { 6732 uint16_t rpi_limit, curr_rpi_range; 6733 struct lpfc_dmabuf *dmabuf; 6734 struct lpfc_rpi_hdr *rpi_hdr; 6735 6736 /* 6737 * If the SLI4 port supports extents, posting the rpi header isn't 6738 * required. Set the expected maximum count and let the actual value 6739 * get set when extents are fully allocated. 6740 */ 6741 if (!phba->sli4_hba.rpi_hdrs_in_use) 6742 return NULL; 6743 if (phba->sli4_hba.extents_in_use) 6744 return NULL; 6745 6746 /* The limit on the logical index is just the max_rpi count. */ 6747 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 6748 6749 spin_lock_irq(&phba->hbalock); 6750 /* 6751 * Establish the starting RPI in this header block. The starting 6752 * rpi is normalized to a zero base because the physical rpi is 6753 * port based. 6754 */ 6755 curr_rpi_range = phba->sli4_hba.next_rpi; 6756 spin_unlock_irq(&phba->hbalock); 6757 6758 /* Reached full RPI range */ 6759 if (curr_rpi_range == rpi_limit) 6760 return NULL; 6761 6762 /* 6763 * First allocate the protocol header region for the port. The 6764 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 6765 */ 6766 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6767 if (!dmabuf) 6768 return NULL; 6769 6770 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6771 LPFC_HDR_TEMPLATE_SIZE, 6772 &dmabuf->phys, GFP_KERNEL); 6773 if (!dmabuf->virt) { 6774 rpi_hdr = NULL; 6775 goto err_free_dmabuf; 6776 } 6777 6778 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 6779 rpi_hdr = NULL; 6780 goto err_free_coherent; 6781 } 6782 6783 /* Save the rpi header data for cleanup later. */ 6784 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 6785 if (!rpi_hdr) 6786 goto err_free_coherent; 6787 6788 rpi_hdr->dmabuf = dmabuf; 6789 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 6790 rpi_hdr->page_count = 1; 6791 spin_lock_irq(&phba->hbalock); 6792 6793 /* The rpi_hdr stores the logical index only. 
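 * start_rpi below holds the zero-based logical index, and next_rpi advances
 * by LPFC_RPI_HDR_COUNT rpis (one header region covers 64 rpis).
 */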
*/ 6794 rpi_hdr->start_rpi = curr_rpi_range; 6795 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 6796 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 6797 6798 spin_unlock_irq(&phba->hbalock); 6799 return rpi_hdr; 6800 6801 err_free_coherent: 6802 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 6803 dmabuf->virt, dmabuf->phys); 6804 err_free_dmabuf: 6805 kfree(dmabuf); 6806 return NULL; 6807 } 6808 6809 /** 6810 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 6811 * @phba: pointer to lpfc hba data structure. 6812 * 6813 * This routine is invoked to remove all memory resources allocated 6814 * to support rpis for SLI4 ports not supporting extents. This routine 6815 * presumes the caller has released all rpis consumed by fabric or port 6816 * logins and is prepared to have the header pages removed. 6817 **/ 6818 void 6819 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 6820 { 6821 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 6822 6823 if (!phba->sli4_hba.rpi_hdrs_in_use) 6824 goto exit; 6825 6826 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 6827 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 6828 list_del(&rpi_hdr->list); 6829 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 6830 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 6831 kfree(rpi_hdr->dmabuf); 6832 kfree(rpi_hdr); 6833 } 6834 exit: 6835 /* There are no rpis available to the port now. */ 6836 phba->sli4_hba.next_rpi = 0; 6837 } 6838 6839 /** 6840 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 6841 * @pdev: pointer to pci device data structure. 6842 * 6843 * This routine is invoked to allocate the driver hba data structure for an 6844 * HBA device. If the allocation is successful, the phba reference to the 6845 * PCI device data structure is set. 6846 * 6847 * Return codes 6848 * pointer to @phba - successful 6849 * NULL - error 6850 **/ 6851 static struct lpfc_hba * 6852 lpfc_hba_alloc(struct pci_dev *pdev) 6853 { 6854 struct lpfc_hba *phba; 6855 6856 /* Allocate memory for HBA structure */ 6857 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 6858 if (!phba) { 6859 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 6860 return NULL; 6861 } 6862 6863 /* Set reference to PCI device in HBA structure */ 6864 phba->pcidev = pdev; 6865 6866 /* Assign an unused board number */ 6867 phba->brd_no = lpfc_get_instance(); 6868 if (phba->brd_no < 0) { 6869 kfree(phba); 6870 return NULL; 6871 } 6872 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 6873 6874 spin_lock_init(&phba->ct_ev_lock); 6875 INIT_LIST_HEAD(&phba->ct_ev_waiters); 6876 6877 return phba; 6878 } 6879 6880 /** 6881 * lpfc_hba_free - Free driver hba data structure with a device. 6882 * @phba: pointer to lpfc hba data structure. 6883 * 6884 * This routine is invoked to free the driver hba data structure with an 6885 * HBA device. 6886 **/ 6887 static void 6888 lpfc_hba_free(struct lpfc_hba *phba) 6889 { 6890 /* Release the driver assigned board number */ 6891 idr_remove(&lpfc_hba_index, phba->brd_no); 6892 6893 /* Free memory allocated with sli3 rings */ 6894 kfree(phba->sli.sli3_ring); 6895 phba->sli.sli3_ring = NULL; 6896 6897 kfree(phba); 6898 return; 6899 } 6900 6901 /** 6902 * lpfc_create_shost - Create hba physical port with associated scsi host. 6903 * @phba: pointer to lpfc hba data structure. 6904 * 6905 * This routine is invoked to create HBA physical port and associate a SCSI 6906 * host with it. 
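 * When nvmet_support is set, the routine also creates the txrdy_payload_pool
 * DMA pool for the single NVME target port and, on success, restricts
 * cfg_enable_fc4_type to NVME.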
6907 * 6908 * Return codes 6909 * 0 - successful 6910 * other values - error 6911 **/ 6912 static int 6913 lpfc_create_shost(struct lpfc_hba *phba) 6914 { 6915 struct lpfc_vport *vport; 6916 struct Scsi_Host *shost; 6917 6918 /* Initialize HBA FC structure */ 6919 phba->fc_edtov = FF_DEF_EDTOV; 6920 phba->fc_ratov = FF_DEF_RATOV; 6921 phba->fc_altov = FF_DEF_ALTOV; 6922 phba->fc_arbtov = FF_DEF_ARBTOV; 6923 6924 atomic_set(&phba->sdev_cnt, 0); 6925 atomic_set(&phba->fc4ScsiInputRequests, 0); 6926 atomic_set(&phba->fc4ScsiOutputRequests, 0); 6927 atomic_set(&phba->fc4ScsiControlRequests, 0); 6928 atomic_set(&phba->fc4ScsiIoCmpls, 0); 6929 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6930 if (!vport) 6931 return -ENODEV; 6932 6933 shost = lpfc_shost_from_vport(vport); 6934 phba->pport = vport; 6935 6936 if (phba->nvmet_support) { 6937 /* Only 1 vport (pport) will support NVME target */ 6938 if (phba->txrdy_payload_pool == NULL) { 6939 phba->txrdy_payload_pool = dma_pool_create( 6940 "txrdy_pool", &phba->pcidev->dev, 6941 TXRDY_PAYLOAD_LEN, 16, 0); 6942 if (phba->txrdy_payload_pool) { 6943 phba->targetport = NULL; 6944 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 6945 lpfc_printf_log(phba, KERN_INFO, 6946 LOG_INIT | LOG_NVME_DISC, 6947 "6076 NVME Target Found\n"); 6948 } 6949 } 6950 } 6951 6952 lpfc_debugfs_initialize(vport); 6953 /* Put reference to SCSI host to driver's device private data */ 6954 pci_set_drvdata(phba->pcidev, shost); 6955 6956 /* 6957 * At this point we are fully registered with PSA. In addition, 6958 * any initial discovery should be completed. 6959 */ 6960 vport->load_flag |= FC_ALLOW_FDMI; 6961 if (phba->cfg_enable_SmartSAN || 6962 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 6963 6964 /* Setup appropriate attribute masks */ 6965 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 6966 if (phba->cfg_enable_SmartSAN) 6967 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 6968 else 6969 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 6970 } 6971 return 0; 6972 } 6973 6974 /** 6975 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 6976 * @phba: pointer to lpfc hba data structure. 6977 * 6978 * This routine is invoked to destroy HBA physical port and the associated 6979 * SCSI host. 6980 **/ 6981 static void 6982 lpfc_destroy_shost(struct lpfc_hba *phba) 6983 { 6984 struct lpfc_vport *vport = phba->pport; 6985 6986 /* Destroy physical port that associated with the SCSI host */ 6987 destroy_port(vport); 6988 6989 return; 6990 } 6991 6992 /** 6993 * lpfc_setup_bg - Setup Block guard structures and debug areas. 6994 * @phba: pointer to lpfc hba data structure. 6995 * @shost: the shost to be used to detect Block guard settings. 6996 * 6997 * This routine sets up the local Block guard protocol settings for @shost. 6998 * This routine also allocates memory for debugging bg buffers. 
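 * The debug buffers (_dump_buf_data and _dump_buf_dif) are allocated with
 * __get_free_pages(), starting at an order of 10 and backing off one order
 * at a time until an allocation succeeds.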
6999 **/ 7000 static void 7001 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 7002 { 7003 uint32_t old_mask; 7004 uint32_t old_guard; 7005 7006 int pagecnt = 10; 7007 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7008 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7009 "1478 Registering BlockGuard with the " 7010 "SCSI layer\n"); 7011 7012 old_mask = phba->cfg_prot_mask; 7013 old_guard = phba->cfg_prot_guard; 7014 7015 /* Only allow supported values */ 7016 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 7017 SHOST_DIX_TYPE0_PROTECTION | 7018 SHOST_DIX_TYPE1_PROTECTION); 7019 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 7020 SHOST_DIX_GUARD_CRC); 7021 7022 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 7023 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 7024 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 7025 7026 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7027 if ((old_mask != phba->cfg_prot_mask) || 7028 (old_guard != phba->cfg_prot_guard)) 7029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7030 "1475 Registering BlockGuard with the " 7031 "SCSI layer: mask %d guard %d\n", 7032 phba->cfg_prot_mask, 7033 phba->cfg_prot_guard); 7034 7035 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7036 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7037 } else 7038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7039 "1479 Not Registering BlockGuard with the SCSI " 7040 "layer, Bad protection parameters: %d %d\n", 7041 old_mask, old_guard); 7042 } 7043 7044 if (!_dump_buf_data) { 7045 while (pagecnt) { 7046 spin_lock_init(&_dump_buf_lock); 7047 _dump_buf_data = 7048 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 7049 if (_dump_buf_data) { 7050 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7051 "9043 BLKGRD: allocated %d pages for " 7052 "_dump_buf_data at 0x%p\n", 7053 (1 << pagecnt), _dump_buf_data); 7054 _dump_buf_data_order = pagecnt; 7055 memset(_dump_buf_data, 0, 7056 ((1 << PAGE_SHIFT) << pagecnt)); 7057 break; 7058 } else 7059 --pagecnt; 7060 } 7061 if (!_dump_buf_data_order) 7062 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7063 "9044 BLKGRD: ERROR unable to allocate " 7064 "memory for hexdump\n"); 7065 } else 7066 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7067 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 7068 "\n", _dump_buf_data); 7069 if (!_dump_buf_dif) { 7070 while (pagecnt) { 7071 _dump_buf_dif = 7072 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 7073 if (_dump_buf_dif) { 7074 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7075 "9046 BLKGRD: allocated %d pages for " 7076 "_dump_buf_dif at 0x%p\n", 7077 (1 << pagecnt), _dump_buf_dif); 7078 _dump_buf_dif_order = pagecnt; 7079 memset(_dump_buf_dif, 0, 7080 ((1 << PAGE_SHIFT) << pagecnt)); 7081 break; 7082 } else 7083 --pagecnt; 7084 } 7085 if (!_dump_buf_dif_order) 7086 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7087 "9047 BLKGRD: ERROR unable to allocate " 7088 "memory for hexdump\n"); 7089 } else 7090 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7091 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 7092 _dump_buf_dif); 7093 } 7094 7095 /** 7096 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7097 * @phba: pointer to lpfc hba data structure. 7098 * 7099 * This routine is invoked to perform all the necessary post initialization 7100 * setup for the device. 
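 * This includes refreshing shost->can_queue from cfg_hba_queue_depth,
 * setting up Block Guard when enabled, initializing host attributes,
 * optionally starting the FCP ring poll timer, and posting the adapter
 * arrival event to the FC transport.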
7101 **/ 7102 static void 7103 lpfc_post_init_setup(struct lpfc_hba *phba) 7104 { 7105 struct Scsi_Host *shost; 7106 struct lpfc_adapter_event_header adapter_event; 7107 7108 /* Get the default values for Model Name and Description */ 7109 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7110 7111 /* 7112 * hba setup may have changed the hba_queue_depth so we need to 7113 * adjust the value of can_queue. 7114 */ 7115 shost = pci_get_drvdata(phba->pcidev); 7116 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7117 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 7118 lpfc_setup_bg(phba, shost); 7119 7120 lpfc_host_attrib_init(shost); 7121 7122 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7123 spin_lock_irq(shost->host_lock); 7124 lpfc_poll_start_timer(phba); 7125 spin_unlock_irq(shost->host_lock); 7126 } 7127 7128 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7129 "0428 Perform SCSI scan\n"); 7130 /* Send board arrival event to upper layer */ 7131 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7132 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7133 fc_host_post_vendor_event(shost, fc_get_event_number(), 7134 sizeof(adapter_event), 7135 (char *) &adapter_event, 7136 LPFC_NL_VENDOR_ID); 7137 return; 7138 } 7139 7140 /** 7141 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7142 * @phba: pointer to lpfc hba data structure. 7143 * 7144 * This routine is invoked to set up the PCI device memory space for device 7145 * with SLI-3 interface spec. 7146 * 7147 * Return codes 7148 * 0 - successful 7149 * other values - error 7150 **/ 7151 static int 7152 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7153 { 7154 struct pci_dev *pdev; 7155 unsigned long bar0map_len, bar2map_len; 7156 int i, hbq_count; 7157 void *ptr; 7158 int error = -ENODEV; 7159 7160 /* Obtain PCI device reference */ 7161 if (!phba->pcidev) 7162 return error; 7163 else 7164 pdev = phba->pcidev; 7165 7166 /* Set the device DMA mask size */ 7167 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 7168 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 7169 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 7170 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 7171 return error; 7172 } 7173 } 7174 7175 /* Get the bus address of Bar0 and Bar2 and the number of bytes 7176 * required by each mapping. 7177 */ 7178 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7179 bar0map_len = pci_resource_len(pdev, 0); 7180 7181 phba->pci_bar2_map = pci_resource_start(pdev, 2); 7182 bar2map_len = pci_resource_len(pdev, 2); 7183 7184 /* Map HBA SLIM to a kernel virtual address. */ 7185 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 7186 if (!phba->slim_memmap_p) { 7187 dev_printk(KERN_ERR, &pdev->dev, 7188 "ioremap failed for SLIM memory.\n"); 7189 goto out; 7190 } 7191 7192 /* Map HBA Control Registers to a kernel virtual address. 
*/ 7193 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 7194 if (!phba->ctrl_regs_memmap_p) { 7195 dev_printk(KERN_ERR, &pdev->dev, 7196 "ioremap failed for HBA control registers.\n"); 7197 goto out_iounmap_slim; 7198 } 7199 7200 /* Allocate memory for SLI-2 structures */ 7201 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7202 &phba->slim2p.phys, GFP_KERNEL); 7203 if (!phba->slim2p.virt) 7204 goto out_iounmap; 7205 7206 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7207 phba->mbox_ext = (phba->slim2p.virt + 7208 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7209 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7210 phba->IOCBs = (phba->slim2p.virt + 7211 offsetof(struct lpfc_sli2_slim, IOCBs)); 7212 7213 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7214 lpfc_sli_hbq_size(), 7215 &phba->hbqslimp.phys, 7216 GFP_KERNEL); 7217 if (!phba->hbqslimp.virt) 7218 goto out_free_slim; 7219 7220 hbq_count = lpfc_sli_hbq_count(); 7221 ptr = phba->hbqslimp.virt; 7222 for (i = 0; i < hbq_count; ++i) { 7223 phba->hbqs[i].hbq_virt = ptr; 7224 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7225 ptr += (lpfc_hbq_defs[i]->entry_count * 7226 sizeof(struct lpfc_hbq_entry)); 7227 } 7228 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7229 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7230 7231 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7232 7233 phba->MBslimaddr = phba->slim_memmap_p; 7234 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7235 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7236 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7237 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7238 7239 return 0; 7240 7241 out_free_slim: 7242 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7243 phba->slim2p.virt, phba->slim2p.phys); 7244 out_iounmap: 7245 iounmap(phba->ctrl_regs_memmap_p); 7246 out_iounmap_slim: 7247 iounmap(phba->slim_memmap_p); 7248 out: 7249 return error; 7250 } 7251 7252 /** 7253 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7254 * @phba: pointer to lpfc hba data structure. 7255 * 7256 * This routine is invoked to unset the PCI device memory space for device 7257 * with SLI-3 interface spec. 7258 **/ 7259 static void 7260 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 7261 { 7262 struct pci_dev *pdev; 7263 7264 /* Obtain PCI device reference */ 7265 if (!phba->pcidev) 7266 return; 7267 else 7268 pdev = phba->pcidev; 7269 7270 /* Free coherent DMA memory allocated */ 7271 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7272 phba->hbqslimp.virt, phba->hbqslimp.phys); 7273 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7274 phba->slim2p.virt, phba->slim2p.phys); 7275 7276 /* I/O memory unmap */ 7277 iounmap(phba->ctrl_regs_memmap_p); 7278 iounmap(phba->slim_memmap_p); 7279 7280 return; 7281 } 7282 7283 /** 7284 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 7285 * @phba: pointer to lpfc hba data structure. 7286 * 7287 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 7288 * done and check status. 7289 * 7290 * Return 0 if successful, otherwise -ENODEV. 
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks.  The port status should be clean.
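			 * Note: a status error is treated as fatal here only
			 * when the port is not simply requesting a reset,
			 * i.e. the error bit is set while the reset-needed
			 * (rn) bit is clear in the port status register.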
			 */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
							ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
							ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR1 register memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_SLIPORT_IF0_SMPHR;
		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISR0;
		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_IMR0;
		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISCR0;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_CQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_EQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_MQ_DOORBELL;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_err(&phba->pcidev->dev,
			"FATAL - unsupported SLI4 interface type - %d\n",
			if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
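 *
 * The doorbell registers for each virtual function live in their own page,
 * so every address computed below is offset by vf * LPFC_VFR_PAGE_SIZE from
 * the start of the BAR2 doorbell mapping.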
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
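	 * For reference: addr_hi ends up carrying physical address bits
	 * 63:34 and addr_lo bits 33:4; each 30-bit value is shifted left by
	 * two and OR'd with a flag bit that marks it as the high or the low
	 * half of the bootstrap mailbox address.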
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources.  This routine requires
 * the caller to ensure all mailbox commands have been recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
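 *
 * The values read here are cached in phba->sli4_hba.max_cfg_param and are
 * consumed later in initialization, for example when lpfc_sli4_queue_verify()
 * bounds the number of IO channels against the number of EQs the port exposes.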
7698 **/ 7699 int 7700 lpfc_sli4_read_config(struct lpfc_hba *phba) 7701 { 7702 LPFC_MBOXQ_t *pmb; 7703 struct lpfc_mbx_read_config *rd_config; 7704 union lpfc_sli4_cfg_shdr *shdr; 7705 uint32_t shdr_status, shdr_add_status; 7706 struct lpfc_mbx_get_func_cfg *get_func_cfg; 7707 struct lpfc_rsrc_desc_fcfcoe *desc; 7708 char *pdesc_0; 7709 uint16_t forced_link_speed; 7710 uint32_t if_type; 7711 int length, i, rc = 0, rc2; 7712 7713 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7714 if (!pmb) { 7715 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7716 "2011 Unable to allocate memory for issuing " 7717 "SLI_CONFIG_SPECIAL mailbox command\n"); 7718 return -ENOMEM; 7719 } 7720 7721 lpfc_read_config(phba, pmb); 7722 7723 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7724 if (rc != MBX_SUCCESS) { 7725 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7726 "2012 Mailbox failed , mbxCmd x%x " 7727 "READ_CONFIG, mbxStatus x%x\n", 7728 bf_get(lpfc_mqe_command, &pmb->u.mqe), 7729 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 7730 rc = -EIO; 7731 } else { 7732 rd_config = &pmb->u.mqe.un.rd_config; 7733 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 7734 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 7735 phba->sli4_hba.lnk_info.lnk_tp = 7736 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 7737 phba->sli4_hba.lnk_info.lnk_no = 7738 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 7739 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7740 "3081 lnk_type:%d, lnk_numb:%d\n", 7741 phba->sli4_hba.lnk_info.lnk_tp, 7742 phba->sli4_hba.lnk_info.lnk_no); 7743 } else 7744 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 7745 "3082 Mailbox (x%x) returned ldv:x0\n", 7746 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 7747 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 7748 phba->bbcredit_support = 1; 7749 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 7750 } 7751 7752 phba->sli4_hba.extents_in_use = 7753 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 7754 phba->sli4_hba.max_cfg_param.max_xri = 7755 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 7756 phba->sli4_hba.max_cfg_param.xri_base = 7757 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 7758 phba->sli4_hba.max_cfg_param.max_vpi = 7759 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 7760 phba->sli4_hba.max_cfg_param.vpi_base = 7761 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 7762 phba->sli4_hba.max_cfg_param.max_rpi = 7763 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 7764 phba->sli4_hba.max_cfg_param.rpi_base = 7765 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 7766 phba->sli4_hba.max_cfg_param.max_vfi = 7767 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 7768 phba->sli4_hba.max_cfg_param.vfi_base = 7769 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 7770 phba->sli4_hba.max_cfg_param.max_fcfi = 7771 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 7772 phba->sli4_hba.max_cfg_param.max_eq = 7773 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 7774 phba->sli4_hba.max_cfg_param.max_rq = 7775 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 7776 phba->sli4_hba.max_cfg_param.max_wq = 7777 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 7778 phba->sli4_hba.max_cfg_param.max_cq = 7779 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 7780 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 7781 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 7782 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 7783 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 7784 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 
7785 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 7786 phba->max_vports = phba->max_vpi; 7787 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7788 "2003 cfg params Extents? %d " 7789 "XRI(B:%d M:%d), " 7790 "VPI(B:%d M:%d) " 7791 "VFI(B:%d M:%d) " 7792 "RPI(B:%d M:%d) " 7793 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", 7794 phba->sli4_hba.extents_in_use, 7795 phba->sli4_hba.max_cfg_param.xri_base, 7796 phba->sli4_hba.max_cfg_param.max_xri, 7797 phba->sli4_hba.max_cfg_param.vpi_base, 7798 phba->sli4_hba.max_cfg_param.max_vpi, 7799 phba->sli4_hba.max_cfg_param.vfi_base, 7800 phba->sli4_hba.max_cfg_param.max_vfi, 7801 phba->sli4_hba.max_cfg_param.rpi_base, 7802 phba->sli4_hba.max_cfg_param.max_rpi, 7803 phba->sli4_hba.max_cfg_param.max_fcfi, 7804 phba->sli4_hba.max_cfg_param.max_eq, 7805 phba->sli4_hba.max_cfg_param.max_cq, 7806 phba->sli4_hba.max_cfg_param.max_wq, 7807 phba->sli4_hba.max_cfg_param.max_rq); 7808 7809 /* 7810 * Calculate NVME queue resources based on how 7811 * many WQ/CQs are available. 7812 */ 7813 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 7814 length = phba->sli4_hba.max_cfg_param.max_wq; 7815 if (phba->sli4_hba.max_cfg_param.max_cq < 7816 phba->sli4_hba.max_cfg_param.max_wq) 7817 length = phba->sli4_hba.max_cfg_param.max_cq; 7818 7819 /* 7820 * Whats left after this can go toward NVME. 7821 * The minus 6 accounts for ELS, NVME LS, MBOX 7822 * fof plus a couple extra. When configured for 7823 * NVMET, FCP io channel WQs are not created. 7824 */ 7825 length -= 6; 7826 if (!phba->nvmet_support) 7827 length -= phba->cfg_fcp_io_channel; 7828 7829 if (phba->cfg_nvme_io_channel > length) { 7830 lpfc_printf_log( 7831 phba, KERN_ERR, LOG_SLI, 7832 "2005 Reducing NVME IO channel to %d: " 7833 "WQ %d CQ %d NVMEIO %d FCPIO %d\n", 7834 length, 7835 phba->sli4_hba.max_cfg_param.max_wq, 7836 phba->sli4_hba.max_cfg_param.max_cq, 7837 phba->cfg_nvme_io_channel, 7838 phba->cfg_fcp_io_channel); 7839 7840 phba->cfg_nvme_io_channel = length; 7841 } 7842 } 7843 } 7844 7845 if (rc) 7846 goto read_cfg_out; 7847 7848 /* Update link speed if forced link speed is supported */ 7849 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7850 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 7851 forced_link_speed = 7852 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 7853 if (forced_link_speed) { 7854 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 7855 7856 switch (forced_link_speed) { 7857 case LINK_SPEED_1G: 7858 phba->cfg_link_speed = 7859 LPFC_USER_LINK_SPEED_1G; 7860 break; 7861 case LINK_SPEED_2G: 7862 phba->cfg_link_speed = 7863 LPFC_USER_LINK_SPEED_2G; 7864 break; 7865 case LINK_SPEED_4G: 7866 phba->cfg_link_speed = 7867 LPFC_USER_LINK_SPEED_4G; 7868 break; 7869 case LINK_SPEED_8G: 7870 phba->cfg_link_speed = 7871 LPFC_USER_LINK_SPEED_8G; 7872 break; 7873 case LINK_SPEED_10G: 7874 phba->cfg_link_speed = 7875 LPFC_USER_LINK_SPEED_10G; 7876 break; 7877 case LINK_SPEED_16G: 7878 phba->cfg_link_speed = 7879 LPFC_USER_LINK_SPEED_16G; 7880 break; 7881 case LINK_SPEED_32G: 7882 phba->cfg_link_speed = 7883 LPFC_USER_LINK_SPEED_32G; 7884 break; 7885 case LINK_SPEED_64G: 7886 phba->cfg_link_speed = 7887 LPFC_USER_LINK_SPEED_64G; 7888 break; 7889 case 0xffff: 7890 phba->cfg_link_speed = 7891 LPFC_USER_LINK_SPEED_AUTO; 7892 break; 7893 default: 7894 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7895 "0047 Unrecognized link " 7896 "speed : %d\n", 7897 forced_link_speed); 7898 phba->cfg_link_speed = 7899 LPFC_USER_LINK_SPEED_AUTO; 7900 } 7901 } 7902 } 7903 7904 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 7905 
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed, mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for the fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the port-side endian order when
 * the port if_type is 0. This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
7990 **/ 7991 static int 7992 lpfc_setup_endian_order(struct lpfc_hba *phba) 7993 { 7994 LPFC_MBOXQ_t *mboxq; 7995 uint32_t if_type, rc = 0; 7996 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 7997 HOST_ENDIAN_HIGH_WORD1}; 7998 7999 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8000 switch (if_type) { 8001 case LPFC_SLI_INTF_IF_TYPE_0: 8002 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8003 GFP_KERNEL); 8004 if (!mboxq) { 8005 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8006 "0492 Unable to allocate memory for " 8007 "issuing SLI_CONFIG_SPECIAL mailbox " 8008 "command\n"); 8009 return -ENOMEM; 8010 } 8011 8012 /* 8013 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 8014 * two words to contain special data values and no other data. 8015 */ 8016 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 8017 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8018 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8019 if (rc != MBX_SUCCESS) { 8020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8021 "0493 SLI_CONFIG_SPECIAL mailbox " 8022 "failed with status x%x\n", 8023 rc); 8024 rc = -EIO; 8025 } 8026 mempool_free(mboxq, phba->mbox_mem_pool); 8027 break; 8028 case LPFC_SLI_INTF_IF_TYPE_6: 8029 case LPFC_SLI_INTF_IF_TYPE_2: 8030 case LPFC_SLI_INTF_IF_TYPE_1: 8031 default: 8032 break; 8033 } 8034 return rc; 8035 } 8036 8037 /** 8038 * lpfc_sli4_queue_verify - Verify and update EQ counts 8039 * @phba: pointer to lpfc hba data structure. 8040 * 8041 * This routine is invoked to check the user settable queue counts for EQs. 8042 * After this routine is called the counts will be set to valid values that 8043 * adhere to the constraints of the system's interrupt vectors and the port's 8044 * queue resources. 8045 * 8046 * Return codes 8047 * 0 - successful 8048 * -ENOMEM - No available memory 8049 **/ 8050 static int 8051 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 8052 { 8053 int io_channel; 8054 int fof_vectors = phba->cfg_fof ? 
1 : 0; 8055 8056 /* 8057 * Sanity check for configured queue parameters against the run-time 8058 * device parameters 8059 */ 8060 8061 /* Sanity check on HBA EQ parameters */ 8062 io_channel = phba->io_channel_irqs; 8063 8064 if (phba->sli4_hba.num_online_cpu < io_channel) { 8065 lpfc_printf_log(phba, 8066 KERN_ERR, LOG_INIT, 8067 "3188 Reducing IO channels to match number of " 8068 "online CPUs: from %d to %d\n", 8069 io_channel, phba->sli4_hba.num_online_cpu); 8070 io_channel = phba->sli4_hba.num_online_cpu; 8071 } 8072 8073 if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) { 8074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8075 "2575 Reducing IO channels to match number of " 8076 "available EQs: from %d to %d\n", 8077 io_channel, 8078 phba->sli4_hba.max_cfg_param.max_eq); 8079 io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors; 8080 } 8081 8082 /* The actual number of FCP / NVME event queues adopted */ 8083 if (io_channel != phba->io_channel_irqs) 8084 phba->io_channel_irqs = io_channel; 8085 if (phba->cfg_fcp_io_channel > io_channel) 8086 phba->cfg_fcp_io_channel = io_channel; 8087 if (phba->cfg_nvme_io_channel > io_channel) 8088 phba->cfg_nvme_io_channel = io_channel; 8089 if (phba->nvmet_support) { 8090 if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq) 8091 phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; 8092 } 8093 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 8094 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 8095 8096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8097 "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n", 8098 phba->io_channel_irqs, phba->cfg_fcp_io_channel, 8099 phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq); 8100 8101 /* Get EQ depth from module parameter, fake the default for now */ 8102 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8103 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8104 8105 /* Get CQ depth from module parameter, fake the default for now */ 8106 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8107 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8108 return 0; 8109 } 8110 8111 static int 8112 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) 8113 { 8114 struct lpfc_queue *qdesc; 8115 8116 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8117 phba->sli4_hba.cq_esize, 8118 LPFC_CQE_EXP_COUNT); 8119 if (!qdesc) { 8120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8121 "0508 Failed allocate fast-path NVME CQ (%d)\n", 8122 wqidx); 8123 return 1; 8124 } 8125 qdesc->qe_valid = 1; 8126 phba->sli4_hba.nvme_cq[wqidx] = qdesc; 8127 8128 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8129 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT); 8130 if (!qdesc) { 8131 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8132 "0509 Failed allocate fast-path NVME WQ (%d)\n", 8133 wqidx); 8134 return 1; 8135 } 8136 phba->sli4_hba.nvme_wq[wqidx] = qdesc; 8137 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8138 return 0; 8139 } 8140 8141 static int 8142 lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) 8143 { 8144 struct lpfc_queue *qdesc; 8145 uint32_t wqesize; 8146 8147 /* Create Fast Path FCP CQs */ 8148 if (phba->enab_exp_wqcq_pages) 8149 /* Increase the CQ size when WQEs contain an embedded cdb */ 8150 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8151 phba->sli4_hba.cq_esize, 8152 LPFC_CQE_EXP_COUNT); 8153 8154 else 8155 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8156 phba->sli4_hba.cq_esize, 8157 phba->sli4_hba.cq_ecount); 8158 if (!qdesc) { 8159 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 
8160 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); 8161 return 1; 8162 } 8163 qdesc->qe_valid = 1; 8164 phba->sli4_hba.fcp_cq[wqidx] = qdesc; 8165 8166 /* Create Fast Path FCP WQs */ 8167 if (phba->enab_exp_wqcq_pages) { 8168 /* Increase the WQ size when WQEs contain an embedded cdb */ 8169 wqesize = (phba->fcp_embed_io) ? 8170 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 8171 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8172 wqesize, 8173 LPFC_WQE_EXP_COUNT); 8174 } else 8175 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8176 phba->sli4_hba.wq_esize, 8177 phba->sli4_hba.wq_ecount); 8178 8179 if (!qdesc) { 8180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8181 "0503 Failed allocate fast-path FCP WQ (%d)\n", 8182 wqidx); 8183 return 1; 8184 } 8185 phba->sli4_hba.fcp_wq[wqidx] = qdesc; 8186 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8187 return 0; 8188 } 8189 8190 /** 8191 * lpfc_sli4_queue_create - Create all the SLI4 queues 8192 * @phba: pointer to lpfc hba data structure. 8193 * 8194 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 8195 * operation. For each SLI4 queue type, the parameters such as queue entry 8196 * count (queue depth) shall be taken from the module parameter. For now, 8197 * we just use some constant number as place holder. 8198 * 8199 * Return codes 8200 * 0 - successful 8201 * -ENOMEM - No availble memory 8202 * -EIO - The mailbox failed to complete successfully. 8203 **/ 8204 int 8205 lpfc_sli4_queue_create(struct lpfc_hba *phba) 8206 { 8207 struct lpfc_queue *qdesc; 8208 int idx, io_channel; 8209 8210 /* 8211 * Create HBA Record arrays. 8212 * Both NVME and FCP will share that same vectors / EQs 8213 */ 8214 io_channel = phba->io_channel_irqs; 8215 if (!io_channel) 8216 return -ERANGE; 8217 8218 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 8219 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 8220 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 8221 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 8222 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 8223 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 8224 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8225 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8226 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8227 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8228 8229 phba->sli4_hba.hba_eq = kcalloc(io_channel, 8230 sizeof(struct lpfc_queue *), 8231 GFP_KERNEL); 8232 if (!phba->sli4_hba.hba_eq) { 8233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8234 "2576 Failed allocate memory for " 8235 "fast-path EQ record array\n"); 8236 goto out_error; 8237 } 8238 8239 if (phba->cfg_fcp_io_channel) { 8240 phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel, 8241 sizeof(struct lpfc_queue *), 8242 GFP_KERNEL); 8243 if (!phba->sli4_hba.fcp_cq) { 8244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8245 "2577 Failed allocate memory for " 8246 "fast-path CQ record array\n"); 8247 goto out_error; 8248 } 8249 phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel, 8250 sizeof(struct lpfc_queue *), 8251 GFP_KERNEL); 8252 if (!phba->sli4_hba.fcp_wq) { 8253 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8254 "2578 Failed allocate memory for " 8255 "fast-path FCP WQ record array\n"); 8256 goto out_error; 8257 } 8258 /* 8259 * Since the first EQ can have multiple CQs associated with it, 8260 * this array is used to quickly see if we have a FCP fast-path 8261 * CQ match. 
8262 */ 8263 phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel, 8264 sizeof(uint16_t), 8265 GFP_KERNEL); 8266 if (!phba->sli4_hba.fcp_cq_map) { 8267 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8268 "2545 Failed allocate memory for " 8269 "fast-path CQ map\n"); 8270 goto out_error; 8271 } 8272 } 8273 8274 if (phba->cfg_nvme_io_channel) { 8275 phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel, 8276 sizeof(struct lpfc_queue *), 8277 GFP_KERNEL); 8278 if (!phba->sli4_hba.nvme_cq) { 8279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8280 "6077 Failed allocate memory for " 8281 "fast-path CQ record array\n"); 8282 goto out_error; 8283 } 8284 8285 phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel, 8286 sizeof(struct lpfc_queue *), 8287 GFP_KERNEL); 8288 if (!phba->sli4_hba.nvme_wq) { 8289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8290 "2581 Failed allocate memory for " 8291 "fast-path NVME WQ record array\n"); 8292 goto out_error; 8293 } 8294 8295 /* 8296 * Since the first EQ can have multiple CQs associated with it, 8297 * this array is used to quickly see if we have a NVME fast-path 8298 * CQ match. 8299 */ 8300 phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel, 8301 sizeof(uint16_t), 8302 GFP_KERNEL); 8303 if (!phba->sli4_hba.nvme_cq_map) { 8304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8305 "6078 Failed allocate memory for " 8306 "fast-path CQ map\n"); 8307 goto out_error; 8308 } 8309 8310 if (phba->nvmet_support) { 8311 phba->sli4_hba.nvmet_cqset = kcalloc( 8312 phba->cfg_nvmet_mrq, 8313 sizeof(struct lpfc_queue *), 8314 GFP_KERNEL); 8315 if (!phba->sli4_hba.nvmet_cqset) { 8316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8317 "3121 Fail allocate memory for " 8318 "fast-path CQ set array\n"); 8319 goto out_error; 8320 } 8321 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 8322 phba->cfg_nvmet_mrq, 8323 sizeof(struct lpfc_queue *), 8324 GFP_KERNEL); 8325 if (!phba->sli4_hba.nvmet_mrq_hdr) { 8326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8327 "3122 Fail allocate memory for " 8328 "fast-path RQ set hdr array\n"); 8329 goto out_error; 8330 } 8331 phba->sli4_hba.nvmet_mrq_data = kcalloc( 8332 phba->cfg_nvmet_mrq, 8333 sizeof(struct lpfc_queue *), 8334 GFP_KERNEL); 8335 if (!phba->sli4_hba.nvmet_mrq_data) { 8336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8337 "3124 Fail allocate memory for " 8338 "fast-path RQ set data array\n"); 8339 goto out_error; 8340 } 8341 } 8342 } 8343 8344 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8345 8346 /* Create HBA Event Queues (EQs) */ 8347 for (idx = 0; idx < io_channel; idx++) { 8348 /* Create EQs */ 8349 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8350 phba->sli4_hba.eq_esize, 8351 phba->sli4_hba.eq_ecount); 8352 if (!qdesc) { 8353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8354 "0497 Failed allocate EQ (%d)\n", idx); 8355 goto out_error; 8356 } 8357 qdesc->qe_valid = 1; 8358 phba->sli4_hba.hba_eq[idx] = qdesc; 8359 } 8360 8361 /* FCP and NVME io channels are not required to be balanced */ 8362 8363 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) 8364 if (lpfc_alloc_fcp_wq_cq(phba, idx)) 8365 goto out_error; 8366 8367 for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) 8368 if (lpfc_alloc_nvme_wq_cq(phba, idx)) 8369 goto out_error; 8370 8371 if (phba->nvmet_support) { 8372 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8373 qdesc = lpfc_sli4_queue_alloc(phba, 8374 LPFC_DEFAULT_PAGE_SIZE, 8375 phba->sli4_hba.cq_esize, 8376 phba->sli4_hba.cq_ecount); 8377 if (!qdesc) { 8378 lpfc_printf_log(phba, KERN_ERR, 
LOG_INIT, 8379 "3142 Failed allocate NVME " 8380 "CQ Set (%d)\n", idx); 8381 goto out_error; 8382 } 8383 qdesc->qe_valid = 1; 8384 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 8385 } 8386 } 8387 8388 /* 8389 * Create Slow Path Completion Queues (CQs) 8390 */ 8391 8392 /* Create slow-path Mailbox Command Complete Queue */ 8393 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8394 phba->sli4_hba.cq_esize, 8395 phba->sli4_hba.cq_ecount); 8396 if (!qdesc) { 8397 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8398 "0500 Failed allocate slow-path mailbox CQ\n"); 8399 goto out_error; 8400 } 8401 qdesc->qe_valid = 1; 8402 phba->sli4_hba.mbx_cq = qdesc; 8403 8404 /* Create slow-path ELS Complete Queue */ 8405 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8406 phba->sli4_hba.cq_esize, 8407 phba->sli4_hba.cq_ecount); 8408 if (!qdesc) { 8409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8410 "0501 Failed allocate slow-path ELS CQ\n"); 8411 goto out_error; 8412 } 8413 qdesc->qe_valid = 1; 8414 phba->sli4_hba.els_cq = qdesc; 8415 8416 8417 /* 8418 * Create Slow Path Work Queues (WQs) 8419 */ 8420 8421 /* Create Mailbox Command Queue */ 8422 8423 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8424 phba->sli4_hba.mq_esize, 8425 phba->sli4_hba.mq_ecount); 8426 if (!qdesc) { 8427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8428 "0505 Failed allocate slow-path MQ\n"); 8429 goto out_error; 8430 } 8431 phba->sli4_hba.mbx_wq = qdesc; 8432 8433 /* 8434 * Create ELS Work Queues 8435 */ 8436 8437 /* Create slow-path ELS Work Queue */ 8438 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8439 phba->sli4_hba.wq_esize, 8440 phba->sli4_hba.wq_ecount); 8441 if (!qdesc) { 8442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8443 "0504 Failed allocate slow-path ELS WQ\n"); 8444 goto out_error; 8445 } 8446 phba->sli4_hba.els_wq = qdesc; 8447 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8448 8449 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8450 /* Create NVME LS Complete Queue */ 8451 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8452 phba->sli4_hba.cq_esize, 8453 phba->sli4_hba.cq_ecount); 8454 if (!qdesc) { 8455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8456 "6079 Failed allocate NVME LS CQ\n"); 8457 goto out_error; 8458 } 8459 qdesc->qe_valid = 1; 8460 phba->sli4_hba.nvmels_cq = qdesc; 8461 8462 /* Create NVME LS Work Queue */ 8463 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8464 phba->sli4_hba.wq_esize, 8465 phba->sli4_hba.wq_ecount); 8466 if (!qdesc) { 8467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8468 "6080 Failed allocate NVME LS WQ\n"); 8469 goto out_error; 8470 } 8471 phba->sli4_hba.nvmels_wq = qdesc; 8472 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8473 } 8474 8475 /* 8476 * Create Receive Queue (RQ) 8477 */ 8478 8479 /* Create Receive Queue for header */ 8480 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8481 phba->sli4_hba.rq_esize, 8482 phba->sli4_hba.rq_ecount); 8483 if (!qdesc) { 8484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8485 "0506 Failed allocate receive HRQ\n"); 8486 goto out_error; 8487 } 8488 phba->sli4_hba.hdr_rq = qdesc; 8489 8490 /* Create Receive Queue for data */ 8491 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8492 phba->sli4_hba.rq_esize, 8493 phba->sli4_hba.rq_ecount); 8494 if (!qdesc) { 8495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8496 "0507 Failed allocate receive DRQ\n"); 8497 goto out_error; 8498 } 8499 phba->sli4_hba.dat_rq = qdesc; 8500 8501 if 
(phba->nvmet_support) { 8502 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8503 /* Create NVMET Receive Queue for header */ 8504 qdesc = lpfc_sli4_queue_alloc(phba, 8505 LPFC_DEFAULT_PAGE_SIZE, 8506 phba->sli4_hba.rq_esize, 8507 LPFC_NVMET_RQE_DEF_COUNT); 8508 if (!qdesc) { 8509 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8510 "3146 Failed allocate " 8511 "receive HRQ\n"); 8512 goto out_error; 8513 } 8514 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 8515 8516 /* Only needed for header of RQ pair */ 8517 qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb), 8518 GFP_KERNEL); 8519 if (qdesc->rqbp == NULL) { 8520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8521 "6131 Failed allocate " 8522 "Header RQBP\n"); 8523 goto out_error; 8524 } 8525 8526 /* Put list in known state in case driver load fails. */ 8527 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 8528 8529 /* Create NVMET Receive Queue for data */ 8530 qdesc = lpfc_sli4_queue_alloc(phba, 8531 LPFC_DEFAULT_PAGE_SIZE, 8532 phba->sli4_hba.rq_esize, 8533 LPFC_NVMET_RQE_DEF_COUNT); 8534 if (!qdesc) { 8535 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8536 "3156 Failed allocate " 8537 "receive DRQ\n"); 8538 goto out_error; 8539 } 8540 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 8541 } 8542 } 8543 8544 /* Create the Queues needed for Flash Optimized Fabric operations */ 8545 if (phba->cfg_fof) 8546 lpfc_fof_queue_create(phba); 8547 return 0; 8548 8549 out_error: 8550 lpfc_sli4_queue_destroy(phba); 8551 return -ENOMEM; 8552 } 8553 8554 static inline void 8555 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 8556 { 8557 if (*qp != NULL) { 8558 lpfc_sli4_queue_free(*qp); 8559 *qp = NULL; 8560 } 8561 } 8562 8563 static inline void 8564 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 8565 { 8566 int idx; 8567 8568 if (*qs == NULL) 8569 return; 8570 8571 for (idx = 0; idx < max; idx++) 8572 __lpfc_sli4_release_queue(&(*qs)[idx]); 8573 8574 kfree(*qs); 8575 *qs = NULL; 8576 } 8577 8578 static inline void 8579 lpfc_sli4_release_queue_map(uint16_t **qmap) 8580 { 8581 if (*qmap != NULL) { 8582 kfree(*qmap); 8583 *qmap = NULL; 8584 } 8585 } 8586 8587 /** 8588 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 8589 * @phba: pointer to lpfc hba data structure. 8590 * 8591 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 8592 * operation. 8593 * 8594 * Return codes 8595 * 0 - successful 8596 * -ENOMEM - No available memory 8597 * -EIO - The mailbox failed to complete successfully. 
8598 **/ 8599 void 8600 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 8601 { 8602 if (phba->cfg_fof) 8603 lpfc_fof_queue_destroy(phba); 8604 8605 /* Release HBA eqs */ 8606 lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs); 8607 8608 /* Release FCP cqs */ 8609 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq, 8610 phba->cfg_fcp_io_channel); 8611 8612 /* Release FCP wqs */ 8613 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq, 8614 phba->cfg_fcp_io_channel); 8615 8616 /* Release FCP CQ mapping array */ 8617 lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map); 8618 8619 /* Release NVME cqs */ 8620 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq, 8621 phba->cfg_nvme_io_channel); 8622 8623 /* Release NVME wqs */ 8624 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq, 8625 phba->cfg_nvme_io_channel); 8626 8627 /* Release NVME CQ mapping array */ 8628 lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map); 8629 8630 if (phba->nvmet_support) { 8631 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 8632 phba->cfg_nvmet_mrq); 8633 8634 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 8635 phba->cfg_nvmet_mrq); 8636 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 8637 phba->cfg_nvmet_mrq); 8638 } 8639 8640 /* Release mailbox command work queue */ 8641 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 8642 8643 /* Release ELS work queue */ 8644 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 8645 8646 /* Release ELS work queue */ 8647 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 8648 8649 /* Release unsolicited receive queue */ 8650 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 8651 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 8652 8653 /* Release ELS complete queue */ 8654 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 8655 8656 /* Release NVME LS complete queue */ 8657 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 8658 8659 /* Release mailbox command complete queue */ 8660 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 8661 8662 /* Everything on this list has been freed */ 8663 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8664 } 8665 8666 int 8667 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 8668 { 8669 struct lpfc_rqb *rqbp; 8670 struct lpfc_dmabuf *h_buf; 8671 struct rqb_dmabuf *rqb_buffer; 8672 8673 rqbp = rq->rqbp; 8674 while (!list_empty(&rqbp->rqb_buffer_list)) { 8675 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 8676 struct lpfc_dmabuf, list); 8677 8678 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 8679 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 8680 rqbp->buffer_count--; 8681 } 8682 return 1; 8683 } 8684 8685 static int 8686 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 8687 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 8688 int qidx, uint32_t qtype) 8689 { 8690 struct lpfc_sli_ring *pring; 8691 int rc; 8692 8693 if (!eq || !cq || !wq) { 8694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8695 "6085 Fast-path %s (%d) not allocated\n", 8696 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 8697 return -ENOMEM; 8698 } 8699 8700 /* create the Cq first */ 8701 rc = lpfc_cq_create(phba, cq, eq, 8702 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 8703 if (rc) { 8704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8705 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 8706 qidx, (uint32_t)rc); 8707 return rc; 8708 } 8709 cq->chann = qidx; 8710 8711 if (qtype != LPFC_MBOX) { 8712 /* Setup nvme_cq_map for fast lookup */ 8713 if (cq_map) 8714 *cq_map = cq->queue_id; 8715 8716 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8717 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 8718 qidx, cq->queue_id, qidx, eq->queue_id); 8719 8720 /* create the wq */ 8721 rc = lpfc_wq_create(phba, wq, cq, qtype); 8722 if (rc) { 8723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8724 "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n", 8725 qidx, (uint32_t)rc); 8726 /* no need to tear down cq - caller will do so */ 8727 return rc; 8728 } 8729 wq->chann = qidx; 8730 8731 /* Bind this CQ/WQ to the NVME ring */ 8732 pring = wq->pring; 8733 pring->sli.sli4.wqp = (void *)wq; 8734 cq->pring = pring; 8735 8736 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8737 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 8738 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 8739 } else { 8740 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 8741 if (rc) { 8742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8743 "0539 Failed setup of slow-path MQ: " 8744 "rc = 0x%x\n", rc); 8745 /* no need to tear down cq - caller will do so */ 8746 return rc; 8747 } 8748 8749 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8750 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 8751 phba->sli4_hba.mbx_wq->queue_id, 8752 phba->sli4_hba.mbx_cq->queue_id); 8753 } 8754 8755 return 0; 8756 } 8757 8758 /** 8759 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 8760 * @phba: pointer to lpfc hba data structure. 8761 * 8762 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 8763 * operation. 8764 * 8765 * Return codes 8766 * 0 - successful 8767 * -ENOMEM - No available memory 8768 * -EIO - The mailbox failed to complete successfully. 
8769 **/ 8770 int 8771 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 8772 { 8773 uint32_t shdr_status, shdr_add_status; 8774 union lpfc_sli4_cfg_shdr *shdr; 8775 LPFC_MBOXQ_t *mboxq; 8776 int qidx; 8777 uint32_t length, io_channel; 8778 int rc = -ENOMEM; 8779 8780 /* Check for dual-ULP support */ 8781 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8782 if (!mboxq) { 8783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8784 "3249 Unable to allocate memory for " 8785 "QUERY_FW_CFG mailbox command\n"); 8786 return -ENOMEM; 8787 } 8788 length = (sizeof(struct lpfc_mbx_query_fw_config) - 8789 sizeof(struct lpfc_sli4_cfg_mhdr)); 8790 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 8791 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 8792 length, LPFC_SLI4_MBX_EMBED); 8793 8794 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8795 8796 shdr = (union lpfc_sli4_cfg_shdr *) 8797 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 8798 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8799 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8800 if (shdr_status || shdr_add_status || rc) { 8801 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8802 "3250 QUERY_FW_CFG mailbox failed with status " 8803 "x%x add_status x%x, mbx status x%x\n", 8804 shdr_status, shdr_add_status, rc); 8805 if (rc != MBX_TIMEOUT) 8806 mempool_free(mboxq, phba->mbox_mem_pool); 8807 rc = -ENXIO; 8808 goto out_error; 8809 } 8810 8811 phba->sli4_hba.fw_func_mode = 8812 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 8813 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 8814 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 8815 phba->sli4_hba.physical_port = 8816 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 8817 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8818 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 8819 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 8820 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 8821 8822 if (rc != MBX_TIMEOUT) 8823 mempool_free(mboxq, phba->mbox_mem_pool); 8824 8825 /* 8826 * Set up HBA Event Queues (EQs) 8827 */ 8828 io_channel = phba->io_channel_irqs; 8829 8830 /* Set up HBA event queue */ 8831 if (io_channel && !phba->sli4_hba.hba_eq) { 8832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8833 "3147 Fast-path EQs not allocated\n"); 8834 rc = -ENOMEM; 8835 goto out_error; 8836 } 8837 for (qidx = 0; qidx < io_channel; qidx++) { 8838 if (!phba->sli4_hba.hba_eq[qidx]) { 8839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8840 "0522 Fast-path EQ (%d) not " 8841 "allocated\n", qidx); 8842 rc = -ENOMEM; 8843 goto out_destroy; 8844 } 8845 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx], 8846 phba->cfg_fcp_imax); 8847 if (rc) { 8848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8849 "0523 Failed setup of fast-path EQ " 8850 "(%d), rc = 0x%x\n", qidx, 8851 (uint32_t)rc); 8852 goto out_destroy; 8853 } 8854 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8855 "2584 HBA EQ setup: queue[%d]-id=%d\n", 8856 qidx, phba->sli4_hba.hba_eq[qidx]->queue_id); 8857 } 8858 8859 if (phba->cfg_nvme_io_channel) { 8860 if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) { 8861 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8862 "6084 Fast-path NVME %s array not allocated\n", 8863 (phba->sli4_hba.nvme_cq) ? 
"CQ" : "WQ"); 8864 rc = -ENOMEM; 8865 goto out_destroy; 8866 } 8867 8868 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { 8869 rc = lpfc_create_wq_cq(phba, 8870 phba->sli4_hba.hba_eq[ 8871 qidx % io_channel], 8872 phba->sli4_hba.nvme_cq[qidx], 8873 phba->sli4_hba.nvme_wq[qidx], 8874 &phba->sli4_hba.nvme_cq_map[qidx], 8875 qidx, LPFC_NVME); 8876 if (rc) { 8877 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8878 "6123 Failed to setup fastpath " 8879 "NVME WQ/CQ (%d), rc = 0x%x\n", 8880 qidx, (uint32_t)rc); 8881 goto out_destroy; 8882 } 8883 } 8884 } 8885 8886 if (phba->cfg_fcp_io_channel) { 8887 /* Set up fast-path FCP Response Complete Queue */ 8888 if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) { 8889 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8890 "3148 Fast-path FCP %s array not allocated\n", 8891 phba->sli4_hba.fcp_cq ? "WQ" : "CQ"); 8892 rc = -ENOMEM; 8893 goto out_destroy; 8894 } 8895 8896 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { 8897 rc = lpfc_create_wq_cq(phba, 8898 phba->sli4_hba.hba_eq[ 8899 qidx % io_channel], 8900 phba->sli4_hba.fcp_cq[qidx], 8901 phba->sli4_hba.fcp_wq[qidx], 8902 &phba->sli4_hba.fcp_cq_map[qidx], 8903 qidx, LPFC_FCP); 8904 if (rc) { 8905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8906 "0535 Failed to setup fastpath " 8907 "FCP WQ/CQ (%d), rc = 0x%x\n", 8908 qidx, (uint32_t)rc); 8909 goto out_destroy; 8910 } 8911 } 8912 } 8913 8914 /* 8915 * Set up Slow Path Complete Queues (CQs) 8916 */ 8917 8918 /* Set up slow-path MBOX CQ/MQ */ 8919 8920 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 8921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8922 "0528 %s not allocated\n", 8923 phba->sli4_hba.mbx_cq ? 8924 "Mailbox WQ" : "Mailbox CQ"); 8925 rc = -ENOMEM; 8926 goto out_destroy; 8927 } 8928 8929 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], 8930 phba->sli4_hba.mbx_cq, 8931 phba->sli4_hba.mbx_wq, 8932 NULL, 0, LPFC_MBOX); 8933 if (rc) { 8934 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8935 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 8936 (uint32_t)rc); 8937 goto out_destroy; 8938 } 8939 if (phba->nvmet_support) { 8940 if (!phba->sli4_hba.nvmet_cqset) { 8941 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8942 "3165 Fast-path NVME CQ Set " 8943 "array not allocated\n"); 8944 rc = -ENOMEM; 8945 goto out_destroy; 8946 } 8947 if (phba->cfg_nvmet_mrq > 1) { 8948 rc = lpfc_cq_create_set(phba, 8949 phba->sli4_hba.nvmet_cqset, 8950 phba->sli4_hba.hba_eq, 8951 LPFC_WCQ, LPFC_NVMET); 8952 if (rc) { 8953 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8954 "3164 Failed setup of NVME CQ " 8955 "Set, rc = 0x%x\n", 8956 (uint32_t)rc); 8957 goto out_destroy; 8958 } 8959 } else { 8960 /* Set up NVMET Receive Complete Queue */ 8961 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 8962 phba->sli4_hba.hba_eq[0], 8963 LPFC_WCQ, LPFC_NVMET); 8964 if (rc) { 8965 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8966 "6089 Failed setup NVMET CQ: " 8967 "rc = 0x%x\n", (uint32_t)rc); 8968 goto out_destroy; 8969 } 8970 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 8971 8972 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8973 "6090 NVMET CQ setup: cq-id=%d, " 8974 "parent eq-id=%d\n", 8975 phba->sli4_hba.nvmet_cqset[0]->queue_id, 8976 phba->sli4_hba.hba_eq[0]->queue_id); 8977 } 8978 } 8979 8980 /* Set up slow-path ELS WQ/CQ */ 8981 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 8982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8983 "0530 ELS %s not allocated\n", 8984 phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); 8985 rc = -ENOMEM; 8986 goto out_destroy; 8987 } 8988 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], 8989 phba->sli4_hba.els_cq, 8990 phba->sli4_hba.els_wq, 8991 NULL, 0, LPFC_ELS); 8992 if (rc) { 8993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8994 "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 8995 (uint32_t)rc); 8996 goto out_destroy; 8997 } 8998 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8999 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 9000 phba->sli4_hba.els_wq->queue_id, 9001 phba->sli4_hba.els_cq->queue_id); 9002 9003 if (phba->cfg_nvme_io_channel) { 9004 /* Set up NVME LS Complete Queue */ 9005 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9007 "6091 LS %s not allocated\n", 9008 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 9009 rc = -ENOMEM; 9010 goto out_destroy; 9011 } 9012 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], 9013 phba->sli4_hba.nvmels_cq, 9014 phba->sli4_hba.nvmels_wq, 9015 NULL, 0, LPFC_NVME_LS); 9016 if (rc) { 9017 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9018 "0529 Failed setup of NVVME LS WQ/CQ: " 9019 "rc = 0x%x\n", (uint32_t)rc); 9020 goto out_destroy; 9021 } 9022 9023 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9024 "6096 ELS WQ setup: wq-id=%d, " 9025 "parent cq-id=%d\n", 9026 phba->sli4_hba.nvmels_wq->queue_id, 9027 phba->sli4_hba.nvmels_cq->queue_id); 9028 } 9029 9030 /* 9031 * Create NVMET Receive Queue (RQ) 9032 */ 9033 if (phba->nvmet_support) { 9034 if ((!phba->sli4_hba.nvmet_cqset) || 9035 (!phba->sli4_hba.nvmet_mrq_hdr) || 9036 (!phba->sli4_hba.nvmet_mrq_data)) { 9037 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9038 "6130 MRQ CQ Queues not " 9039 "allocated\n"); 9040 rc = -ENOMEM; 9041 goto out_destroy; 9042 } 9043 if (phba->cfg_nvmet_mrq > 1) { 9044 rc = lpfc_mrq_create(phba, 9045 phba->sli4_hba.nvmet_mrq_hdr, 9046 phba->sli4_hba.nvmet_mrq_data, 9047 phba->sli4_hba.nvmet_cqset, 9048 LPFC_NVMET); 9049 if (rc) { 9050 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9051 "6098 Failed setup of NVMET " 9052 "MRQ: rc = 0x%x\n", 9053 (uint32_t)rc); 9054 goto out_destroy; 9055 } 9056 9057 } else { 9058 rc = lpfc_rq_create(phba, 9059 phba->sli4_hba.nvmet_mrq_hdr[0], 9060 phba->sli4_hba.nvmet_mrq_data[0], 9061 phba->sli4_hba.nvmet_cqset[0], 9062 LPFC_NVMET); 9063 if (rc) { 9064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9065 "6057 Failed setup of NVMET " 9066 "Receive Queue: rc = 0x%x\n", 9067 (uint32_t)rc); 9068 goto out_destroy; 9069 } 9070 9071 lpfc_printf_log( 9072 phba, KERN_INFO, LOG_INIT, 9073 "6099 NVMET RQ setup: hdr-rq-id=%d, " 9074 "dat-rq-id=%d parent cq-id=%d\n", 9075 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 9076 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 9077 phba->sli4_hba.nvmet_cqset[0]->queue_id); 9078 9079 } 9080 } 9081 9082 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 9083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9084 "0540 Receive Queue not allocated\n"); 9085 rc = -ENOMEM; 9086 goto out_destroy; 9087 } 9088 9089 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 9090 phba->sli4_hba.els_cq, LPFC_USOL); 9091 if (rc) { 9092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9093 "0541 Failed setup of Receive Queue: " 9094 "rc = 0x%x\n", (uint32_t)rc); 9095 goto out_destroy; 9096 } 9097 9098 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9099 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 9100 "parent cq-id=%d\n", 9101 phba->sli4_hba.hdr_rq->queue_id, 9102 phba->sli4_hba.dat_rq->queue_id, 9103 phba->sli4_hba.els_cq->queue_id); 9104 9105 
if (phba->cfg_fof) { 9106 rc = lpfc_fof_queue_setup(phba); 9107 if (rc) { 9108 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9109 "0549 Failed setup of FOF Queues: " 9110 "rc = 0x%x\n", rc); 9111 goto out_destroy; 9112 } 9113 } 9114 9115 for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 9116 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 9117 phba->cfg_fcp_imax); 9118 9119 return 0; 9120 9121 out_destroy: 9122 lpfc_sli4_queue_unset(phba); 9123 out_error: 9124 return rc; 9125 } 9126 9127 /** 9128 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 9129 * @phba: pointer to lpfc hba data structure. 9130 * 9131 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 9132 * operation. 9133 * 9134 * Return codes 9135 * 0 - successful 9136 * -ENOMEM - No available memory 9137 * -EIO - The mailbox failed to complete successfully. 9138 **/ 9139 void 9140 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 9141 { 9142 int qidx; 9143 9144 /* Unset the queues created for Flash Optimized Fabric operations */ 9145 if (phba->cfg_fof) 9146 lpfc_fof_queue_destroy(phba); 9147 9148 /* Unset mailbox command work queue */ 9149 if (phba->sli4_hba.mbx_wq) 9150 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 9151 9152 /* Unset NVME LS work queue */ 9153 if (phba->sli4_hba.nvmels_wq) 9154 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 9155 9156 /* Unset ELS work queue */ 9157 if (phba->sli4_hba.els_wq) 9158 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 9159 9160 /* Unset unsolicited receive queue */ 9161 if (phba->sli4_hba.hdr_rq) 9162 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 9163 phba->sli4_hba.dat_rq); 9164 9165 /* Unset FCP work queue */ 9166 if (phba->sli4_hba.fcp_wq) 9167 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) 9168 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]); 9169 9170 /* Unset NVME work queue */ 9171 if (phba->sli4_hba.nvme_wq) { 9172 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 9173 lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]); 9174 } 9175 9176 /* Unset mailbox command complete queue */ 9177 if (phba->sli4_hba.mbx_cq) 9178 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 9179 9180 /* Unset ELS complete queue */ 9181 if (phba->sli4_hba.els_cq) 9182 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 9183 9184 /* Unset NVME LS complete queue */ 9185 if (phba->sli4_hba.nvmels_cq) 9186 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 9187 9188 /* Unset NVME response complete queue */ 9189 if (phba->sli4_hba.nvme_cq) 9190 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 9191 lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]); 9192 9193 if (phba->nvmet_support) { 9194 /* Unset NVMET MRQ queue */ 9195 if (phba->sli4_hba.nvmet_mrq_hdr) { 9196 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9197 lpfc_rq_destroy( 9198 phba, 9199 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9200 phba->sli4_hba.nvmet_mrq_data[qidx]); 9201 } 9202 9203 /* Unset NVMET CQ Set complete queue */ 9204 if (phba->sli4_hba.nvmet_cqset) { 9205 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9206 lpfc_cq_destroy( 9207 phba, phba->sli4_hba.nvmet_cqset[qidx]); 9208 } 9209 } 9210 9211 /* Unset FCP response complete queue */ 9212 if (phba->sli4_hba.fcp_cq) 9213 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) 9214 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]); 9215 9216 /* Unset fast-path event queue */ 9217 if (phba->sli4_hba.hba_eq) 9218 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) 9219 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]); 9220 
} 9221 9222 /** 9223 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 9224 * @phba: pointer to lpfc hba data structure. 9225 * 9226 * This routine is invoked to allocate and set up a pool of completion queue 9227 * events. The body of the completion queue event is a completion queue entry 9228 * CQE. For now, this pool is used for the interrupt service routine to queue 9229 * the following HBA completion queue events for the worker thread to process: 9230 * - Mailbox asynchronous events 9231 * - Receive queue completion unsolicited events 9232 * Later, this can be used for all the slow-path events. 9233 * 9234 * Return codes 9235 * 0 - successful 9236 * -ENOMEM - No available memory 9237 **/ 9238 static int 9239 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 9240 { 9241 struct lpfc_cq_event *cq_event; 9242 int i; 9243 9244 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 9245 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 9246 if (!cq_event) 9247 goto out_pool_create_fail; 9248 list_add_tail(&cq_event->list, 9249 &phba->sli4_hba.sp_cqe_event_pool); 9250 } 9251 return 0; 9252 9253 out_pool_create_fail: 9254 lpfc_sli4_cq_event_pool_destroy(phba); 9255 return -ENOMEM; 9256 } 9257 9258 /** 9259 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 9260 * @phba: pointer to lpfc hba data structure. 9261 * 9262 * This routine is invoked to free the pool of completion queue events at 9263 * driver unload time. Note that, it is the responsibility of the driver 9264 * cleanup routine to free all the outstanding completion-queue events 9265 * allocated from this pool back into the pool before invoking this routine 9266 * to destroy the pool. 9267 **/ 9268 static void 9269 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 9270 { 9271 struct lpfc_cq_event *cq_event, *next_cq_event; 9272 9273 list_for_each_entry_safe(cq_event, next_cq_event, 9274 &phba->sli4_hba.sp_cqe_event_pool, list) { 9275 list_del(&cq_event->list); 9276 kfree(cq_event); 9277 } 9278 } 9279 9280 /** 9281 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9282 * @phba: pointer to lpfc hba data structure. 9283 * 9284 * This routine is the lock free version of the API invoked to allocate a 9285 * completion-queue event from the free pool. 9286 * 9287 * Return: Pointer to the newly allocated completion-queue event if successful 9288 * NULL otherwise. 9289 **/ 9290 struct lpfc_cq_event * 9291 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9292 { 9293 struct lpfc_cq_event *cq_event = NULL; 9294 9295 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 9296 struct lpfc_cq_event, list); 9297 return cq_event; 9298 } 9299 9300 /** 9301 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9302 * @phba: pointer to lpfc hba data structure. 9303 * 9304 * This routine is the lock version of the API invoked to allocate a 9305 * completion-queue event from the free pool. 9306 * 9307 * Return: Pointer to the newly allocated completion-queue event if successful 9308 * NULL otherwise. 
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine releases all pending completion-queue events back into the
 * free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function that originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
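 *
 * For SLI_INTF_IF_TYPE_0 ports the reset is requested with a FUNCTION_RESET
 * mailbox command; for IF_TYPE_2 and IF_TYPE_6 ports the routine writes
 * INIT_PORT to the port control register and then polls the port status
 * register for RDY for up to 30 seconds.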
9399 **/ 9400 int 9401 lpfc_pci_function_reset(struct lpfc_hba *phba) 9402 { 9403 LPFC_MBOXQ_t *mboxq; 9404 uint32_t rc = 0, if_type; 9405 uint32_t shdr_status, shdr_add_status; 9406 uint32_t rdy_chk; 9407 uint32_t port_reset = 0; 9408 union lpfc_sli4_cfg_shdr *shdr; 9409 struct lpfc_register reg_data; 9410 uint16_t devid; 9411 9412 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9413 switch (if_type) { 9414 case LPFC_SLI_INTF_IF_TYPE_0: 9415 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 9416 GFP_KERNEL); 9417 if (!mboxq) { 9418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9419 "0494 Unable to allocate memory for " 9420 "issuing SLI_FUNCTION_RESET mailbox " 9421 "command\n"); 9422 return -ENOMEM; 9423 } 9424 9425 /* Setup PCI function reset mailbox-ioctl command */ 9426 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9427 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 9428 LPFC_SLI4_MBX_EMBED); 9429 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9430 shdr = (union lpfc_sli4_cfg_shdr *) 9431 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9432 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9433 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 9434 &shdr->response); 9435 if (rc != MBX_TIMEOUT) 9436 mempool_free(mboxq, phba->mbox_mem_pool); 9437 if (shdr_status || shdr_add_status || rc) { 9438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9439 "0495 SLI_FUNCTION_RESET mailbox " 9440 "failed with status x%x add_status x%x," 9441 " mbx status x%x\n", 9442 shdr_status, shdr_add_status, rc); 9443 rc = -ENXIO; 9444 } 9445 break; 9446 case LPFC_SLI_INTF_IF_TYPE_2: 9447 case LPFC_SLI_INTF_IF_TYPE_6: 9448 wait: 9449 /* 9450 * Poll the Port Status Register and wait for RDY for 9451 * up to 30 seconds. If the port doesn't respond, treat 9452 * it as an error. 9453 */ 9454 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 9455 if (lpfc_readl(phba->sli4_hba.u.if_type2. 9456 STATUSregaddr, ®_data.word0)) { 9457 rc = -ENODEV; 9458 goto out; 9459 } 9460 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 9461 break; 9462 msleep(20); 9463 } 9464 9465 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 9466 phba->work_status[0] = readl( 9467 phba->sli4_hba.u.if_type2.ERR1regaddr); 9468 phba->work_status[1] = readl( 9469 phba->sli4_hba.u.if_type2.ERR2regaddr); 9470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9471 "2890 Port not ready, port status reg " 9472 "0x%x error 1=0x%x, error 2=0x%x\n", 9473 reg_data.word0, 9474 phba->work_status[0], 9475 phba->work_status[1]); 9476 rc = -ENODEV; 9477 goto out; 9478 } 9479 9480 if (!port_reset) { 9481 /* 9482 * Reset the port now 9483 */ 9484 reg_data.word0 = 0; 9485 bf_set(lpfc_sliport_ctrl_end, ®_data, 9486 LPFC_SLIPORT_LITTLE_ENDIAN); 9487 bf_set(lpfc_sliport_ctrl_ip, ®_data, 9488 LPFC_SLIPORT_INIT_PORT); 9489 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 9490 CTRLregaddr); 9491 /* flush */ 9492 pci_read_config_word(phba->pcidev, 9493 PCI_DEVICE_ID, &devid); 9494 9495 port_reset = 1; 9496 msleep(20); 9497 goto wait; 9498 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 9499 rc = -ENODEV; 9500 goto out; 9501 } 9502 break; 9503 9504 case LPFC_SLI_INTF_IF_TYPE_1: 9505 default: 9506 break; 9507 } 9508 9509 out: 9510 /* Catch the not-ready port failure after a port reset. 
*/ 9511 if (rc) { 9512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9513 "3317 HBA not functional: IP Reset Failed " 9514 "try: echo fw_reset > board_mode\n"); 9515 rc = -ENODEV; 9516 } 9517 9518 return rc; 9519 } 9520 9521 /** 9522 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 9523 * @phba: pointer to lpfc hba data structure. 9524 * 9525 * This routine is invoked to set up the PCI device memory space for device 9526 * with SLI-4 interface spec. 9527 * 9528 * Return codes 9529 * 0 - successful 9530 * other values - error 9531 **/ 9532 static int 9533 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 9534 { 9535 struct pci_dev *pdev; 9536 unsigned long bar0map_len, bar1map_len, bar2map_len; 9537 int error = -ENODEV; 9538 uint32_t if_type; 9539 9540 /* Obtain PCI device reference */ 9541 if (!phba->pcidev) 9542 return error; 9543 else 9544 pdev = phba->pcidev; 9545 9546 /* Set the device DMA mask size */ 9547 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 9548 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 9549 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 9550 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 9551 return error; 9552 } 9553 } 9554 9555 /* 9556 * The BARs and register set definitions and offset locations are 9557 * dependent on the if_type. 9558 */ 9559 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 9560 &phba->sli4_hba.sli_intf.word0)) { 9561 return error; 9562 } 9563 9564 /* There is no SLI3 failback for SLI4 devices. */ 9565 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 9566 LPFC_SLI_INTF_VALID) { 9567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9568 "2894 SLI_INTF reg contents invalid " 9569 "sli_intf reg 0x%x\n", 9570 phba->sli4_hba.sli_intf.word0); 9571 return error; 9572 } 9573 9574 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9575 /* 9576 * Get the bus address of SLI4 device Bar regions and the 9577 * number of bytes required by each mapping. The mapping of the 9578 * particular PCI BARs regions is dependent on the type of 9579 * SLI4 device. 
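	 * BAR0 normally provides the SLI4 config registers; if_type 0
	 * devices additionally expose the control registers through
	 * PCI_64BIT_BAR2 and the doorbells through PCI_64BIT_BAR4, while
	 * if_type 6 devices place the doorbells in PCI_64BIT_BAR2 and the
	 * DPP region in PCI_64BIT_BAR4.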
9580 */ 9581 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 9582 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 9583 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 9584 9585 /* 9586 * Map SLI4 PCI Config Space Register base to a kernel virtual 9587 * addr 9588 */ 9589 phba->sli4_hba.conf_regs_memmap_p = 9590 ioremap(phba->pci_bar0_map, bar0map_len); 9591 if (!phba->sli4_hba.conf_regs_memmap_p) { 9592 dev_printk(KERN_ERR, &pdev->dev, 9593 "ioremap failed for SLI4 PCI config " 9594 "registers.\n"); 9595 goto out; 9596 } 9597 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 9598 /* Set up BAR0 PCI config space register memory map */ 9599 lpfc_sli4_bar0_register_memmap(phba, if_type); 9600 } else { 9601 phba->pci_bar0_map = pci_resource_start(pdev, 1); 9602 bar0map_len = pci_resource_len(pdev, 1); 9603 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9604 dev_printk(KERN_ERR, &pdev->dev, 9605 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 9606 goto out; 9607 } 9608 phba->sli4_hba.conf_regs_memmap_p = 9609 ioremap(phba->pci_bar0_map, bar0map_len); 9610 if (!phba->sli4_hba.conf_regs_memmap_p) { 9611 dev_printk(KERN_ERR, &pdev->dev, 9612 "ioremap failed for SLI4 PCI config " 9613 "registers.\n"); 9614 goto out; 9615 } 9616 lpfc_sli4_bar0_register_memmap(phba, if_type); 9617 } 9618 9619 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 9620 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 9621 /* 9622 * Map SLI4 if type 0 HBA Control Register base to a 9623 * kernel virtual address and setup the registers. 9624 */ 9625 phba->pci_bar1_map = pci_resource_start(pdev, 9626 PCI_64BIT_BAR2); 9627 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 9628 phba->sli4_hba.ctrl_regs_memmap_p = 9629 ioremap(phba->pci_bar1_map, 9630 bar1map_len); 9631 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 9632 dev_err(&pdev->dev, 9633 "ioremap failed for SLI4 HBA " 9634 "control registers.\n"); 9635 error = -ENOMEM; 9636 goto out_iounmap_conf; 9637 } 9638 phba->pci_bar2_memmap_p = 9639 phba->sli4_hba.ctrl_regs_memmap_p; 9640 lpfc_sli4_bar1_register_memmap(phba, if_type); 9641 } else { 9642 error = -ENOMEM; 9643 goto out_iounmap_conf; 9644 } 9645 } 9646 9647 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 9648 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 9649 /* 9650 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 9651 * virtual address and setup the registers. 9652 */ 9653 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 9654 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 9655 phba->sli4_hba.drbl_regs_memmap_p = 9656 ioremap(phba->pci_bar1_map, bar1map_len); 9657 if (!phba->sli4_hba.drbl_regs_memmap_p) { 9658 dev_err(&pdev->dev, 9659 "ioremap failed for SLI4 HBA doorbell registers.\n"); 9660 goto out_iounmap_conf; 9661 } 9662 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 9663 lpfc_sli4_bar1_register_memmap(phba, if_type); 9664 } 9665 9666 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 9667 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 9668 /* 9669 * Map SLI4 if type 0 HBA Doorbell Register base to 9670 * a kernel virtual address and setup the registers. 
9671 */ 9672 phba->pci_bar2_map = pci_resource_start(pdev, 9673 PCI_64BIT_BAR4); 9674 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 9675 phba->sli4_hba.drbl_regs_memmap_p = 9676 ioremap(phba->pci_bar2_map, 9677 bar2map_len); 9678 if (!phba->sli4_hba.drbl_regs_memmap_p) { 9679 dev_err(&pdev->dev, 9680 "ioremap failed for SLI4 HBA" 9681 " doorbell registers.\n"); 9682 error = -ENOMEM; 9683 goto out_iounmap_ctrl; 9684 } 9685 phba->pci_bar4_memmap_p = 9686 phba->sli4_hba.drbl_regs_memmap_p; 9687 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 9688 if (error) 9689 goto out_iounmap_all; 9690 } else { 9691 error = -ENOMEM; 9692 goto out_iounmap_all; 9693 } 9694 } 9695 9696 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 9697 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 9698 /* 9699 * Map SLI4 if type 6 HBA DPP Register base to a kernel 9700 * virtual address and setup the registers. 9701 */ 9702 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 9703 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 9704 phba->sli4_hba.dpp_regs_memmap_p = 9705 ioremap(phba->pci_bar2_map, bar2map_len); 9706 if (!phba->sli4_hba.dpp_regs_memmap_p) { 9707 dev_err(&pdev->dev, 9708 "ioremap failed for SLI4 HBA dpp registers.\n"); 9709 goto out_iounmap_ctrl; 9710 } 9711 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 9712 } 9713 9714 /* Set up the EQ/CQ register handeling functions now */ 9715 switch (if_type) { 9716 case LPFC_SLI_INTF_IF_TYPE_0: 9717 case LPFC_SLI_INTF_IF_TYPE_2: 9718 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 9719 phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release; 9720 phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release; 9721 break; 9722 case LPFC_SLI_INTF_IF_TYPE_6: 9723 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 9724 phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release; 9725 phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release; 9726 break; 9727 default: 9728 break; 9729 } 9730 9731 return 0; 9732 9733 out_iounmap_all: 9734 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 9735 out_iounmap_ctrl: 9736 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 9737 out_iounmap_conf: 9738 iounmap(phba->sli4_hba.conf_regs_memmap_p); 9739 out: 9740 return error; 9741 } 9742 9743 /** 9744 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 9745 * @phba: pointer to lpfc hba data structure. 9746 * 9747 * This routine is invoked to unset the PCI device memory space for device 9748 * with SLI-4 interface spec. 9749 **/ 9750 static void 9751 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 9752 { 9753 uint32_t if_type; 9754 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9755 9756 switch (if_type) { 9757 case LPFC_SLI_INTF_IF_TYPE_0: 9758 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 9759 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 9760 iounmap(phba->sli4_hba.conf_regs_memmap_p); 9761 break; 9762 case LPFC_SLI_INTF_IF_TYPE_2: 9763 iounmap(phba->sli4_hba.conf_regs_memmap_p); 9764 break; 9765 case LPFC_SLI_INTF_IF_TYPE_6: 9766 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 9767 iounmap(phba->sli4_hba.conf_regs_memmap_p); 9768 break; 9769 case LPFC_SLI_INTF_IF_TYPE_1: 9770 default: 9771 dev_printk(KERN_ERR, &phba->pcidev->dev, 9772 "FATAL - unsupported SLI4 interface type - %d\n", 9773 if_type); 9774 break; 9775 } 9776 } 9777 9778 /** 9779 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 9780 * @phba: pointer to lpfc hba data structure. 
9781 * 9782 * This routine is invoked to enable the MSI-X interrupt vectors to device 9783 * with SLI-3 interface specs. 9784 * 9785 * Return codes 9786 * 0 - successful 9787 * other values - error 9788 **/ 9789 static int 9790 lpfc_sli_enable_msix(struct lpfc_hba *phba) 9791 { 9792 int rc; 9793 LPFC_MBOXQ_t *pmb; 9794 9795 /* Set up MSI-X multi-message vectors */ 9796 rc = pci_alloc_irq_vectors(phba->pcidev, 9797 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 9798 if (rc < 0) { 9799 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9800 "0420 PCI enable MSI-X failed (%d)\n", rc); 9801 goto vec_fail_out; 9802 } 9803 9804 /* 9805 * Assign MSI-X vectors to interrupt handlers 9806 */ 9807 9808 /* vector-0 is associated to slow-path handler */ 9809 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 9810 &lpfc_sli_sp_intr_handler, 0, 9811 LPFC_SP_DRIVER_HANDLER_NAME, phba); 9812 if (rc) { 9813 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9814 "0421 MSI-X slow-path request_irq failed " 9815 "(%d)\n", rc); 9816 goto msi_fail_out; 9817 } 9818 9819 /* vector-1 is associated to fast-path handler */ 9820 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 9821 &lpfc_sli_fp_intr_handler, 0, 9822 LPFC_FP_DRIVER_HANDLER_NAME, phba); 9823 9824 if (rc) { 9825 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9826 "0429 MSI-X fast-path request_irq failed " 9827 "(%d)\n", rc); 9828 goto irq_fail_out; 9829 } 9830 9831 /* 9832 * Configure HBA MSI-X attention conditions to messages 9833 */ 9834 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9835 9836 if (!pmb) { 9837 rc = -ENOMEM; 9838 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9839 "0474 Unable to allocate memory for issuing " 9840 "MBOX_CONFIG_MSI command\n"); 9841 goto mem_fail_out; 9842 } 9843 rc = lpfc_config_msi(phba, pmb); 9844 if (rc) 9845 goto mbx_fail_out; 9846 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 9847 if (rc != MBX_SUCCESS) { 9848 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 9849 "0351 Config MSI mailbox command failed, " 9850 "mbxCmd x%x, mbxStatus x%x\n", 9851 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 9852 goto mbx_fail_out; 9853 } 9854 9855 /* Free memory allocated for mailbox command */ 9856 mempool_free(pmb, phba->mbox_mem_pool); 9857 return rc; 9858 9859 mbx_fail_out: 9860 /* Free memory allocated for mailbox command */ 9861 mempool_free(pmb, phba->mbox_mem_pool); 9862 9863 mem_fail_out: 9864 /* free the irq already requested */ 9865 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 9866 9867 irq_fail_out: 9868 /* free the irq already requested */ 9869 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 9870 9871 msi_fail_out: 9872 /* Unconfigure MSI-X capability structure */ 9873 pci_free_irq_vectors(phba->pcidev); 9874 9875 vec_fail_out: 9876 return rc; 9877 } 9878 9879 /** 9880 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 9881 * @phba: pointer to lpfc hba data structure. 9882 * 9883 * This routine is invoked to enable the MSI interrupt mode to device with 9884 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 9885 * enable the MSI vector. The device driver is responsible for calling the 9886 * request_irq() to register MSI vector with a interrupt the handler, which 9887 * is done in this function. 
9888 * 9889 * Return codes 9890 * 0 - successful 9891 * other values - error 9892 */ 9893 static int 9894 lpfc_sli_enable_msi(struct lpfc_hba *phba) 9895 { 9896 int rc; 9897 9898 rc = pci_enable_msi(phba->pcidev); 9899 if (!rc) 9900 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9901 "0462 PCI enable MSI mode success.\n"); 9902 else { 9903 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9904 "0471 PCI enable MSI mode failed (%d)\n", rc); 9905 return rc; 9906 } 9907 9908 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 9909 0, LPFC_DRIVER_NAME, phba); 9910 if (rc) { 9911 pci_disable_msi(phba->pcidev); 9912 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9913 "0478 MSI request_irq failed (%d)\n", rc); 9914 } 9915 return rc; 9916 } 9917 9918 /** 9919 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 9920 * @phba: pointer to lpfc hba data structure. 9921 * 9922 * This routine is invoked to enable device interrupt and associate driver's 9923 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 9924 * spec. Depends on the interrupt mode configured to the driver, the driver 9925 * will try to fallback from the configured interrupt mode to an interrupt 9926 * mode which is supported by the platform, kernel, and device in the order 9927 * of: 9928 * MSI-X -> MSI -> IRQ. 9929 * 9930 * Return codes 9931 * 0 - successful 9932 * other values - error 9933 **/ 9934 static uint32_t 9935 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 9936 { 9937 uint32_t intr_mode = LPFC_INTR_ERROR; 9938 int retval; 9939 9940 if (cfg_mode == 2) { 9941 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 9942 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 9943 if (!retval) { 9944 /* Now, try to enable MSI-X interrupt mode */ 9945 retval = lpfc_sli_enable_msix(phba); 9946 if (!retval) { 9947 /* Indicate initialization to MSI-X mode */ 9948 phba->intr_type = MSIX; 9949 intr_mode = 2; 9950 } 9951 } 9952 } 9953 9954 /* Fallback to MSI if MSI-X initialization failed */ 9955 if (cfg_mode >= 1 && phba->intr_type == NONE) { 9956 retval = lpfc_sli_enable_msi(phba); 9957 if (!retval) { 9958 /* Indicate initialization to MSI mode */ 9959 phba->intr_type = MSI; 9960 intr_mode = 1; 9961 } 9962 } 9963 9964 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 9965 if (phba->intr_type == NONE) { 9966 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 9967 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 9968 if (!retval) { 9969 /* Indicate initialization to INTx mode */ 9970 phba->intr_type = INTx; 9971 intr_mode = 0; 9972 } 9973 } 9974 return intr_mode; 9975 } 9976 9977 /** 9978 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 9979 * @phba: pointer to lpfc hba data structure. 9980 * 9981 * This routine is invoked to disable device interrupt and disassociate the 9982 * driver's interrupt handler(s) from interrupt vector(s) to device with 9983 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 9984 * release the interrupt vector(s) for the message signaled interrupt. 
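 * For MSI-X this means freeing both message vectors (slow-path and
 * fast-path); for MSI or INTx only the single interrupt line is released
 * before the allocated vectors are freed.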
9985 **/ 9986 static void 9987 lpfc_sli_disable_intr(struct lpfc_hba *phba) 9988 { 9989 int nr_irqs, i; 9990 9991 if (phba->intr_type == MSIX) 9992 nr_irqs = LPFC_MSIX_VECTORS; 9993 else 9994 nr_irqs = 1; 9995 9996 for (i = 0; i < nr_irqs; i++) 9997 free_irq(pci_irq_vector(phba->pcidev, i), phba); 9998 pci_free_irq_vectors(phba->pcidev); 9999 10000 /* Reset interrupt management states */ 10001 phba->intr_type = NONE; 10002 phba->sli.slistat.sli_intr = 0; 10003 } 10004 10005 /** 10006 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 10007 * @phba: pointer to lpfc hba data structure. 10008 * @vectors: number of msix vectors allocated. 10009 * 10010 * The routine will figure out the CPU affinity assignment for every 10011 * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated 10012 * with a pointer to the CPU mask that defines ALL the CPUs this vector 10013 * can be associated with. If the vector can be unquely associated with 10014 * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu. 10015 * In addition, the CPU to IO channel mapping will be calculated 10016 * and the phba->sli4_hba.cpu_map array will reflect this. 10017 */ 10018 static void 10019 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 10020 { 10021 struct lpfc_vector_map_info *cpup; 10022 int index = 0; 10023 int vec = 0; 10024 int cpu; 10025 #ifdef CONFIG_X86 10026 struct cpuinfo_x86 *cpuinfo; 10027 #endif 10028 10029 /* Init cpu_map array */ 10030 memset(phba->sli4_hba.cpu_map, 0xff, 10031 (sizeof(struct lpfc_vector_map_info) * 10032 phba->sli4_hba.num_present_cpu)); 10033 10034 /* Update CPU map with physical id and core id of each CPU */ 10035 cpup = phba->sli4_hba.cpu_map; 10036 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 10037 #ifdef CONFIG_X86 10038 cpuinfo = &cpu_data(cpu); 10039 cpup->phys_id = cpuinfo->phys_proc_id; 10040 cpup->core_id = cpuinfo->cpu_core_id; 10041 #else 10042 /* No distinction between CPUs for other platforms */ 10043 cpup->phys_id = 0; 10044 cpup->core_id = 0; 10045 #endif 10046 cpup->channel_id = index; /* For now round robin */ 10047 cpup->irq = pci_irq_vector(phba->pcidev, vec); 10048 vec++; 10049 if (vec >= vectors) 10050 vec = 0; 10051 index++; 10052 if (index >= phba->cfg_fcp_io_channel) 10053 index = 0; 10054 cpup++; 10055 } 10056 } 10057 10058 10059 /** 10060 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 10061 * @phba: pointer to lpfc hba data structure. 10062 * 10063 * This routine is invoked to enable the MSI-X interrupt vectors to device 10064 * with SLI-4 interface spec. 10065 * 10066 * Return codes 10067 * 0 - successful 10068 * other values - error 10069 **/ 10070 static int 10071 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 10072 { 10073 int vectors, rc, index; 10074 char *name; 10075 10076 /* Set up MSI-X multi-message vectors */ 10077 vectors = phba->io_channel_irqs; 10078 if (phba->cfg_fof) 10079 vectors++; 10080 10081 rc = pci_alloc_irq_vectors(phba->pcidev, 10082 (phba->nvmet_support) ? 
1 : 2, 10083 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 10084 if (rc < 0) { 10085 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10086 "0484 PCI enable MSI-X failed (%d)\n", rc); 10087 goto vec_fail_out; 10088 } 10089 vectors = rc; 10090 10091 /* Assign MSI-X vectors to interrupt handlers */ 10092 for (index = 0; index < vectors; index++) { 10093 name = phba->sli4_hba.hba_eq_hdl[index].handler_name; 10094 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 10095 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 10096 LPFC_DRIVER_HANDLER_NAME"%d", index); 10097 10098 phba->sli4_hba.hba_eq_hdl[index].idx = index; 10099 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 10100 atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1); 10101 if (phba->cfg_fof && (index == (vectors - 1))) 10102 rc = request_irq(pci_irq_vector(phba->pcidev, index), 10103 &lpfc_sli4_fof_intr_handler, 0, 10104 name, 10105 &phba->sli4_hba.hba_eq_hdl[index]); 10106 else 10107 rc = request_irq(pci_irq_vector(phba->pcidev, index), 10108 &lpfc_sli4_hba_intr_handler, 0, 10109 name, 10110 &phba->sli4_hba.hba_eq_hdl[index]); 10111 if (rc) { 10112 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10113 "0486 MSI-X fast-path (%d) " 10114 "request_irq failed (%d)\n", index, rc); 10115 goto cfg_fail_out; 10116 } 10117 } 10118 10119 if (phba->cfg_fof) 10120 vectors--; 10121 10122 if (vectors != phba->io_channel_irqs) { 10123 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10124 "3238 Reducing IO channels to match number of " 10125 "MSI-X vectors, requested %d got %d\n", 10126 phba->io_channel_irqs, vectors); 10127 if (phba->cfg_fcp_io_channel > vectors) 10128 phba->cfg_fcp_io_channel = vectors; 10129 if (phba->cfg_nvme_io_channel > vectors) 10130 phba->cfg_nvme_io_channel = vectors; 10131 if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) 10132 phba->io_channel_irqs = phba->cfg_fcp_io_channel; 10133 else 10134 phba->io_channel_irqs = phba->cfg_nvme_io_channel; 10135 } 10136 lpfc_cpu_affinity_check(phba, vectors); 10137 10138 return rc; 10139 10140 cfg_fail_out: 10141 /* free the irq already requested */ 10142 for (--index; index >= 0; index--) 10143 free_irq(pci_irq_vector(phba->pcidev, index), 10144 &phba->sli4_hba.hba_eq_hdl[index]); 10145 10146 /* Unconfigure MSI-X capability structure */ 10147 pci_free_irq_vectors(phba->pcidev); 10148 10149 vec_fail_out: 10150 return rc; 10151 } 10152 10153 /** 10154 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 10155 * @phba: pointer to lpfc hba data structure. 10156 * 10157 * This routine is invoked to enable the MSI interrupt mode to device with 10158 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 10159 * to enable the MSI vector. The device driver is responsible for calling 10160 * the request_irq() to register MSI vector with a interrupt the handler, 10161 * which is done in this function. 
10162 * 10163 * Return codes 10164 * 0 - successful 10165 * other values - error 10166 **/ 10167 static int 10168 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 10169 { 10170 int rc, index; 10171 10172 rc = pci_enable_msi(phba->pcidev); 10173 if (!rc) 10174 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10175 "0487 PCI enable MSI mode success.\n"); 10176 else { 10177 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10178 "0488 PCI enable MSI mode failed (%d)\n", rc); 10179 return rc; 10180 } 10181 10182 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 10183 0, LPFC_DRIVER_NAME, phba); 10184 if (rc) { 10185 pci_disable_msi(phba->pcidev); 10186 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10187 "0490 MSI request_irq failed (%d)\n", rc); 10188 return rc; 10189 } 10190 10191 for (index = 0; index < phba->io_channel_irqs; index++) { 10192 phba->sli4_hba.hba_eq_hdl[index].idx = index; 10193 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 10194 } 10195 10196 if (phba->cfg_fof) { 10197 phba->sli4_hba.hba_eq_hdl[index].idx = index; 10198 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 10199 } 10200 return 0; 10201 } 10202 10203 /** 10204 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 10205 * @phba: pointer to lpfc hba data structure. 10206 * 10207 * This routine is invoked to enable device interrupt and associate driver's 10208 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 10209 * interface spec. Depends on the interrupt mode configured to the driver, 10210 * the driver will try to fallback from the configured interrupt mode to an 10211 * interrupt mode which is supported by the platform, kernel, and device in 10212 * the order of: 10213 * MSI-X -> MSI -> IRQ. 10214 * 10215 * Return codes 10216 * 0 - successful 10217 * other values - error 10218 **/ 10219 static uint32_t 10220 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10221 { 10222 uint32_t intr_mode = LPFC_INTR_ERROR; 10223 int retval, idx; 10224 10225 if (cfg_mode == 2) { 10226 /* Preparation before conf_msi mbox cmd */ 10227 retval = 0; 10228 if (!retval) { 10229 /* Now, try to enable MSI-X interrupt mode */ 10230 retval = lpfc_sli4_enable_msix(phba); 10231 if (!retval) { 10232 /* Indicate initialization to MSI-X mode */ 10233 phba->intr_type = MSIX; 10234 intr_mode = 2; 10235 } 10236 } 10237 } 10238 10239 /* Fallback to MSI if MSI-X initialization failed */ 10240 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10241 retval = lpfc_sli4_enable_msi(phba); 10242 if (!retval) { 10243 /* Indicate initialization to MSI mode */ 10244 phba->intr_type = MSI; 10245 intr_mode = 1; 10246 } 10247 } 10248 10249 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 10250 if (phba->intr_type == NONE) { 10251 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 10252 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10253 if (!retval) { 10254 struct lpfc_hba_eq_hdl *eqhdl; 10255 10256 /* Indicate initialization to INTx mode */ 10257 phba->intr_type = INTx; 10258 intr_mode = 0; 10259 10260 for (idx = 0; idx < phba->io_channel_irqs; idx++) { 10261 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; 10262 eqhdl->idx = idx; 10263 eqhdl->phba = phba; 10264 atomic_set(&eqhdl->hba_eq_in_use, 1); 10265 } 10266 if (phba->cfg_fof) { 10267 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; 10268 eqhdl->idx = idx; 10269 eqhdl->phba = phba; 10270 atomic_set(&eqhdl->hba_eq_in_use, 1); 10271 } 10272 } 10273 } 10274 return intr_mode; 10275 } 10276 10277 /** 10278 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 10279 
* @phba: pointer to lpfc hba data structure. 10280 * 10281 * This routine is invoked to disable device interrupt and disassociate 10282 * the driver's interrupt handler(s) from interrupt vector(s) to device 10283 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 10284 * will release the interrupt vector(s) for the message signaled interrupt. 10285 **/ 10286 static void 10287 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 10288 { 10289 /* Disable the currently initialized interrupt mode */ 10290 if (phba->intr_type == MSIX) { 10291 int index; 10292 10293 /* Free up MSI-X multi-message vectors */ 10294 for (index = 0; index < phba->io_channel_irqs; index++) 10295 free_irq(pci_irq_vector(phba->pcidev, index), 10296 &phba->sli4_hba.hba_eq_hdl[index]); 10297 10298 if (phba->cfg_fof) 10299 free_irq(pci_irq_vector(phba->pcidev, index), 10300 &phba->sli4_hba.hba_eq_hdl[index]); 10301 } else { 10302 free_irq(phba->pcidev->irq, phba); 10303 } 10304 10305 pci_free_irq_vectors(phba->pcidev); 10306 10307 /* Reset interrupt management states */ 10308 phba->intr_type = NONE; 10309 phba->sli.slistat.sli_intr = 0; 10310 } 10311 10312 /** 10313 * lpfc_unset_hba - Unset SLI3 hba device initialization 10314 * @phba: pointer to lpfc hba data structure. 10315 * 10316 * This routine is invoked to unset the HBA device initialization steps to 10317 * a device with SLI-3 interface spec. 10318 **/ 10319 static void 10320 lpfc_unset_hba(struct lpfc_hba *phba) 10321 { 10322 struct lpfc_vport *vport = phba->pport; 10323 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10324 10325 spin_lock_irq(shost->host_lock); 10326 vport->load_flag |= FC_UNLOADING; 10327 spin_unlock_irq(shost->host_lock); 10328 10329 kfree(phba->vpi_bmask); 10330 kfree(phba->vpi_ids); 10331 10332 lpfc_stop_hba_timers(phba); 10333 10334 phba->pport->work_port_events = 0; 10335 10336 lpfc_sli_hba_down(phba); 10337 10338 lpfc_sli_brdrestart(phba); 10339 10340 lpfc_sli_disable_intr(phba); 10341 10342 return; 10343 } 10344 10345 /** 10346 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 10347 * @phba: Pointer to HBA context object. 10348 * 10349 * This function is called in the SLI4 code path to wait for completion 10350 * of device's XRIs exchange busy. It will check the XRI exchange busy 10351 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 10352 * that, it will check the XRI exchange busy on outstanding FCP and ELS 10353 * I/Os every 30 seconds, log error message, and wait forever. Only when 10354 * all XRI exchange busy complete, the driver unload shall proceed with 10355 * invoking the function reset ioctl mailbox command to the CNA and the 10356 * the rest of the driver unload resource release. 10357 **/ 10358 static void 10359 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 10360 { 10361 int wait_time = 0; 10362 int nvme_xri_cmpl = 1; 10363 int nvmet_xri_cmpl = 1; 10364 int fcp_xri_cmpl = 1; 10365 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 10366 10367 /* Driver just aborted IOs during the hba_unset process. Pause 10368 * here to give the HBA time to complete the IO and get entries 10369 * into the abts lists. 10370 */ 10371 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 10372 10373 /* Wait for NVME pending IO to flush back to transport. 
*/ 10374 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 10375 lpfc_nvme_wait_for_io_drain(phba); 10376 10377 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) 10378 fcp_xri_cmpl = 10379 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 10380 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10381 nvme_xri_cmpl = 10382 list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 10383 nvmet_xri_cmpl = 10384 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 10385 } 10386 10387 while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || 10388 !nvmet_xri_cmpl) { 10389 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 10390 if (!nvme_xri_cmpl) 10391 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10392 "6100 NVME XRI exchange busy " 10393 "wait time: %d seconds.\n", 10394 wait_time/1000); 10395 if (!fcp_xri_cmpl) 10396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10397 "2877 FCP XRI exchange busy " 10398 "wait time: %d seconds.\n", 10399 wait_time/1000); 10400 if (!els_xri_cmpl) 10401 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10402 "2878 ELS XRI exchange busy " 10403 "wait time: %d seconds.\n", 10404 wait_time/1000); 10405 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 10406 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 10407 } else { 10408 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 10409 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 10410 } 10411 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10412 nvme_xri_cmpl = list_empty( 10413 &phba->sli4_hba.lpfc_abts_nvme_buf_list); 10414 nvmet_xri_cmpl = list_empty( 10415 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 10416 } 10417 10418 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) 10419 fcp_xri_cmpl = list_empty( 10420 &phba->sli4_hba.lpfc_abts_scsi_buf_list); 10421 10422 els_xri_cmpl = 10423 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 10424 10425 } 10426 } 10427 10428 /** 10429 * lpfc_sli4_hba_unset - Unset the fcoe hba 10430 * @phba: Pointer to HBA context object. 10431 * 10432 * This function is called in the SLI4 code path to reset the HBA's FCoE 10433 * function. The caller is not required to hold any lock. This routine 10434 * issues PCI function reset mailbox command to reset the FCoE function. 10435 * At the end of the function, it calls lpfc_hba_down_post function to 10436 * free any pending commands. 10437 **/ 10438 static void 10439 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 10440 { 10441 int wait_cnt = 0; 10442 LPFC_MBOXQ_t *mboxq; 10443 struct pci_dev *pdev = phba->pcidev; 10444 10445 lpfc_stop_hba_timers(phba); 10446 phba->sli4_hba.intr_enable = 0; 10447 10448 /* 10449 * Gracefully wait out the potential current outstanding asynchronous 10450 * mailbox command. 
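	 * First block any new asynchronous mailbox command from being
	 * posted, then poll (10 ms at a time, up to LPFC_ACTIVE_MBOX_WAIT_CNT
	 * iterations) for the active command to finish; if it does not, the
	 * command is completed by force with MBX_NOT_FINISHED status.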
10451 */ 10452 10453 /* First, block any pending async mailbox command from posted */ 10454 spin_lock_irq(&phba->hbalock); 10455 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 10456 spin_unlock_irq(&phba->hbalock); 10457 /* Now, trying to wait it out if we can */ 10458 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 10459 msleep(10); 10460 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 10461 break; 10462 } 10463 /* Forcefully release the outstanding mailbox command if timed out */ 10464 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 10465 spin_lock_irq(&phba->hbalock); 10466 mboxq = phba->sli.mbox_active; 10467 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 10468 __lpfc_mbox_cmpl_put(phba, mboxq); 10469 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10470 phba->sli.mbox_active = NULL; 10471 spin_unlock_irq(&phba->hbalock); 10472 } 10473 10474 /* Abort all iocbs associated with the hba */ 10475 lpfc_sli_hba_iocb_abort(phba); 10476 10477 /* Wait for completion of device XRI exchange busy */ 10478 lpfc_sli4_xri_exchange_busy_wait(phba); 10479 10480 /* Disable PCI subsystem interrupt */ 10481 lpfc_sli4_disable_intr(phba); 10482 10483 /* Disable SR-IOV if enabled */ 10484 if (phba->cfg_sriov_nr_virtfn) 10485 pci_disable_sriov(pdev); 10486 10487 /* Stop kthread signal shall trigger work_done one more time */ 10488 kthread_stop(phba->worker_thread); 10489 10490 /* Unset the queues shared with the hardware then release all 10491 * allocated resources. 10492 */ 10493 lpfc_sli4_queue_unset(phba); 10494 lpfc_sli4_queue_destroy(phba); 10495 10496 /* Reset SLI4 HBA FCoE function */ 10497 lpfc_pci_function_reset(phba); 10498 10499 /* Stop the SLI4 device port */ 10500 phba->pport->work_port_events = 0; 10501 } 10502 10503 /** 10504 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 10505 * @phba: Pointer to HBA context object. 10506 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 10507 * 10508 * This function is called in the SLI4 code path to read the port's 10509 * sli4 capabilities. 10510 * 10511 * This function may be be called from any context that can block-wait 10512 * for the completion. The expectation is that this routine is called 10513 * typically from probe_one or from the online routine. 
10514 **/ 10515 int 10516 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 10517 { 10518 int rc; 10519 struct lpfc_mqe *mqe; 10520 struct lpfc_pc_sli4_params *sli4_params; 10521 uint32_t mbox_tmo; 10522 10523 rc = 0; 10524 mqe = &mboxq->u.mqe; 10525 10526 /* Read the port's SLI4 Parameters port capabilities */ 10527 lpfc_pc_sli4_params(mboxq); 10528 if (!phba->sli4_hba.intr_enable) 10529 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10530 else { 10531 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 10532 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 10533 } 10534 10535 if (unlikely(rc)) 10536 return 1; 10537 10538 sli4_params = &phba->sli4_hba.pc_sli4_params; 10539 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 10540 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 10541 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 10542 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 10543 &mqe->un.sli4_params); 10544 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 10545 &mqe->un.sli4_params); 10546 sli4_params->proto_types = mqe->un.sli4_params.word3; 10547 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 10548 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 10549 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 10550 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 10551 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 10552 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 10553 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 10554 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 10555 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 10556 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 10557 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 10558 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 10559 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 10560 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 10561 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 10562 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 10563 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 10564 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 10565 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 10566 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 10567 10568 /* Make sure that sge_supp_len can be handled by the driver */ 10569 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 10570 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 10571 10572 return rc; 10573 } 10574 10575 /** 10576 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 10577 * @phba: Pointer to HBA context object. 10578 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 10579 * 10580 * This function is called in the SLI4 code path to read the port's 10581 * sli4 capabilities. 10582 * 10583 * This function may be be called from any context that can block-wait 10584 * for the completion. The expectation is that this routine is called 10585 * typically from probe_one or from the online routine. 
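 *
 * Besides caching the raw SLI4 parameters, the routine derives several
 * driver capabilities from them: whether NVME can be supported, whether
 * PBDE and the embedded FCP CMD IU (fcp_embed_io) may be used, whether
 * Suppress Response and MDS Diagnostics are available, and whether
 * expanded (16K) WQ/CQ page sizes can be enabled.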
10586 **/ 10587 int 10588 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 10589 { 10590 int rc; 10591 struct lpfc_mqe *mqe = &mboxq->u.mqe; 10592 struct lpfc_pc_sli4_params *sli4_params; 10593 uint32_t mbox_tmo; 10594 int length; 10595 bool exp_wqcq_pages = true; 10596 struct lpfc_sli4_parameters *mbx_sli4_parameters; 10597 10598 /* 10599 * By default, the driver assumes the SLI4 port requires RPI 10600 * header postings. The SLI4_PARAM response will correct this 10601 * assumption. 10602 */ 10603 phba->sli4_hba.rpi_hdrs_in_use = 1; 10604 10605 /* Read the port's SLI4 Config Parameters */ 10606 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 10607 sizeof(struct lpfc_sli4_cfg_mhdr)); 10608 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10609 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 10610 length, LPFC_SLI4_MBX_EMBED); 10611 if (!phba->sli4_hba.intr_enable) 10612 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10613 else { 10614 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 10615 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 10616 } 10617 if (unlikely(rc)) 10618 return rc; 10619 sli4_params = &phba->sli4_hba.pc_sli4_params; 10620 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 10621 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 10622 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 10623 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 10624 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 10625 mbx_sli4_parameters); 10626 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 10627 mbx_sli4_parameters); 10628 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 10629 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 10630 else 10631 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 10632 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 10633 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 10634 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 10635 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 10636 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 10637 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 10638 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 10639 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 10640 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 10641 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 10642 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 10643 mbx_sli4_parameters); 10644 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 10645 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 10646 mbx_sli4_parameters); 10647 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 10648 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 10649 phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) && 10650 bf_get(cfg_xib, mbx_sli4_parameters)); 10651 10652 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) || 10653 !phba->nvme_support) { 10654 phba->nvme_support = 0; 10655 phba->nvmet_support = 0; 10656 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF; 10657 phba->cfg_nvme_io_channel = 0; 10658 phba->io_channel_irqs = phba->cfg_fcp_io_channel; 10659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 10660 "6101 Disabling NVME support: " 10661 "Not supported by firmware: %d %d\n", 10662 bf_get(cfg_nvme, mbx_sli4_parameters), 10663 bf_get(cfg_xib, mbx_sli4_parameters)); 10664 10665 /* If firmware 
doesn't support NVME, just use SCSI support */ 10666 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 10667 return -ENODEV; 10668 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 10669 } 10670 10671 /* Only embed PBDE for if_type 6 */ 10672 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 10673 LPFC_SLI_INTF_IF_TYPE_6) { 10674 phba->fcp_embed_pbde = 1; 10675 phba->nvme_embed_pbde = 1; 10676 } 10677 10678 /* PBDE support requires xib be set */ 10679 if (!bf_get(cfg_xib, mbx_sli4_parameters)) { 10680 phba->fcp_embed_pbde = 0; 10681 phba->nvme_embed_pbde = 0; 10682 } 10683 10684 /* 10685 * To support Suppress Response feature we must satisfy 3 conditions. 10686 * lpfc_suppress_rsp module parameter must be set (default). 10687 * In SLI4-Parameters Descriptor: 10688 * Extended Inline Buffers (XIB) must be supported. 10689 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 10690 * (double negative). 10691 */ 10692 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 10693 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 10694 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 10695 else 10696 phba->cfg_suppress_rsp = 0; 10697 10698 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 10699 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 10700 10701 /* Make sure that sge_supp_len can be handled by the driver */ 10702 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 10703 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 10704 10705 /* 10706 * Check whether the adapter supports an embedded copy of the 10707 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 10708 * to use this option, 128-byte WQEs must be used. 10709 */ 10710 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 10711 phba->fcp_embed_io = 1; 10712 else 10713 phba->fcp_embed_io = 0; 10714 10715 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 10716 "6422 XIB %d: FCP %d %d NVME %d %d %d %d\n", 10717 bf_get(cfg_xib, mbx_sli4_parameters), 10718 phba->fcp_embed_pbde, phba->fcp_embed_io, 10719 phba->nvme_support, phba->nvme_embed_pbde, 10720 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 10721 10722 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 10723 LPFC_SLI_INTF_IF_TYPE_2) && 10724 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 10725 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 10726 exp_wqcq_pages = false; 10727 10728 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 10729 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 10730 exp_wqcq_pages && 10731 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 10732 phba->enab_exp_wqcq_pages = 1; 10733 else 10734 phba->enab_exp_wqcq_pages = 0; 10735 /* 10736 * Check if the SLI port supports MDS Diagnostics 10737 */ 10738 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 10739 phba->mds_diags_support = 1; 10740 else 10741 phba->mds_diags_support = 0; 10742 return 0; 10743 } 10744 10745 /** 10746 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 10747 * @pdev: pointer to PCI device 10748 * @pid: pointer to PCI device identifier 10749 * 10750 * This routine is to be called to attach a device with SLI-3 interface spec 10751 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 10752 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 10753 * information of the device and driver to see if the driver state that it can 10754 * support this kind of device. If the match is successful, the driver core 10755 * invokes this routine. 
If this routine determines it can claim the HBA, it 10756 * does all the initialization that it needs to do to handle the HBA properly. 10757 * 10758 * Return code 10759 * 0 - driver can claim the device 10760 * negative value - driver can not claim the device 10761 **/ 10762 static int 10763 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 10764 { 10765 struct lpfc_hba *phba; 10766 struct lpfc_vport *vport = NULL; 10767 struct Scsi_Host *shost = NULL; 10768 int error; 10769 uint32_t cfg_mode, intr_mode; 10770 10771 /* Allocate memory for HBA structure */ 10772 phba = lpfc_hba_alloc(pdev); 10773 if (!phba) 10774 return -ENOMEM; 10775 10776 /* Perform generic PCI device enabling operation */ 10777 error = lpfc_enable_pci_dev(phba); 10778 if (error) 10779 goto out_free_phba; 10780 10781 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 10782 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 10783 if (error) 10784 goto out_disable_pci_dev; 10785 10786 /* Set up SLI-3 specific device PCI memory space */ 10787 error = lpfc_sli_pci_mem_setup(phba); 10788 if (error) { 10789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10790 "1402 Failed to set up pci memory space.\n"); 10791 goto out_disable_pci_dev; 10792 } 10793 10794 /* Set up SLI-3 specific device driver resources */ 10795 error = lpfc_sli_driver_resource_setup(phba); 10796 if (error) { 10797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10798 "1404 Failed to set up driver resource.\n"); 10799 goto out_unset_pci_mem_s3; 10800 } 10801 10802 /* Initialize and populate the iocb list per host */ 10803 10804 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 10805 if (error) { 10806 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10807 "1405 Failed to initialize iocb list.\n"); 10808 goto out_unset_driver_resource_s3; 10809 } 10810 10811 /* Set up common device driver resources */ 10812 error = lpfc_setup_driver_resource_phase2(phba); 10813 if (error) { 10814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10815 "1406 Failed to set up driver resource.\n"); 10816 goto out_free_iocb_list; 10817 } 10818 10819 /* Get the default values for Model Name and Description */ 10820 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 10821 10822 /* Create SCSI host to the physical port */ 10823 error = lpfc_create_shost(phba); 10824 if (error) { 10825 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10826 "1407 Failed to create scsi host.\n"); 10827 goto out_unset_driver_resource; 10828 } 10829 10830 /* Configure sysfs attributes */ 10831 vport = phba->pport; 10832 error = lpfc_alloc_sysfs_attr(vport); 10833 if (error) { 10834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10835 "1476 Failed to allocate sysfs attr\n"); 10836 goto out_destroy_shost; 10837 } 10838 10839 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 10840 /* Now, trying to enable interrupt and bring up the device */ 10841 cfg_mode = phba->cfg_use_msi; 10842 while (true) { 10843 /* Put device to a known state before enabling interrupt */ 10844 lpfc_stop_port(phba); 10845 /* Configure and enable interrupt */ 10846 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 10847 if (intr_mode == LPFC_INTR_ERROR) { 10848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10849 "0431 Failed to enable interrupt.\n"); 10850 error = -ENODEV; 10851 goto out_free_sysfs_attr; 10852 } 10853 /* SLI-3 HBA setup */ 10854 if (lpfc_sli_hba_setup(phba)) { 10855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10856 "1477 Failed to set up hba\n"); 10857 error = -ENODEV; 10858 
goto out_remove_device; 10859 } 10860 10861 /* Wait 50ms for the interrupts of previous mailbox commands */ 10862 msleep(50); 10863 /* Check active interrupts on message signaled interrupts */ 10864 if (intr_mode == 0 || 10865 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 10866 /* Log the current active interrupt mode */ 10867 phba->intr_mode = intr_mode; 10868 lpfc_log_intr_mode(phba, intr_mode); 10869 break; 10870 } else { 10871 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10872 "0447 Configure interrupt mode (%d) " 10873 "failed active interrupt test.\n", 10874 intr_mode); 10875 /* Disable the current interrupt mode */ 10876 lpfc_sli_disable_intr(phba); 10877 /* Try next level of interrupt mode */ 10878 cfg_mode = --intr_mode; 10879 } 10880 } 10881 10882 /* Perform post initialization setup */ 10883 lpfc_post_init_setup(phba); 10884 10885 /* Check if there are static vports to be created. */ 10886 lpfc_create_static_vport(phba); 10887 10888 return 0; 10889 10890 out_remove_device: 10891 lpfc_unset_hba(phba); 10892 out_free_sysfs_attr: 10893 lpfc_free_sysfs_attr(vport); 10894 out_destroy_shost: 10895 lpfc_destroy_shost(phba); 10896 out_unset_driver_resource: 10897 lpfc_unset_driver_resource_phase2(phba); 10898 out_free_iocb_list: 10899 lpfc_free_iocb_list(phba); 10900 out_unset_driver_resource_s3: 10901 lpfc_sli_driver_resource_unset(phba); 10902 out_unset_pci_mem_s3: 10903 lpfc_sli_pci_mem_unset(phba); 10904 out_disable_pci_dev: 10905 lpfc_disable_pci_dev(phba); 10906 if (shost) 10907 scsi_host_put(shost); 10908 out_free_phba: 10909 lpfc_hba_free(phba); 10910 return error; 10911 } 10912 10913 /** 10914 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 10915 * @pdev: pointer to PCI device 10916 * 10917 * This routine is to be called to disattach a device with SLI-3 interface 10918 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 10919 * removed from PCI bus, it performs all the necessary cleanup for the HBA 10920 * device to be removed from the PCI subsystem properly. 10921 **/ 10922 static void 10923 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 10924 { 10925 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10926 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 10927 struct lpfc_vport **vports; 10928 struct lpfc_hba *phba = vport->phba; 10929 int i; 10930 10931 spin_lock_irq(&phba->hbalock); 10932 vport->load_flag |= FC_UNLOADING; 10933 spin_unlock_irq(&phba->hbalock); 10934 10935 lpfc_free_sysfs_attr(vport); 10936 10937 /* Release all the vports against this physical port */ 10938 vports = lpfc_create_vport_work_array(phba); 10939 if (vports != NULL) 10940 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10941 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 10942 continue; 10943 fc_vport_terminate(vports[i]->fc_vport); 10944 } 10945 lpfc_destroy_vport_work_array(phba, vports); 10946 10947 /* Remove FC host and then SCSI host with the physical port */ 10948 fc_remove_host(shost); 10949 scsi_remove_host(shost); 10950 10951 lpfc_cleanup(vport); 10952 10953 /* 10954 * Bring down the SLI Layer. This step disable all interrupts, 10955 * clears the rings, discards all mailbox commands, and resets 10956 * the HBA. 
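 * The calls that follow implement exactly this teardown: lpfc_sli_hba_down()
 * quiesces the SLI layer and disables the HBA interrupt, kthread_stop()
 * terminates the worker thread, and lpfc_sli_brdrestart() performs the final
 * txcmplq cleanup and HBA reset.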
10957 */ 10958 10959 /* HBA interrupt will be disabled after this call */ 10960 lpfc_sli_hba_down(phba); 10961 /* Stop kthread signal shall trigger work_done one more time */ 10962 kthread_stop(phba->worker_thread); 10963 /* Final cleanup of txcmplq and reset the HBA */ 10964 lpfc_sli_brdrestart(phba); 10965 10966 kfree(phba->vpi_bmask); 10967 kfree(phba->vpi_ids); 10968 10969 lpfc_stop_hba_timers(phba); 10970 spin_lock_irq(&phba->hbalock); 10971 list_del_init(&vport->listentry); 10972 spin_unlock_irq(&phba->hbalock); 10973 10974 lpfc_debugfs_terminate(vport); 10975 10976 /* Disable SR-IOV if enabled */ 10977 if (phba->cfg_sriov_nr_virtfn) 10978 pci_disable_sriov(pdev); 10979 10980 /* Disable interrupt */ 10981 lpfc_sli_disable_intr(phba); 10982 10983 scsi_host_put(shost); 10984 10985 /* 10986 * Call scsi_free before mem_free since scsi bufs are released to their 10987 * corresponding pools here. 10988 */ 10989 lpfc_scsi_free(phba); 10990 lpfc_mem_free_all(phba); 10991 10992 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 10993 phba->hbqslimp.virt, phba->hbqslimp.phys); 10994 10995 /* Free resources associated with SLI2 interface */ 10996 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 10997 phba->slim2p.virt, phba->slim2p.phys); 10998 10999 /* unmap adapter SLIM and Control Registers */ 11000 iounmap(phba->ctrl_regs_memmap_p); 11001 iounmap(phba->slim_memmap_p); 11002 11003 lpfc_hba_free(phba); 11004 11005 pci_release_mem_regions(pdev); 11006 pci_disable_device(pdev); 11007 } 11008 11009 /** 11010 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 11011 * @pdev: pointer to PCI device 11012 * @msg: power management message 11013 * 11014 * This routine is to be called from the kernel's PCI subsystem to support 11015 * system Power Management (PM) to device with SLI-3 interface spec. When 11016 * PM invokes this method, it quiesces the device by stopping the driver's 11017 * worker thread for the device, turning off device's interrupt and DMA, 11018 * and bring the device offline. Note that as the driver implements the 11019 * minimum PM requirements to a power-aware driver's PM support for the 11020 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 11021 * to the suspend() method call will be treated as SUSPEND and the driver will 11022 * fully reinitialize its device during resume() method call, the driver will 11023 * set device to PCI_D3hot state in PCI config space instead of setting it 11024 * according to the @msg provided by the PM. 
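 *
 * A sketch of the suspend sequence implemented below:
 *
 *   lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *   lpfc_offline(phba);
 *   kthread_stop(phba->worker_thread);
 *   lpfc_sli_disable_intr(phba);
 *   pci_save_state(pdev);
 *   pci_set_power_state(pdev, PCI_D3hot);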
11025 * 11026 * Return code 11027 * 0 - driver suspended the device 11028 * Error otherwise 11029 **/ 11030 static int 11031 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 11032 { 11033 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11034 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11035 11036 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11037 "0473 PCI device Power Management suspend.\n"); 11038 11039 /* Bring down the device */ 11040 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11041 lpfc_offline(phba); 11042 kthread_stop(phba->worker_thread); 11043 11044 /* Disable interrupt from device */ 11045 lpfc_sli_disable_intr(phba); 11046 11047 /* Save device state to PCI config space */ 11048 pci_save_state(pdev); 11049 pci_set_power_state(pdev, PCI_D3hot); 11050 11051 return 0; 11052 } 11053 11054 /** 11055 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 11056 * @pdev: pointer to PCI device 11057 * 11058 * This routine is to be called from the kernel's PCI subsystem to support 11059 * system Power Management (PM) to device with SLI-3 interface spec. When PM 11060 * invokes this method, it restores the device's PCI config space state and 11061 * fully reinitializes the device and brings it online. Note that as the 11062 * driver implements the minimum PM requirements to a power-aware driver's 11063 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 11064 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 11065 * driver will fully reinitialize its device during resume() method call, 11066 * the device will be set to PCI_D0 directly in PCI config space before 11067 * restoring the state. 11068 * 11069 * Return code 11070 * 0 - driver suspended the device 11071 * Error otherwise 11072 **/ 11073 static int 11074 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 11075 { 11076 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11077 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11078 uint32_t intr_mode; 11079 int error; 11080 11081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11082 "0452 PCI device Power Management resume.\n"); 11083 11084 /* Restore device state from PCI config space */ 11085 pci_set_power_state(pdev, PCI_D0); 11086 pci_restore_state(pdev); 11087 11088 /* 11089 * As the new kernel behavior of pci_restore_state() API call clears 11090 * device saved_state flag, need to save the restored state again. 11091 */ 11092 pci_save_state(pdev); 11093 11094 if (pdev->is_busmaster) 11095 pci_set_master(pdev); 11096 11097 /* Startup the kernel thread for this host adapter. 
*/ 11098 phba->worker_thread = kthread_run(lpfc_do_work, phba, 11099 "lpfc_worker_%d", phba->brd_no); 11100 if (IS_ERR(phba->worker_thread)) { 11101 error = PTR_ERR(phba->worker_thread); 11102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11103 "0434 PM resume failed to start worker " 11104 "thread: error=x%x.\n", error); 11105 return error; 11106 } 11107 11108 /* Configure and enable interrupt */ 11109 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 11110 if (intr_mode == LPFC_INTR_ERROR) { 11111 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11112 "0430 PM resume Failed to enable interrupt\n"); 11113 return -EIO; 11114 } else 11115 phba->intr_mode = intr_mode; 11116 11117 /* Restart HBA and bring it online */ 11118 lpfc_sli_brdrestart(phba); 11119 lpfc_online(phba); 11120 11121 /* Log the current active interrupt mode */ 11122 lpfc_log_intr_mode(phba, phba->intr_mode); 11123 11124 return 0; 11125 } 11126 11127 /** 11128 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 11129 * @phba: pointer to lpfc hba data structure. 11130 * 11131 * This routine is called to prepare the SLI3 device for PCI slot recover. It 11132 * aborts all the outstanding SCSI I/Os to the pci device. 11133 **/ 11134 static void 11135 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 11136 { 11137 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11138 "2723 PCI channel I/O abort preparing for recovery\n"); 11139 11140 /* 11141 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 11142 * and let the SCSI mid-layer to retry them to recover. 11143 */ 11144 lpfc_sli_abort_fcp_rings(phba); 11145 } 11146 11147 /** 11148 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 11149 * @phba: pointer to lpfc hba data structure. 11150 * 11151 * This routine is called to prepare the SLI3 device for PCI slot reset. It 11152 * disables the device interrupt and pci device, and aborts the internal FCP 11153 * pending I/Os. 11154 **/ 11155 static void 11156 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 11157 { 11158 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11159 "2710 PCI channel disable preparing for reset\n"); 11160 11161 /* Block any management I/Os to the device */ 11162 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 11163 11164 /* Block all SCSI devices' I/Os on the host */ 11165 lpfc_scsi_dev_block(phba); 11166 11167 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 11168 lpfc_sli_flush_fcp_rings(phba); 11169 11170 /* stop all timers */ 11171 lpfc_stop_hba_timers(phba); 11172 11173 /* Disable interrupt and pci device */ 11174 lpfc_sli_disable_intr(phba); 11175 pci_disable_device(phba->pcidev); 11176 } 11177 11178 /** 11179 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 11180 * @phba: pointer to lpfc hba data structure. 11181 * 11182 * This routine is called to prepare the SLI3 device for PCI slot permanently 11183 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 11184 * pending I/Os. 
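 *
 * This is the pci_channel_io_perm_failure leg of lpfc_io_error_detected_s3()
 * below, which returns PCI_ERS_RESULT_DISCONNECT to the PCI core after
 * calling this routine.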
11185 **/ 11186 static void 11187 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 11188 { 11189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11190 "2711 PCI channel permanent disable for failure\n"); 11191 /* Block all SCSI devices' I/Os on the host */ 11192 lpfc_scsi_dev_block(phba); 11193 11194 /* stop all timers */ 11195 lpfc_stop_hba_timers(phba); 11196 11197 /* Clean up all driver's outstanding SCSI I/Os */ 11198 lpfc_sli_flush_fcp_rings(phba); 11199 } 11200 11201 /** 11202 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 11203 * @pdev: pointer to PCI device. 11204 * @state: the current PCI connection state. 11205 * 11206 * This routine is called from the PCI subsystem for I/O error handling to 11207 * device with SLI-3 interface spec. This function is called by the PCI 11208 * subsystem after a PCI bus error affecting this device has been detected. 11209 * When this function is invoked, it will need to stop all the I/Os and 11210 * interrupt(s) to the device. Once that is done, it will return 11211 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 11212 * as desired. 11213 * 11214 * Return codes 11215 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 11216 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 11217 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11218 **/ 11219 static pci_ers_result_t 11220 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 11221 { 11222 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11223 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11224 11225 switch (state) { 11226 case pci_channel_io_normal: 11227 /* Non-fatal error, prepare for recovery */ 11228 lpfc_sli_prep_dev_for_recover(phba); 11229 return PCI_ERS_RESULT_CAN_RECOVER; 11230 case pci_channel_io_frozen: 11231 /* Fatal error, prepare for slot reset */ 11232 lpfc_sli_prep_dev_for_reset(phba); 11233 return PCI_ERS_RESULT_NEED_RESET; 11234 case pci_channel_io_perm_failure: 11235 /* Permanent failure, prepare for device down */ 11236 lpfc_sli_prep_dev_for_perm_failure(phba); 11237 return PCI_ERS_RESULT_DISCONNECT; 11238 default: 11239 /* Unknown state, prepare and request slot reset */ 11240 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11241 "0472 Unknown PCI error state: x%x\n", state); 11242 lpfc_sli_prep_dev_for_reset(phba); 11243 return PCI_ERS_RESULT_NEED_RESET; 11244 } 11245 } 11246 11247 /** 11248 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 11249 * @pdev: pointer to PCI device. 11250 * 11251 * This routine is called from the PCI subsystem for error handling to 11252 * device with SLI-3 interface spec. This is called after PCI bus has been 11253 * reset to restart the PCI card from scratch, as if from a cold-boot. 11254 * During the PCI subsystem error recovery, after driver returns 11255 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 11256 * recovery and then call this routine before calling the .resume method 11257 * to recover the device. This function will initialize the HBA device, 11258 * enable the interrupt, but it will just put the HBA to offline state 11259 * without passing any I/O traffic. 
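 *
 * For reference, the per-revision error handlers are reached through the
 * generic dispatchers wired into the driver's struct pci_error_handlers
 * (lpfc_err_handler, defined near the end of this file); the PCI core
 * invokes .error_detected first, .slot_reset after the bus reset, and
 * .resume last:
 *
 *   static const struct pci_error_handlers lpfc_err_handler = {
 *           .error_detected = lpfc_io_error_detected,
 *           .slot_reset     = lpfc_io_slot_reset,
 *           .resume         = lpfc_io_resume,
 *   };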
11260 * 11261 * Return codes 11262 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 11263 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11264 */ 11265 static pci_ers_result_t 11266 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 11267 { 11268 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11269 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11270 struct lpfc_sli *psli = &phba->sli; 11271 uint32_t intr_mode; 11272 11273 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 11274 if (pci_enable_device_mem(pdev)) { 11275 printk(KERN_ERR "lpfc: Cannot re-enable " 11276 "PCI device after reset.\n"); 11277 return PCI_ERS_RESULT_DISCONNECT; 11278 } 11279 11280 pci_restore_state(pdev); 11281 11282 /* 11283 * As the new kernel behavior of pci_restore_state() API call clears 11284 * device saved_state flag, need to save the restored state again. 11285 */ 11286 pci_save_state(pdev); 11287 11288 if (pdev->is_busmaster) 11289 pci_set_master(pdev); 11290 11291 spin_lock_irq(&phba->hbalock); 11292 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 11293 spin_unlock_irq(&phba->hbalock); 11294 11295 /* Configure and enable interrupt */ 11296 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 11297 if (intr_mode == LPFC_INTR_ERROR) { 11298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11299 "0427 Cannot re-enable interrupt after " 11300 "slot reset.\n"); 11301 return PCI_ERS_RESULT_DISCONNECT; 11302 } else 11303 phba->intr_mode = intr_mode; 11304 11305 /* Take device offline, it will perform cleanup */ 11306 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11307 lpfc_offline(phba); 11308 lpfc_sli_brdrestart(phba); 11309 11310 /* Log the current active interrupt mode */ 11311 lpfc_log_intr_mode(phba, phba->intr_mode); 11312 11313 return PCI_ERS_RESULT_RECOVERED; 11314 } 11315 11316 /** 11317 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 11318 * @pdev: pointer to PCI device 11319 * 11320 * This routine is called from the PCI subsystem for error handling to device 11321 * with SLI-3 interface spec. It is called when kernel error recovery tells 11322 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 11323 * error recovery. After this call, traffic can start to flow from this device 11324 * again. 11325 */ 11326 static void 11327 lpfc_io_resume_s3(struct pci_dev *pdev) 11328 { 11329 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11330 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11331 11332 /* Bring device online, it will be no-op for non-fatal error resume */ 11333 lpfc_online(phba); 11334 11335 /* Clean up Advanced Error Reporting (AER) if needed */ 11336 if (phba->hba_flag & HBA_AER_ENABLED) 11337 pci_cleanup_aer_uncorrect_error_status(pdev); 11338 } 11339 11340 /** 11341 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 11342 * @phba: pointer to lpfc hba data structure. 
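 *
 * The reservation scales with the port's configured XRI count; taking the
 * ladder below as worked examples: a max_xri of 100 reserves 10 IOCBs, 512
 * reserves 50, 1024 reserves 100, and anything above 2048 reserves 250.
 * Non-SLI4 ports reserve none.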
11343 * 11344 * returns the number of ELS/CT IOCBs to reserve 11345 **/ 11346 int 11347 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 11348 { 11349 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 11350 11351 if (phba->sli_rev == LPFC_SLI_REV4) { 11352 if (max_xri <= 100) 11353 return 10; 11354 else if (max_xri <= 256) 11355 return 25; 11356 else if (max_xri <= 512) 11357 return 50; 11358 else if (max_xri <= 1024) 11359 return 100; 11360 else if (max_xri <= 1536) 11361 return 150; 11362 else if (max_xri <= 2048) 11363 return 200; 11364 else 11365 return 250; 11366 } else 11367 return 0; 11368 } 11369 11370 /** 11371 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 11372 * @phba: pointer to lpfc hba data structure. 11373 * 11374 * returns the number of ELS/CT + NVMET IOCBs to reserve 11375 **/ 11376 int 11377 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 11378 { 11379 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 11380 11381 if (phba->nvmet_support) 11382 max_xri += LPFC_NVMET_BUF_POST; 11383 return max_xri; 11384 } 11385 11386 11387 static void 11388 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 11389 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 11390 const struct firmware *fw) 11391 { 11392 if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) || 11393 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && 11394 magic_number != MAGIC_NUMER_G6) || 11395 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && 11396 magic_number != MAGIC_NUMER_G7)) 11397 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11398 "3030 This firmware version is not supported on " 11399 "this HBA model. Device:%x Magic:%x Type:%x " 11400 "ID:%x Size %d %zd\n", 11401 phba->pcidev->device, magic_number, ftype, fid, 11402 fsize, fw->size); 11403 else 11404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11405 "3022 FW Download failed. Device:%x Magic:%x Type:%x " 11406 "ID:%x Size %d %zd\n", 11407 phba->pcidev->device, magic_number, ftype, fid, 11408 fsize, fw->size); 11409 } 11410 11411 11412 /** 11413 * lpfc_write_firmware - attempt to write a firmware image to the port 11414 * @fw: pointer to firmware image returned from request_firmware. 11415 * @phba: pointer to lpfc hba data structure. 
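 *
 * Rough flow, as implemented below: validate the lpfc_grp_hdr at the start
 * of the image (magic number, file type, id, size), skip the update when the
 * image revision already matches the running firmware, otherwise stage the
 * image through a list of SLI4_PAGE_SIZE DMA buffers and push it to the port
 * in chunks with lpfc_wr_object() until the write offset reaches fw->size.
 * The firmware blob is released on both the success and error paths.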
11416 * 11417 **/ 11418 static void 11419 lpfc_write_firmware(const struct firmware *fw, void *context) 11420 { 11421 struct lpfc_hba *phba = (struct lpfc_hba *)context; 11422 char fwrev[FW_REV_STR_SIZE]; 11423 struct lpfc_grp_hdr *image; 11424 struct list_head dma_buffer_list; 11425 int i, rc = 0; 11426 struct lpfc_dmabuf *dmabuf, *next; 11427 uint32_t offset = 0, temp_offset = 0; 11428 uint32_t magic_number, ftype, fid, fsize; 11429 11430 /* It can be null in no-wait mode, sanity check */ 11431 if (!fw) { 11432 rc = -ENXIO; 11433 goto out; 11434 } 11435 image = (struct lpfc_grp_hdr *)fw->data; 11436 11437 magic_number = be32_to_cpu(image->magic_number); 11438 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 11439 fid = bf_get_be32(lpfc_grp_hdr_id, image); 11440 fsize = be32_to_cpu(image->size); 11441 11442 INIT_LIST_HEAD(&dma_buffer_list); 11443 lpfc_decode_firmware_rev(phba, fwrev, 1); 11444 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 11445 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11446 "3023 Updating Firmware, Current Version:%s " 11447 "New Version:%s\n", 11448 fwrev, image->revision); 11449 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 11450 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 11451 GFP_KERNEL); 11452 if (!dmabuf) { 11453 rc = -ENOMEM; 11454 goto release_out; 11455 } 11456 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 11457 SLI4_PAGE_SIZE, 11458 &dmabuf->phys, 11459 GFP_KERNEL); 11460 if (!dmabuf->virt) { 11461 kfree(dmabuf); 11462 rc = -ENOMEM; 11463 goto release_out; 11464 } 11465 list_add_tail(&dmabuf->list, &dma_buffer_list); 11466 } 11467 while (offset < fw->size) { 11468 temp_offset = offset; 11469 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 11470 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 11471 memcpy(dmabuf->virt, 11472 fw->data + temp_offset, 11473 fw->size - temp_offset); 11474 temp_offset = fw->size; 11475 break; 11476 } 11477 memcpy(dmabuf->virt, fw->data + temp_offset, 11478 SLI4_PAGE_SIZE); 11479 temp_offset += SLI4_PAGE_SIZE; 11480 } 11481 rc = lpfc_wr_object(phba, &dma_buffer_list, 11482 (fw->size - offset), &offset); 11483 if (rc) { 11484 lpfc_log_write_firmware_error(phba, offset, 11485 magic_number, ftype, fid, fsize, fw); 11486 goto release_out; 11487 } 11488 } 11489 rc = offset; 11490 } else 11491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11492 "3029 Skipped Firmware update, Current " 11493 "Version:%s New Version:%s\n", 11494 fwrev, image->revision); 11495 11496 release_out: 11497 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 11498 list_del(&dmabuf->list); 11499 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 11500 dmabuf->virt, dmabuf->phys); 11501 kfree(dmabuf); 11502 } 11503 release_firmware(fw); 11504 out: 11505 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11506 "3024 Firmware update done: %d.\n", rc); 11507 return; 11508 } 11509 11510 /** 11511 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 11512 * @phba: pointer to lpfc hba data structure. 11513 * 11514 * This routine is called to perform Linux generic firmware upgrade on device 11515 * that supports such feature. 
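 *
 * The image is looked up by model name as "<ModelName>.grp". INT_FW_UPGRADE
 * requests it asynchronously via request_firmware_nowait() with
 * lpfc_write_firmware() as the completion callback, while RUN_FW_UPGRADE
 * fetches it synchronously with request_firmware() and writes it
 * immediately. A typical caller is the probe path:
 *
 *   if (phba->cfg_request_firmware_upgrade)
 *           lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);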
11516 **/ 11517 int 11518 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 11519 { 11520 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 11521 int ret; 11522 const struct firmware *fw; 11523 11524 /* Only supported on SLI4 interface type 2 for now */ 11525 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 11526 LPFC_SLI_INTF_IF_TYPE_2) 11527 return -EPERM; 11528 11529 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 11530 11531 if (fw_upgrade == INT_FW_UPGRADE) { 11532 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 11533 file_name, &phba->pcidev->dev, 11534 GFP_KERNEL, (void *)phba, 11535 lpfc_write_firmware); 11536 } else if (fw_upgrade == RUN_FW_UPGRADE) { 11537 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 11538 if (!ret) 11539 lpfc_write_firmware(fw, (void *)phba); 11540 } else { 11541 ret = -EINVAL; 11542 } 11543 11544 return ret; 11545 } 11546 11547 /** 11548 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 11549 * @pdev: pointer to PCI device 11550 * @pid: pointer to PCI device identifier 11551 * 11552 * This routine is called from the kernel's PCI subsystem to device with 11553 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 11554 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 11555 * information of the device and driver to see if the driver state that it 11556 * can support this kind of device. If the match is successful, the driver 11557 * core invokes this routine. If this routine determines it can claim the HBA, 11558 * it does all the initialization that it needs to do to handle the HBA 11559 * properly. 11560 * 11561 * Return code 11562 * 0 - driver can claim the device 11563 * negative value - driver can not claim the device 11564 **/ 11565 static int 11566 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 11567 { 11568 struct lpfc_hba *phba; 11569 struct lpfc_vport *vport = NULL; 11570 struct Scsi_Host *shost = NULL; 11571 int error; 11572 uint32_t cfg_mode, intr_mode; 11573 11574 /* Allocate memory for HBA structure */ 11575 phba = lpfc_hba_alloc(pdev); 11576 if (!phba) 11577 return -ENOMEM; 11578 11579 /* Perform generic PCI device enabling operation */ 11580 error = lpfc_enable_pci_dev(phba); 11581 if (error) 11582 goto out_free_phba; 11583 11584 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 11585 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 11586 if (error) 11587 goto out_disable_pci_dev; 11588 11589 /* Set up SLI-4 specific device PCI memory space */ 11590 error = lpfc_sli4_pci_mem_setup(phba); 11591 if (error) { 11592 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11593 "1410 Failed to set up pci memory space.\n"); 11594 goto out_disable_pci_dev; 11595 } 11596 11597 /* Set up SLI-4 Specific device driver resources */ 11598 error = lpfc_sli4_driver_resource_setup(phba); 11599 if (error) { 11600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11601 "1412 Failed to set up driver resource.\n"); 11602 goto out_unset_pci_mem_s4; 11603 } 11604 11605 INIT_LIST_HEAD(&phba->active_rrq_list); 11606 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 11607 11608 /* Set up common device driver resources */ 11609 error = lpfc_setup_driver_resource_phase2(phba); 11610 if (error) { 11611 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11612 "1414 Failed to set up driver resource.\n"); 11613 goto out_unset_driver_resource_s4; 11614 } 11615 11616 /* Get the default values for Model Name and 
Description */ 11617 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 11618 11619 /* Create SCSI host to the physical port */ 11620 error = lpfc_create_shost(phba); 11621 if (error) { 11622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11623 "1415 Failed to create scsi host.\n"); 11624 goto out_unset_driver_resource; 11625 } 11626 11627 /* Configure sysfs attributes */ 11628 vport = phba->pport; 11629 error = lpfc_alloc_sysfs_attr(vport); 11630 if (error) { 11631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11632 "1416 Failed to allocate sysfs attr\n"); 11633 goto out_destroy_shost; 11634 } 11635 11636 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 11637 /* Now, trying to enable interrupt and bring up the device */ 11638 cfg_mode = phba->cfg_use_msi; 11639 11640 /* Put device to a known state before enabling interrupt */ 11641 lpfc_stop_port(phba); 11642 11643 /* Configure and enable interrupt */ 11644 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 11645 if (intr_mode == LPFC_INTR_ERROR) { 11646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11647 "0426 Failed to enable interrupt.\n"); 11648 error = -ENODEV; 11649 goto out_free_sysfs_attr; 11650 } 11651 /* Default to single EQ for non-MSI-X */ 11652 if (phba->intr_type != MSIX) { 11653 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) 11654 phba->cfg_fcp_io_channel = 1; 11655 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11656 phba->cfg_nvme_io_channel = 1; 11657 if (phba->nvmet_support) 11658 phba->cfg_nvmet_mrq = 1; 11659 } 11660 phba->io_channel_irqs = 1; 11661 } 11662 11663 /* Set up SLI-4 HBA */ 11664 if (lpfc_sli4_hba_setup(phba)) { 11665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11666 "1421 Failed to set up hba\n"); 11667 error = -ENODEV; 11668 goto out_disable_intr; 11669 } 11670 11671 /* Log the current active interrupt mode */ 11672 phba->intr_mode = intr_mode; 11673 lpfc_log_intr_mode(phba, intr_mode); 11674 11675 /* Perform post initialization setup */ 11676 lpfc_post_init_setup(phba); 11677 11678 /* NVME support in FW earlier in the driver load corrects the 11679 * FC4 type making a check for nvme_support unnecessary. 11680 */ 11681 if ((phba->nvmet_support == 0) && 11682 (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { 11683 /* Create NVME binding with nvme_fc_transport. This 11684 * ensures the vport is initialized. If the localport 11685 * create fails, it should not unload the driver to 11686 * support field issues. 11687 */ 11688 error = lpfc_nvme_create_localport(vport); 11689 if (error) { 11690 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11691 "6004 NVME registration failed, " 11692 "error x%x\n", 11693 error); 11694 } 11695 } 11696 11697 /* check for firmware upgrade or downgrade */ 11698 if (phba->cfg_request_firmware_upgrade) 11699 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 11700 11701 /* Check if there are static vports to be created. 
*/ 11702 lpfc_create_static_vport(phba); 11703 return 0; 11704 11705 out_disable_intr: 11706 lpfc_sli4_disable_intr(phba); 11707 out_free_sysfs_attr: 11708 lpfc_free_sysfs_attr(vport); 11709 out_destroy_shost: 11710 lpfc_destroy_shost(phba); 11711 out_unset_driver_resource: 11712 lpfc_unset_driver_resource_phase2(phba); 11713 out_unset_driver_resource_s4: 11714 lpfc_sli4_driver_resource_unset(phba); 11715 out_unset_pci_mem_s4: 11716 lpfc_sli4_pci_mem_unset(phba); 11717 out_disable_pci_dev: 11718 lpfc_disable_pci_dev(phba); 11719 if (shost) 11720 scsi_host_put(shost); 11721 out_free_phba: 11722 lpfc_hba_free(phba); 11723 return error; 11724 } 11725 11726 /** 11727 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 11728 * @pdev: pointer to PCI device 11729 * 11730 * This routine is called from the kernel's PCI subsystem to device with 11731 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 11732 * removed from PCI bus, it performs all the necessary cleanup for the HBA 11733 * device to be removed from the PCI subsystem properly. 11734 **/ 11735 static void 11736 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 11737 { 11738 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11739 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 11740 struct lpfc_vport **vports; 11741 struct lpfc_hba *phba = vport->phba; 11742 int i; 11743 11744 /* Mark the device unloading flag */ 11745 spin_lock_irq(&phba->hbalock); 11746 vport->load_flag |= FC_UNLOADING; 11747 spin_unlock_irq(&phba->hbalock); 11748 11749 /* Free the HBA sysfs attributes */ 11750 lpfc_free_sysfs_attr(vport); 11751 11752 /* Release all the vports against this physical port */ 11753 vports = lpfc_create_vport_work_array(phba); 11754 if (vports != NULL) 11755 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11756 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 11757 continue; 11758 fc_vport_terminate(vports[i]->fc_vport); 11759 } 11760 lpfc_destroy_vport_work_array(phba, vports); 11761 11762 /* Remove FC host and then SCSI host with the physical port */ 11763 fc_remove_host(shost); 11764 scsi_remove_host(shost); 11765 11766 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 11767 * localports are destroyed after to cleanup all transport memory. 11768 */ 11769 lpfc_cleanup(vport); 11770 lpfc_nvmet_destroy_targetport(phba); 11771 lpfc_nvme_destroy_localport(vport); 11772 11773 /* 11774 * Bring down the SLI Layer. This step disables all interrupts, 11775 * clears the rings, discards all mailbox commands, and resets 11776 * the HBA FCoE function. 11777 */ 11778 lpfc_debugfs_terminate(vport); 11779 lpfc_sli4_hba_unset(phba); 11780 11781 lpfc_stop_hba_timers(phba); 11782 spin_lock_irq(&phba->hbalock); 11783 list_del_init(&vport->listentry); 11784 spin_unlock_irq(&phba->hbalock); 11785 11786 /* Perform scsi free before driver resource_unset since scsi 11787 * buffers are released to their corresponding pools here. 
11788 */ 11789 lpfc_scsi_free(phba); 11790 lpfc_nvme_free(phba); 11791 lpfc_free_iocb_list(phba); 11792 11793 lpfc_unset_driver_resource_phase2(phba); 11794 lpfc_sli4_driver_resource_unset(phba); 11795 11796 /* Unmap adapter Control and Doorbell registers */ 11797 lpfc_sli4_pci_mem_unset(phba); 11798 11799 /* Release PCI resources and disable device's PCI function */ 11800 scsi_host_put(shost); 11801 lpfc_disable_pci_dev(phba); 11802 11803 /* Finally, free the driver's device data structure */ 11804 lpfc_hba_free(phba); 11805 11806 return; 11807 } 11808 11809 /** 11810 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 11811 * @pdev: pointer to PCI device 11812 * @msg: power management message 11813 * 11814 * This routine is called from the kernel's PCI subsystem to support system 11815 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 11816 * this method, it quiesces the device by stopping the driver's worker 11817 * thread for the device, turning off device's interrupt and DMA, and bring 11818 * the device offline. Note that as the driver implements the minimum PM 11819 * requirements to a power-aware driver's PM support for suspend/resume -- all 11820 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 11821 * method call will be treated as SUSPEND and the driver will fully 11822 * reinitialize its device during resume() method call, the driver will set 11823 * device to PCI_D3hot state in PCI config space instead of setting it 11824 * according to the @msg provided by the PM. 11825 * 11826 * Return code 11827 * 0 - driver suspended the device 11828 * Error otherwise 11829 **/ 11830 static int 11831 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 11832 { 11833 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11834 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11835 11836 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11837 "2843 PCI device Power Management suspend.\n"); 11838 11839 /* Bring down the device */ 11840 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11841 lpfc_offline(phba); 11842 kthread_stop(phba->worker_thread); 11843 11844 /* Disable interrupt from device */ 11845 lpfc_sli4_disable_intr(phba); 11846 lpfc_sli4_queue_destroy(phba); 11847 11848 /* Save device state to PCI config space */ 11849 pci_save_state(pdev); 11850 pci_set_power_state(pdev, PCI_D3hot); 11851 11852 return 0; 11853 } 11854 11855 /** 11856 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 11857 * @pdev: pointer to PCI device 11858 * 11859 * This routine is called from the kernel's PCI subsystem to support system 11860 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes 11861 * this method, it restores the device's PCI config space state and fully 11862 * reinitializes the device and brings it online. Note that as the driver 11863 * implements the minimum PM requirements to a power-aware driver's PM for 11864 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 11865 * to the suspend() method call will be treated as SUSPEND and the driver 11866 * will fully reinitialize its device during resume() method call, the device 11867 * will be set to PCI_D0 directly in PCI config space before restoring the 11868 * state. 
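 *
 * Sketch of the resume sequence implemented below: move the function back to
 * PCI_D0 and restore its config space (re-saving it, since pci_restore_state()
 * clears the saved_state flag), restart the lpfc_worker thread with
 * kthread_run(), re-enable interrupts with lpfc_sli4_enable_intr(), then
 * lpfc_sli_brdrestart() and lpfc_online() to bring the port back online.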
11869 * 11870 * Return code 11871 * 0 - driver suspended the device 11872 * Error otherwise 11873 **/ 11874 static int 11875 lpfc_pci_resume_one_s4(struct pci_dev *pdev) 11876 { 11877 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11878 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11879 uint32_t intr_mode; 11880 int error; 11881 11882 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11883 "0292 PCI device Power Management resume.\n"); 11884 11885 /* Restore device state from PCI config space */ 11886 pci_set_power_state(pdev, PCI_D0); 11887 pci_restore_state(pdev); 11888 11889 /* 11890 * As the new kernel behavior of pci_restore_state() API call clears 11891 * device saved_state flag, need to save the restored state again. 11892 */ 11893 pci_save_state(pdev); 11894 11895 if (pdev->is_busmaster) 11896 pci_set_master(pdev); 11897 11898 /* Startup the kernel thread for this host adapter. */ 11899 phba->worker_thread = kthread_run(lpfc_do_work, phba, 11900 "lpfc_worker_%d", phba->brd_no); 11901 if (IS_ERR(phba->worker_thread)) { 11902 error = PTR_ERR(phba->worker_thread); 11903 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11904 "0293 PM resume failed to start worker " 11905 "thread: error=x%x.\n", error); 11906 return error; 11907 } 11908 11909 /* Configure and enable interrupt */ 11910 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 11911 if (intr_mode == LPFC_INTR_ERROR) { 11912 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11913 "0294 PM resume Failed to enable interrupt\n"); 11914 return -EIO; 11915 } else 11916 phba->intr_mode = intr_mode; 11917 11918 /* Restart HBA and bring it online */ 11919 lpfc_sli_brdrestart(phba); 11920 lpfc_online(phba); 11921 11922 /* Log the current active interrupt mode */ 11923 lpfc_log_intr_mode(phba, phba->intr_mode); 11924 11925 return 0; 11926 } 11927 11928 /** 11929 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 11930 * @phba: pointer to lpfc hba data structure. 11931 * 11932 * This routine is called to prepare the SLI4 device for PCI slot recover. It 11933 * aborts all the outstanding SCSI I/Os to the pci device. 11934 **/ 11935 static void 11936 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 11937 { 11938 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11939 "2828 PCI channel I/O abort preparing for recovery\n"); 11940 /* 11941 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 11942 * and let the SCSI mid-layer to retry them to recover. 11943 */ 11944 lpfc_sli_abort_fcp_rings(phba); 11945 } 11946 11947 /** 11948 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 11949 * @phba: pointer to lpfc hba data structure. 11950 * 11951 * This routine is called to prepare the SLI4 device for PCI slot reset. It 11952 * disables the device interrupt and pci device, and aborts the internal FCP 11953 * pending I/Os. 11954 **/ 11955 static void 11956 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 11957 { 11958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11959 "2826 PCI channel disable preparing for reset\n"); 11960 11961 /* Block any management I/Os to the device */ 11962 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 11963 11964 /* Block all SCSI devices' I/Os on the host */ 11965 lpfc_scsi_dev_block(phba); 11966 11967 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 11968 lpfc_sli_flush_fcp_rings(phba); 11969 11970 /* Flush the outstanding NVME IOs if fc4 type enabled. 
*/ 11971 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 11972 lpfc_sli_flush_nvme_rings(phba); 11973 11974 /* stop all timers */ 11975 lpfc_stop_hba_timers(phba); 11976 11977 /* Disable interrupt and pci device */ 11978 lpfc_sli4_disable_intr(phba); 11979 lpfc_sli4_queue_destroy(phba); 11980 pci_disable_device(phba->pcidev); 11981 } 11982 11983 /** 11984 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 11985 * @phba: pointer to lpfc hba data structure. 11986 * 11987 * This routine is called to prepare the SLI4 device for PCI slot permanently 11988 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 11989 * pending I/Os. 11990 **/ 11991 static void 11992 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 11993 { 11994 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11995 "2827 PCI channel permanent disable for failure\n"); 11996 11997 /* Block all SCSI devices' I/Os on the host */ 11998 lpfc_scsi_dev_block(phba); 11999 12000 /* stop all timers */ 12001 lpfc_stop_hba_timers(phba); 12002 12003 /* Clean up all driver's outstanding SCSI I/Os */ 12004 lpfc_sli_flush_fcp_rings(phba); 12005 12006 /* Flush the outstanding NVME IOs if fc4 type enabled. */ 12007 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12008 lpfc_sli_flush_nvme_rings(phba); 12009 } 12010 12011 /** 12012 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 12013 * @pdev: pointer to PCI device. 12014 * @state: the current PCI connection state. 12015 * 12016 * This routine is called from the PCI subsystem for error handling to device 12017 * with SLI-4 interface spec. This function is called by the PCI subsystem 12018 * after a PCI bus error affecting this device has been detected. When this 12019 * function is invoked, it will need to stop all the I/Os and interrupt(s) 12020 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 12021 * for the PCI subsystem to perform proper recovery as desired. 12022 * 12023 * Return codes 12024 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12025 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12026 **/ 12027 static pci_ers_result_t 12028 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 12029 { 12030 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12031 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12032 12033 switch (state) { 12034 case pci_channel_io_normal: 12035 /* Non-fatal error, prepare for recovery */ 12036 lpfc_sli4_prep_dev_for_recover(phba); 12037 return PCI_ERS_RESULT_CAN_RECOVER; 12038 case pci_channel_io_frozen: 12039 /* Fatal error, prepare for slot reset */ 12040 lpfc_sli4_prep_dev_for_reset(phba); 12041 return PCI_ERS_RESULT_NEED_RESET; 12042 case pci_channel_io_perm_failure: 12043 /* Permanent failure, prepare for device down */ 12044 lpfc_sli4_prep_dev_for_perm_failure(phba); 12045 return PCI_ERS_RESULT_DISCONNECT; 12046 default: 12047 /* Unknown state, prepare and request slot reset */ 12048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12049 "2825 Unknown PCI error state: x%x\n", state); 12050 lpfc_sli4_prep_dev_for_reset(phba); 12051 return PCI_ERS_RESULT_NEED_RESET; 12052 } 12053 } 12054 12055 /** 12056 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 12057 * @pdev: pointer to PCI device. 12058 * 12059 * This routine is called from the PCI subsystem for error handling to device 12060 * with SLI-4 interface spec. 
It is called after PCI bus has been reset to 12061 * restart the PCI card from scratch, as if from a cold-boot. During the 12062 * PCI subsystem error recovery, after the driver returns 12063 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 12064 * recovery and then call this routine before calling the .resume method to 12065 * recover the device. This function will initialize the HBA device, enable 12066 * the interrupt, but it will just put the HBA to offline state without 12067 * passing any I/O traffic. 12068 * 12069 * Return codes 12070 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12071 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12072 */ 12073 static pci_ers_result_t 12074 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 12075 { 12076 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12077 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12078 struct lpfc_sli *psli = &phba->sli; 12079 uint32_t intr_mode; 12080 12081 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 12082 if (pci_enable_device_mem(pdev)) { 12083 printk(KERN_ERR "lpfc: Cannot re-enable " 12084 "PCI device after reset.\n"); 12085 return PCI_ERS_RESULT_DISCONNECT; 12086 } 12087 12088 pci_restore_state(pdev); 12089 12090 /* 12091 * As the new kernel behavior of pci_restore_state() API call clears 12092 * device saved_state flag, need to save the restored state again. 12093 */ 12094 pci_save_state(pdev); 12095 12096 if (pdev->is_busmaster) 12097 pci_set_master(pdev); 12098 12099 spin_lock_irq(&phba->hbalock); 12100 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 12101 spin_unlock_irq(&phba->hbalock); 12102 12103 /* Configure and enable interrupt */ 12104 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 12105 if (intr_mode == LPFC_INTR_ERROR) { 12106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12107 "2824 Cannot re-enable interrupt after " 12108 "slot reset.\n"); 12109 return PCI_ERS_RESULT_DISCONNECT; 12110 } else 12111 phba->intr_mode = intr_mode; 12112 12113 /* Log the current active interrupt mode */ 12114 lpfc_log_intr_mode(phba, phba->intr_mode); 12115 12116 return PCI_ERS_RESULT_RECOVERED; 12117 } 12118 12119 /** 12120 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 12121 * @pdev: pointer to PCI device 12122 * 12123 * This routine is called from the PCI subsystem for error handling to device 12124 * with SLI-4 interface spec. It is called when kernel error recovery tells 12125 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 12126 * error recovery. After this call, traffic can start to flow from this device 12127 * again. 12128 **/ 12129 static void 12130 lpfc_io_resume_s4(struct pci_dev *pdev) 12131 { 12132 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12133 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12134 12135 /* 12136 * In case of slot reset, as function reset is performed through 12137 * mailbox command which needs DMA to be enabled, this operation 12138 * has to be moved to the io resume phase. Taking device offline 12139 * will perform the necessary cleanup. 
12140 */ 12141 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 12142 /* Perform device reset */ 12143 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12144 lpfc_offline(phba); 12145 lpfc_sli_brdrestart(phba); 12146 /* Bring the device back online */ 12147 lpfc_online(phba); 12148 } 12149 12150 /* Clean up Advanced Error Reporting (AER) if needed */ 12151 if (phba->hba_flag & HBA_AER_ENABLED) 12152 pci_cleanup_aer_uncorrect_error_status(pdev); 12153 } 12154 12155 /** 12156 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 12157 * @pdev: pointer to PCI device 12158 * @pid: pointer to PCI device identifier 12159 * 12160 * This routine is to be registered to the kernel's PCI subsystem. When an 12161 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 12162 * at PCI device-specific information of the device and driver to see if the 12163 * driver state that it can support this kind of device. If the match is 12164 * successful, the driver core invokes this routine. This routine dispatches 12165 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 12166 * do all the initialization that it needs to do to handle the HBA device 12167 * properly. 12168 * 12169 * Return code 12170 * 0 - driver can claim the device 12171 * negative value - driver can not claim the device 12172 **/ 12173 static int 12174 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 12175 { 12176 int rc; 12177 struct lpfc_sli_intf intf; 12178 12179 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 12180 return -ENODEV; 12181 12182 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 12183 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 12184 rc = lpfc_pci_probe_one_s4(pdev, pid); 12185 else 12186 rc = lpfc_pci_probe_one_s3(pdev, pid); 12187 12188 return rc; 12189 } 12190 12191 /** 12192 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 12193 * @pdev: pointer to PCI device 12194 * 12195 * This routine is to be registered to the kernel's PCI subsystem. When an 12196 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 12197 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 12198 * remove routine, which will perform all the necessary cleanup for the 12199 * device to be removed from the PCI subsystem properly. 12200 **/ 12201 static void 12202 lpfc_pci_remove_one(struct pci_dev *pdev) 12203 { 12204 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12205 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12206 12207 switch (phba->pci_dev_grp) { 12208 case LPFC_PCI_DEV_LP: 12209 lpfc_pci_remove_one_s3(pdev); 12210 break; 12211 case LPFC_PCI_DEV_OC: 12212 lpfc_pci_remove_one_s4(pdev); 12213 break; 12214 default: 12215 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12216 "1424 Invalid PCI device group: 0x%x\n", 12217 phba->pci_dev_grp); 12218 break; 12219 } 12220 return; 12221 } 12222 12223 /** 12224 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 12225 * @pdev: pointer to PCI device 12226 * @msg: power management message 12227 * 12228 * This routine is to be registered to the kernel's PCI subsystem to support 12229 * system Power Management (PM). When PM invokes this method, it dispatches 12230 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 12231 * suspend the device. 
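 *
 * These dispatchers are what the driver registers with the PCI core, in
 * struct pci_driver lpfc_driver near the end of this file:
 *
 *   .suspend  = lpfc_pci_suspend_one,
 *   .resume   = lpfc_pci_resume_one,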
12232 * 12233 * Return code 12234 * 0 - driver suspended the device 12235 * Error otherwise 12236 **/ 12237 static int 12238 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 12239 { 12240 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12241 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12242 int rc = -ENODEV; 12243 12244 switch (phba->pci_dev_grp) { 12245 case LPFC_PCI_DEV_LP: 12246 rc = lpfc_pci_suspend_one_s3(pdev, msg); 12247 break; 12248 case LPFC_PCI_DEV_OC: 12249 rc = lpfc_pci_suspend_one_s4(pdev, msg); 12250 break; 12251 default: 12252 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12253 "1425 Invalid PCI device group: 0x%x\n", 12254 phba->pci_dev_grp); 12255 break; 12256 } 12257 return rc; 12258 } 12259 12260 /** 12261 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 12262 * @pdev: pointer to PCI device 12263 * 12264 * This routine is to be registered to the kernel's PCI subsystem to support 12265 * system Power Management (PM). When PM invokes this method, it dispatches 12266 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 12267 * resume the device. 12268 * 12269 * Return code 12270 * 0 - driver suspended the device 12271 * Error otherwise 12272 **/ 12273 static int 12274 lpfc_pci_resume_one(struct pci_dev *pdev) 12275 { 12276 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12277 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12278 int rc = -ENODEV; 12279 12280 switch (phba->pci_dev_grp) { 12281 case LPFC_PCI_DEV_LP: 12282 rc = lpfc_pci_resume_one_s3(pdev); 12283 break; 12284 case LPFC_PCI_DEV_OC: 12285 rc = lpfc_pci_resume_one_s4(pdev); 12286 break; 12287 default: 12288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12289 "1426 Invalid PCI device group: 0x%x\n", 12290 phba->pci_dev_grp); 12291 break; 12292 } 12293 return rc; 12294 } 12295 12296 /** 12297 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 12298 * @pdev: pointer to PCI device. 12299 * @state: the current PCI connection state. 12300 * 12301 * This routine is registered to the PCI subsystem for error handling. This 12302 * function is called by the PCI subsystem after a PCI bus error affecting 12303 * this device has been detected. When this routine is invoked, it dispatches 12304 * the action to the proper SLI-3 or SLI-4 device error detected handling 12305 * routine, which will perform the proper error detected operation. 12306 * 12307 * Return codes 12308 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12309 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12310 **/ 12311 static pci_ers_result_t 12312 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 12313 { 12314 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12315 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12316 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 12317 12318 switch (phba->pci_dev_grp) { 12319 case LPFC_PCI_DEV_LP: 12320 rc = lpfc_io_error_detected_s3(pdev, state); 12321 break; 12322 case LPFC_PCI_DEV_OC: 12323 rc = lpfc_io_error_detected_s4(pdev, state); 12324 break; 12325 default: 12326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12327 "1427 Invalid PCI device group: 0x%x\n", 12328 phba->pci_dev_grp); 12329 break; 12330 } 12331 return rc; 12332 } 12333 12334 /** 12335 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 12336 * @pdev: pointer to PCI device. 12337 * 12338 * This routine is registered to the PCI subsystem for error handling. 
This 12339 * function is called after PCI bus has been reset to restart the PCI card 12340 * from scratch, as if from a cold-boot. When this routine is invoked, it 12341 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 12342 * routine, which will perform the proper device reset. 12343 * 12344 * Return codes 12345 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12346 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12347 **/ 12348 static pci_ers_result_t 12349 lpfc_io_slot_reset(struct pci_dev *pdev) 12350 { 12351 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12352 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12353 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 12354 12355 switch (phba->pci_dev_grp) { 12356 case LPFC_PCI_DEV_LP: 12357 rc = lpfc_io_slot_reset_s3(pdev); 12358 break; 12359 case LPFC_PCI_DEV_OC: 12360 rc = lpfc_io_slot_reset_s4(pdev); 12361 break; 12362 default: 12363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12364 "1428 Invalid PCI device group: 0x%x\n", 12365 phba->pci_dev_grp); 12366 break; 12367 } 12368 return rc; 12369 } 12370 12371 /** 12372 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 12373 * @pdev: pointer to PCI device 12374 * 12375 * This routine is registered to the PCI subsystem for error handling. It 12376 * is called when kernel error recovery tells the lpfc driver that it is 12377 * OK to resume normal PCI operation after PCI bus error recovery. When 12378 * this routine is invoked, it dispatches the action to the proper SLI-3 12379 * or SLI-4 device io_resume routine, which will resume the device operation. 12380 **/ 12381 static void 12382 lpfc_io_resume(struct pci_dev *pdev) 12383 { 12384 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12385 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12386 12387 switch (phba->pci_dev_grp) { 12388 case LPFC_PCI_DEV_LP: 12389 lpfc_io_resume_s3(pdev); 12390 break; 12391 case LPFC_PCI_DEV_OC: 12392 lpfc_io_resume_s4(pdev); 12393 break; 12394 default: 12395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12396 "1429 Invalid PCI device group: 0x%x\n", 12397 phba->pci_dev_grp); 12398 break; 12399 } 12400 return; 12401 } 12402 12403 /** 12404 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 12405 * @phba: pointer to lpfc hba data structure. 12406 * 12407 * This routine checks to see if OAS is supported for this adapter. If 12408 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 12409 * the enable oas flag is cleared and the pool created for OAS device data 12410 * is destroyed. 12411 * 12412 **/ 12413 void 12414 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 12415 { 12416 12417 if (!phba->cfg_EnableXLane) 12418 return; 12419 12420 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 12421 phba->cfg_fof = 1; 12422 } else { 12423 phba->cfg_fof = 0; 12424 if (phba->device_data_mem_pool) 12425 mempool_destroy(phba->device_data_mem_pool); 12426 phba->device_data_mem_pool = NULL; 12427 } 12428 12429 return; 12430 } 12431 12432 /** 12433 * lpfc_fof_queue_setup - Set up all the fof queues 12434 * @phba: pointer to lpfc hba data structure. 12435 * 12436 * This routine is invoked to set up all the fof queues for the FC HBA 12437 * operation. 
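 *
 * Sketch of the creation order implemented below: the FOF event queue is set
 * up first with lpfc_eq_create(); when OAS (cfg_fof) is enabled, the OAS
 * completion queue is then created on that EQ and the OAS work queue on the
 * CQ, with the error path unwinding in the reverse order.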
/**
 * lpfc_fof_queue_setup - Set up all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the fof queues for the FC HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
int
lpfc_fof_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	int rc;

	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
	if (rc)
		return -ENOMEM;

	if (phba->cfg_fof) {
		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
		if (rc)
			goto out_oas_cq;

		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
				    phba->sli4_hba.oas_cq, LPFC_FCP);
		if (rc)
			goto out_oas_wq;

		/* Bind this CQ/WQ to the OAS ring */
		pring = phba->sli4_hba.oas_wq->pring;
		pring->sli.sli4.wqp =
			(void *)phba->sli4_hba.oas_wq;
		phba->sli4_hba.oas_cq->pring = pring;
	}

	return 0;

out_oas_wq:
	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
out_oas_cq:
	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
	return rc;
}

/**
 * lpfc_fof_queue_create - Create all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the fof queues for the FC HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as placeholder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	uint32_t wqesize;

	/* Create FOF EQ */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc)
		goto out_error;

	qdesc->qe_valid = 1;
	phba->sli4_hba.fof_eq = qdesc;

	if (phba->cfg_fof) {
		/* Create OAS CQ */
		if (phba->enab_exp_wqcq_pages)
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_EXPANDED_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      LPFC_CQE_EXP_COUNT);
		else
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      phba->sli4_hba.cq_ecount);
		if (!qdesc)
			goto out_error;

		qdesc->qe_valid = 1;
		phba->sli4_hba.oas_cq = qdesc;

		/* Create OAS WQ */
		if (phba->enab_exp_wqcq_pages) {
			wqesize = (phba->fcp_embed_io) ?
				LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_EXPANDED_PAGE_SIZE,
						      wqesize,
						      LPFC_WQE_EXP_COUNT);
		} else
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.wq_esize,
						      phba->sli4_hba.wq_ecount);

		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);

	}
	return 0;

out_error:
	lpfc_fof_queue_destroy(phba);
	return -ENOMEM;
}

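/*
 * Note (illustrative, editor's sketch): lpfc_fof_queue_destroy() below also
 * serves as the error-unwind path of lpfc_fof_queue_create() above (see the
 * out_error label), which is why each queue pointer is checked against NULL
 * before being freed and is then cleared, e.g.:
 *
 *	if (phba->sli4_hba.fof_eq) {
 *		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
 *		phba->sli4_hba.fof_eq = NULL;
 *	}
 */
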
/**
 * lpfc_fof_queue_destroy - Destroy all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the fof queues created for the
 * FC HBA operation.
 *
 * Return codes
 *	0 - successful
 **/
int
lpfc_fof_queue_destroy(struct lpfc_hba *phba)
{
	/* Release FOF Event queue */
	if (phba->sli4_hba.fof_eq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
		phba->sli4_hba.fof_eq = NULL;
	}

	/* Release OAS Completion queue */
	if (phba->sli4_hba.oas_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
		phba->sli4_hba.oas_cq = NULL;
	}

	/* Release OAS Work queue */
	if (phba->sli4_hba.oas_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
		phba->sli4_hba.oas_wq = NULL;
	}
	return 0;
}

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		return -ENOMEM;
	}
	lpfc_nvme_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = num_present_cpus();

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

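/*
 * Note (illustrative, editor's sketch): lpfc_exit() below releases what
 * lpfc_init() above acquired.  The pairing, shown only as a reminder and
 * not as additional driver logic, is:
 *
 *	misc_register(&lpfc_mgmt_dev)       <->  misc_deregister(&lpfc_mgmt_dev)
 *	fc_attach_transport(...)            <->  fc_release_transport(...)
 *	pci_register_driver(&lpfc_driver)   <->  pci_unregister_driver(&lpfc_driver)
 *
 * plus the exit-only cleanup of the BlockGuard debug dump buffers and the
 * lpfc_hba_index IDR.
 */
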
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);