1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/bitops.h> 41 42 #include <scsi/scsi.h> 43 #include <scsi/scsi_device.h> 44 #include <scsi/scsi_host.h> 45 #include <scsi/scsi_transport_fc.h> 46 #include <scsi/scsi_tcq.h> 47 #include <scsi/fc/fc_fs.h> 48 49 #include <linux/nvme-fc-driver.h> 50 51 #include "lpfc_hw4.h" 52 #include "lpfc_hw.h" 53 #include "lpfc_sli.h" 54 #include "lpfc_sli4.h" 55 #include "lpfc_nl.h" 56 #include "lpfc_disc.h" 57 #include "lpfc.h" 58 #include "lpfc_scsi.h" 59 #include "lpfc_nvme.h" 60 #include "lpfc_nvmet.h" 61 #include "lpfc_logmsg.h" 62 #include "lpfc_crtn.h" 63 #include "lpfc_vport.h" 64 #include "lpfc_version.h" 65 #include "lpfc_ids.h" 66 67 char *_dump_buf_data; 68 unsigned long _dump_buf_data_order; 69 char *_dump_buf_dif; 70 unsigned long _dump_buf_dif_order; 71 spinlock_t _dump_buf_lock; 72 73 /* Used when mapping IRQ vectors in a driver centric manner */ 74 uint16_t *lpfc_used_cpu; 75 uint32_t lpfc_present_cpu; 76 77 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 78 static int lpfc_post_rcv_buf(struct lpfc_hba *); 79 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 80 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 81 static int lpfc_setup_endian_order(struct lpfc_hba *); 82 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 83 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 84 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 85 static void lpfc_init_sgl_list(struct lpfc_hba *); 86 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 87 static void lpfc_free_active_sgl(struct lpfc_hba *); 88 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 89 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 90 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 91 static void 
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 92 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 93 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 94 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 95 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 96 97 static struct scsi_transport_template *lpfc_transport_template = NULL; 98 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 99 static DEFINE_IDR(lpfc_hba_index); 100 #define LPFC_NVMET_BUF_POST 254 101 102 /** 103 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 104 * @phba: pointer to lpfc hba data structure. 105 * 106 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 107 * mailbox command. It retrieves the revision information from the HBA and 108 * collects the Vital Product Data (VPD) about the HBA for preparing the 109 * configuration of the HBA. 110 * 111 * Return codes: 112 * 0 - success. 113 * -ERESTART - requests the SLI layer to reset the HBA and try again. 114 * Any other value - indicates an error. 115 **/ 116 int 117 lpfc_config_port_prep(struct lpfc_hba *phba) 118 { 119 lpfc_vpd_t *vp = &phba->vpd; 120 int i = 0, rc; 121 LPFC_MBOXQ_t *pmb; 122 MAILBOX_t *mb; 123 char *lpfc_vpd_data = NULL; 124 uint16_t offset = 0; 125 static char licensed[56] = 126 "key unlock for use with gnu public licensed code only\0"; 127 static int init_key = 1; 128 129 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 130 if (!pmb) { 131 phba->link_state = LPFC_HBA_ERROR; 132 return -ENOMEM; 133 } 134 135 mb = &pmb->u.mb; 136 phba->link_state = LPFC_INIT_MBX_CMDS; 137 138 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 139 if (init_key) { 140 uint32_t *ptext = (uint32_t *) licensed; 141 142 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 143 *ptext = cpu_to_be32(*ptext); 144 init_key = 0; 145 } 146 147 lpfc_read_nv(phba, pmb); 148 memset((char*)mb->un.varRDnvp.rsvd3, 0, 149 sizeof (mb->un.varRDnvp.rsvd3)); 150 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 151 sizeof (licensed)); 152 153 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 154 155 if (rc != MBX_SUCCESS) { 156 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 157 "0324 Config Port initialization " 158 "error, mbxCmd x%x READ_NVPARM, " 159 "mbxStatus x%x\n", 160 mb->mbxCommand, mb->mbxStatus); 161 mempool_free(pmb, phba->mbox_mem_pool); 162 return -ERESTART; 163 } 164 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 165 sizeof(phba->wwnn)); 166 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 167 sizeof(phba->wwpn)); 168 } 169 170 phba->sli3_options = 0x0; 171 172 /* Setup and issue mailbox READ REV command */ 173 lpfc_read_rev(phba, pmb); 174 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 175 if (rc != MBX_SUCCESS) { 176 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 177 "0439 Adapter failed to init, mbxCmd x%x " 178 "READ_REV, mbxStatus x%x\n", 179 mb->mbxCommand, mb->mbxStatus); 180 mempool_free( pmb, phba->mbox_mem_pool); 181 return -ERESTART; 182 } 183 184 185 /* 186 * The value of rr must be 1 since the driver set the cv field to 1. 187 * This setting requires the FW to set all revision fields. 
 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
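 *
 * For reference, lpfc_config_port_post() below arms this handler as:
 *
 *   lpfc_config_async(phba, pmb, LPFC_ELS_RING);
 *   pmb->mbox_cmpl = lpfc_config_async_cmpl;
 *   pmb->vport = phba->pport;
 *   rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);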
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get the
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *	None.
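 *
 * For reference, lpfc_config_port_post() below invokes this routine once the
 * READ_SPARAM mailbox data has been copied into the vport:
 *
 *   memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
 *   lpfc_update_vport_wwn(vport);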
340 **/ 341 void 342 lpfc_update_vport_wwn(struct lpfc_vport *vport) 343 { 344 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 345 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 346 347 /* If the soft name exists then update it using the service params */ 348 if (vport->phba->cfg_soft_wwnn) 349 u64_to_wwn(vport->phba->cfg_soft_wwnn, 350 vport->fc_sparam.nodeName.u.wwn); 351 if (vport->phba->cfg_soft_wwpn) 352 u64_to_wwn(vport->phba->cfg_soft_wwpn, 353 vport->fc_sparam.portName.u.wwn); 354 355 /* 356 * If the name is empty or there exists a soft name 357 * then copy the service params name, otherwise use the fc name 358 */ 359 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 360 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 361 sizeof(struct lpfc_name)); 362 else 363 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 364 sizeof(struct lpfc_name)); 365 366 /* 367 * If the port name has changed, then set the Param changes flag 368 * to unreg the login 369 */ 370 if (vport->fc_portname.u.wwn[0] != 0 && 371 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 372 sizeof(struct lpfc_name))) 373 vport->vport_flag |= FAWWPN_PARAM_CHG; 374 375 if (vport->fc_portname.u.wwn[0] == 0 || 376 vport->phba->cfg_soft_wwpn || 377 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 378 vport->vport_flag & FAWWPN_SET) { 379 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 380 sizeof(struct lpfc_name)); 381 vport->vport_flag &= ~FAWWPN_SET; 382 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 383 vport->vport_flag |= FAWWPN_SET; 384 } 385 else 386 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 387 sizeof(struct lpfc_name)); 388 } 389 390 /** 391 * lpfc_config_port_post - Perform lpfc initialization after config port 392 * @phba: pointer to lpfc hba data structure. 393 * 394 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 395 * command call. It performs all internal resource and state setups on the 396 * port: post IOCB buffers, enable appropriate host interrupt attentions, 397 * ELS ring timers, etc. 398 * 399 * Return codes 400 * 0 - success. 401 * Any other value - error. 402 **/ 403 int 404 lpfc_config_port_post(struct lpfc_hba *phba) 405 { 406 struct lpfc_vport *vport = phba->pport; 407 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 408 LPFC_MBOXQ_t *pmb; 409 MAILBOX_t *mb; 410 struct lpfc_dmabuf *mp; 411 struct lpfc_sli *psli = &phba->sli; 412 uint32_t status, timeout; 413 int i, j; 414 int rc; 415 416 spin_lock_irq(&phba->hbalock); 417 /* 418 * If the Config port completed correctly the HBA is not 419 * over heated any more. 420 */ 421 if (phba->over_temp_state == HBA_OVER_TEMP) 422 phba->over_temp_state = HBA_NORMAL_TEMP; 423 spin_unlock_irq(&phba->hbalock); 424 425 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 426 if (!pmb) { 427 phba->link_state = LPFC_HBA_ERROR; 428 return -ENOMEM; 429 } 430 mb = &pmb->u.mb; 431 432 /* Get login parameters for NID. 
*/ 433 rc = lpfc_read_sparam(phba, pmb, 0); 434 if (rc) { 435 mempool_free(pmb, phba->mbox_mem_pool); 436 return -ENOMEM; 437 } 438 439 pmb->vport = vport; 440 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 441 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 442 "0448 Adapter failed init, mbxCmd x%x " 443 "READ_SPARM mbxStatus x%x\n", 444 mb->mbxCommand, mb->mbxStatus); 445 phba->link_state = LPFC_HBA_ERROR; 446 mp = (struct lpfc_dmabuf *) pmb->context1; 447 mempool_free(pmb, phba->mbox_mem_pool); 448 lpfc_mbuf_free(phba, mp->virt, mp->phys); 449 kfree(mp); 450 return -EIO; 451 } 452 453 mp = (struct lpfc_dmabuf *) pmb->context1; 454 455 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 456 lpfc_mbuf_free(phba, mp->virt, mp->phys); 457 kfree(mp); 458 pmb->context1 = NULL; 459 lpfc_update_vport_wwn(vport); 460 461 /* Update the fc_host data structures with new wwn. */ 462 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 463 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 464 fc_host_max_npiv_vports(shost) = phba->max_vpi; 465 466 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 467 /* This should be consolidated into parse_vpd ? - mr */ 468 if (phba->SerialNumber[0] == 0) { 469 uint8_t *outptr; 470 471 outptr = &vport->fc_nodename.u.s.IEEE[0]; 472 for (i = 0; i < 12; i++) { 473 status = *outptr++; 474 j = ((status & 0xf0) >> 4); 475 if (j <= 9) 476 phba->SerialNumber[i] = 477 (char)((uint8_t) 0x30 + (uint8_t) j); 478 else 479 phba->SerialNumber[i] = 480 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 481 i++; 482 j = (status & 0xf); 483 if (j <= 9) 484 phba->SerialNumber[i] = 485 (char)((uint8_t) 0x30 + (uint8_t) j); 486 else 487 phba->SerialNumber[i] = 488 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 489 } 490 } 491 492 lpfc_read_config(phba, pmb); 493 pmb->vport = vport; 494 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 496 "0453 Adapter failed to init, mbxCmd x%x " 497 "READ_CONFIG, mbxStatus x%x\n", 498 mb->mbxCommand, mb->mbxStatus); 499 phba->link_state = LPFC_HBA_ERROR; 500 mempool_free( pmb, phba->mbox_mem_pool); 501 return -EIO; 502 } 503 504 /* Check if the port is disabled */ 505 lpfc_sli_read_link_ste(phba); 506 507 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 508 i = (mb->un.varRdConfig.max_xri + 1); 509 if (phba->cfg_hba_queue_depth > i) { 510 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 511 "3359 HBA queue depth changed from %d to %d\n", 512 phba->cfg_hba_queue_depth, i); 513 phba->cfg_hba_queue_depth = i; 514 } 515 516 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 517 i = (mb->un.varRdConfig.max_xri >> 3); 518 if (phba->pport->cfg_lun_queue_depth > i) { 519 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 520 "3360 LUN queue depth changed from %d to %d\n", 521 phba->pport->cfg_lun_queue_depth, i); 522 phba->pport->cfg_lun_queue_depth = i; 523 } 524 525 phba->lmt = mb->un.varRdConfig.lmt; 526 527 /* Get the default values for Model Name and Description */ 528 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 529 530 phba->link_state = LPFC_LINK_DOWN; 531 532 /* Only process IOCBs on ELS ring till hba_state is READY */ 533 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 534 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 535 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 536 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 537 538 /* Post receive buffers for desired rings */ 539 if 
(phba->sli_rev != 3) 540 lpfc_post_rcv_buf(phba); 541 542 /* 543 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 544 */ 545 if (phba->intr_type == MSIX) { 546 rc = lpfc_config_msi(phba, pmb); 547 if (rc) { 548 mempool_free(pmb, phba->mbox_mem_pool); 549 return -EIO; 550 } 551 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 552 if (rc != MBX_SUCCESS) { 553 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 554 "0352 Config MSI mailbox command " 555 "failed, mbxCmd x%x, mbxStatus x%x\n", 556 pmb->u.mb.mbxCommand, 557 pmb->u.mb.mbxStatus); 558 mempool_free(pmb, phba->mbox_mem_pool); 559 return -EIO; 560 } 561 } 562 563 spin_lock_irq(&phba->hbalock); 564 /* Initialize ERATT handling flag */ 565 phba->hba_flag &= ~HBA_ERATT_HANDLED; 566 567 /* Enable appropriate host interrupts */ 568 if (lpfc_readl(phba->HCregaddr, &status)) { 569 spin_unlock_irq(&phba->hbalock); 570 return -EIO; 571 } 572 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 573 if (psli->num_rings > 0) 574 status |= HC_R0INT_ENA; 575 if (psli->num_rings > 1) 576 status |= HC_R1INT_ENA; 577 if (psli->num_rings > 2) 578 status |= HC_R2INT_ENA; 579 if (psli->num_rings > 3) 580 status |= HC_R3INT_ENA; 581 582 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 583 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 584 status &= ~(HC_R0INT_ENA); 585 586 writel(status, phba->HCregaddr); 587 readl(phba->HCregaddr); /* flush */ 588 spin_unlock_irq(&phba->hbalock); 589 590 /* Set up ring-0 (ELS) timer */ 591 timeout = phba->fc_ratov * 2; 592 mod_timer(&vport->els_tmofunc, 593 jiffies + msecs_to_jiffies(1000 * timeout)); 594 /* Set up heart beat (HB) timer */ 595 mod_timer(&phba->hb_tmofunc, 596 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 597 phba->hb_outstanding = 0; 598 phba->last_completion_time = jiffies; 599 /* Set up error attention (ERATT) polling timer */ 600 mod_timer(&phba->eratt_poll, 601 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 602 603 if (phba->hba_flag & LINK_DISABLED) { 604 lpfc_printf_log(phba, 605 KERN_ERR, LOG_INIT, 606 "2598 Adapter Link is disabled.\n"); 607 lpfc_down_link(phba, pmb); 608 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 609 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 610 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 611 lpfc_printf_log(phba, 612 KERN_ERR, LOG_INIT, 613 "2599 Adapter failed to issue DOWN_LINK" 614 " mbox command rc 0x%x\n", rc); 615 616 mempool_free(pmb, phba->mbox_mem_pool); 617 return -EIO; 618 } 619 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 620 mempool_free(pmb, phba->mbox_mem_pool); 621 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 622 if (rc) 623 return rc; 624 } 625 /* MBOX buffer will be freed in mbox compl */ 626 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 627 if (!pmb) { 628 phba->link_state = LPFC_HBA_ERROR; 629 return -ENOMEM; 630 } 631 632 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 633 pmb->mbox_cmpl = lpfc_config_async_cmpl; 634 pmb->vport = phba->pport; 635 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 636 637 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 638 lpfc_printf_log(phba, 639 KERN_ERR, 640 LOG_INIT, 641 "0456 Adapter failed to issue " 642 "ASYNCEVT_ENABLE mbox status x%x\n", 643 rc); 644 mempool_free(pmb, phba->mbox_mem_pool); 645 } 646 647 /* Get Option rom version */ 648 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 649 if (!pmb) { 650 phba->link_state = LPFC_HBA_ERROR; 651 return -ENOMEM; 652 } 653 654 lpfc_dump_wakeup_param(phba, pmb); 655 pmb->mbox_cmpl = 
lpfc_dump_wakeup_param_cmpl; 656 pmb->vport = phba->pport; 657 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 658 659 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 661 "to get Option ROM version status x%x\n", rc); 662 mempool_free(pmb, phba->mbox_mem_pool); 663 } 664 665 return 0; 666 } 667 668 /** 669 * lpfc_hba_init_link - Initialize the FC link 670 * @phba: pointer to lpfc hba data structure. 671 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 672 * 673 * This routine will issue the INIT_LINK mailbox command call. 674 * It is available to other drivers through the lpfc_hba data 675 * structure for use as a delayed link up mechanism with the 676 * module parameter lpfc_suppress_link_up. 677 * 678 * Return code 679 * 0 - success 680 * Any other value - error 681 **/ 682 static int 683 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 684 { 685 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 686 } 687 688 /** 689 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 690 * @phba: pointer to lpfc hba data structure. 691 * @fc_topology: desired fc topology. 692 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 693 * 694 * This routine will issue the INIT_LINK mailbox command call. 695 * It is available to other drivers through the lpfc_hba data 696 * structure for use as a delayed link up mechanism with the 697 * module parameter lpfc_suppress_link_up. 698 * 699 * Return code 700 * 0 - success 701 * Any other value - error 702 **/ 703 int 704 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 705 uint32_t flag) 706 { 707 struct lpfc_vport *vport = phba->pport; 708 LPFC_MBOXQ_t *pmb; 709 MAILBOX_t *mb; 710 int rc; 711 712 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 713 if (!pmb) { 714 phba->link_state = LPFC_HBA_ERROR; 715 return -ENOMEM; 716 } 717 mb = &pmb->u.mb; 718 pmb->vport = vport; 719 720 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 721 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 722 !(phba->lmt & LMT_1Gb)) || 723 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 724 !(phba->lmt & LMT_2Gb)) || 725 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 726 !(phba->lmt & LMT_4Gb)) || 727 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 728 !(phba->lmt & LMT_8Gb)) || 729 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 730 !(phba->lmt & LMT_10Gb)) || 731 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 732 !(phba->lmt & LMT_16Gb)) || 733 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 734 !(phba->lmt & LMT_32Gb))) { 735 /* Reset link speed to auto */ 736 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 737 "1302 Invalid speed for this board:%d " 738 "Reset link speed to auto.\n", 739 phba->cfg_link_speed); 740 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 741 } 742 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 743 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 744 if (phba->sli_rev < LPFC_SLI_REV4) 745 lpfc_set_loopback_flag(phba); 746 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 747 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 749 "0498 Adapter failed to init, mbxCmd x%x " 750 "INIT_LINK, mbxStatus x%x\n", 751 mb->mbxCommand, mb->mbxStatus); 752 if (phba->sli_rev <= LPFC_SLI_REV3) { 753 /* Clear all interrupt enable conditions */ 754 writel(0, phba->HCregaddr); 755 
readl(phba->HCregaddr); /* flush */ 756 /* Clear all pending interrupts */ 757 writel(0xffffffff, phba->HAregaddr); 758 readl(phba->HAregaddr); /* flush */ 759 } 760 phba->link_state = LPFC_HBA_ERROR; 761 if (rc != MBX_BUSY || flag == MBX_POLL) 762 mempool_free(pmb, phba->mbox_mem_pool); 763 return -EIO; 764 } 765 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 766 if (flag == MBX_POLL) 767 mempool_free(pmb, phba->mbox_mem_pool); 768 769 return 0; 770 } 771 772 /** 773 * lpfc_hba_down_link - this routine downs the FC link 774 * @phba: pointer to lpfc hba data structure. 775 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 776 * 777 * This routine will issue the DOWN_LINK mailbox command call. 778 * It is available to other drivers through the lpfc_hba data 779 * structure for use to stop the link. 780 * 781 * Return code 782 * 0 - success 783 * Any other value - error 784 **/ 785 static int 786 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 787 { 788 LPFC_MBOXQ_t *pmb; 789 int rc; 790 791 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 792 if (!pmb) { 793 phba->link_state = LPFC_HBA_ERROR; 794 return -ENOMEM; 795 } 796 797 lpfc_printf_log(phba, 798 KERN_ERR, LOG_INIT, 799 "0491 Adapter Link is disabled.\n"); 800 lpfc_down_link(phba, pmb); 801 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 802 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 803 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 804 lpfc_printf_log(phba, 805 KERN_ERR, LOG_INIT, 806 "2522 Adapter failed to issue DOWN_LINK" 807 " mbox command rc 0x%x\n", rc); 808 809 mempool_free(pmb, phba->mbox_mem_pool); 810 return -EIO; 811 } 812 if (flag == MBX_POLL) 813 mempool_free(pmb, phba->mbox_mem_pool); 814 815 return 0; 816 } 817 818 /** 819 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 820 * @phba: pointer to lpfc HBA data structure. 821 * 822 * This routine will do LPFC uninitialization before the HBA is reset when 823 * bringing down the SLI Layer. 824 * 825 * Return codes 826 * 0 - success. 827 * Any other value - error. 828 **/ 829 int 830 lpfc_hba_down_prep(struct lpfc_hba *phba) 831 { 832 struct lpfc_vport **vports; 833 int i; 834 835 if (phba->sli_rev <= LPFC_SLI_REV3) { 836 /* Disable interrupts */ 837 writel(0, phba->HCregaddr); 838 readl(phba->HCregaddr); /* flush */ 839 } 840 841 if (phba->pport->load_flag & FC_UNLOADING) 842 lpfc_cleanup_discovery_resources(phba->pport); 843 else { 844 vports = lpfc_create_vport_work_array(phba); 845 if (vports != NULL) 846 for (i = 0; i <= phba->max_vports && 847 vports[i] != NULL; i++) 848 lpfc_cleanup_discovery_resources(vports[i]); 849 lpfc_destroy_vport_work_array(phba, vports); 850 } 851 return 0; 852 } 853 854 /** 855 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 856 * rspiocb which got deferred 857 * 858 * @phba: pointer to lpfc HBA data structure. 859 * 860 * This routine will cleanup completed slow path events after HBA is reset 861 * when bringing down the SLI Layer. 862 * 863 * 864 * Return codes 865 * void. 
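 *
 * Each queued entry is recovered from its embedding object with
 * container_of() before it is released, e.g. for a completed WQE:
 *
 *   rspiocbq = container_of(cq_event, struct lpfc_iocbq, cq_event);
 *   lpfc_sli_release_iocbq(phba, rspiocbq);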
866 **/ 867 static void 868 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 869 { 870 struct lpfc_iocbq *rspiocbq; 871 struct hbq_dmabuf *dmabuf; 872 struct lpfc_cq_event *cq_event; 873 874 spin_lock_irq(&phba->hbalock); 875 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 876 spin_unlock_irq(&phba->hbalock); 877 878 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 879 /* Get the response iocb from the head of work queue */ 880 spin_lock_irq(&phba->hbalock); 881 list_remove_head(&phba->sli4_hba.sp_queue_event, 882 cq_event, struct lpfc_cq_event, list); 883 spin_unlock_irq(&phba->hbalock); 884 885 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 886 case CQE_CODE_COMPL_WQE: 887 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 888 cq_event); 889 lpfc_sli_release_iocbq(phba, rspiocbq); 890 break; 891 case CQE_CODE_RECEIVE: 892 case CQE_CODE_RECEIVE_V1: 893 dmabuf = container_of(cq_event, struct hbq_dmabuf, 894 cq_event); 895 lpfc_in_buf_free(phba, &dmabuf->dbuf); 896 } 897 } 898 } 899 900 /** 901 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 902 * @phba: pointer to lpfc HBA data structure. 903 * 904 * This routine will cleanup posted ELS buffers after the HBA is reset 905 * when bringing down the SLI Layer. 906 * 907 * 908 * Return codes 909 * void. 910 **/ 911 static void 912 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 913 { 914 struct lpfc_sli *psli = &phba->sli; 915 struct lpfc_sli_ring *pring; 916 struct lpfc_dmabuf *mp, *next_mp; 917 LIST_HEAD(buflist); 918 int count; 919 920 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 921 lpfc_sli_hbqbuf_free_all(phba); 922 else { 923 /* Cleanup preposted buffers on the ELS ring */ 924 pring = &psli->sli3_ring[LPFC_ELS_RING]; 925 spin_lock_irq(&phba->hbalock); 926 list_splice_init(&pring->postbufq, &buflist); 927 spin_unlock_irq(&phba->hbalock); 928 929 count = 0; 930 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 931 list_del(&mp->list); 932 count++; 933 lpfc_mbuf_free(phba, mp->virt, mp->phys); 934 kfree(mp); 935 } 936 937 spin_lock_irq(&phba->hbalock); 938 pring->postbufq_cnt -= count; 939 spin_unlock_irq(&phba->hbalock); 940 } 941 } 942 943 /** 944 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 945 * @phba: pointer to lpfc HBA data structure. 946 * 947 * This routine will cleanup the txcmplq after the HBA is reset when bringing 948 * down the SLI Layer. 949 * 950 * Return codes 951 * void 952 **/ 953 static void 954 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 955 { 956 struct lpfc_sli *psli = &phba->sli; 957 struct lpfc_queue *qp = NULL; 958 struct lpfc_sli_ring *pring; 959 LIST_HEAD(completions); 960 int i; 961 962 if (phba->sli_rev != LPFC_SLI_REV4) { 963 for (i = 0; i < psli->num_rings; i++) { 964 pring = &psli->sli3_ring[i]; 965 spin_lock_irq(&phba->hbalock); 966 /* At this point in time the HBA is either reset or DOA 967 * Nothing should be on txcmplq as it will 968 * NEVER complete. 
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_els_sgl_list and */
					/* scsi_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
1066 */ 1067 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 1068 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 1069 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, 1070 &aborts); 1071 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 1072 } 1073 1074 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1075 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1076 list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list, 1077 &nvme_aborts); 1078 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1079 &nvmet_aborts); 1080 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1081 } 1082 1083 spin_unlock_irq(&phba->hbalock); 1084 1085 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1086 psb->pCmd = NULL; 1087 psb->status = IOSTAT_SUCCESS; 1088 } 1089 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 1090 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); 1091 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 1092 1093 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1094 cnt = 0; 1095 list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) { 1096 psb->pCmd = NULL; 1097 psb->status = IOSTAT_SUCCESS; 1098 cnt++; 1099 } 1100 spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); 1101 phba->put_nvme_bufs += cnt; 1102 list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put); 1103 spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); 1104 1105 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1106 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); 1107 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1108 } 1109 } 1110 1111 lpfc_sli4_free_sp_events(phba); 1112 return 0; 1113 } 1114 1115 /** 1116 * lpfc_hba_down_post - Wrapper func for hba down post routine 1117 * @phba: pointer to lpfc HBA data structure. 1118 * 1119 * This routine wraps the actual SLI3 or SLI4 routine for performing 1120 * uninitialization after the HBA is reset when bring down the SLI Layer. 1121 * 1122 * Return codes 1123 * 0 - success. 1124 * Any other value - error. 1125 **/ 1126 int 1127 lpfc_hba_down_post(struct lpfc_hba *phba) 1128 { 1129 return (*phba->lpfc_hba_down_post)(phba); 1130 } 1131 1132 /** 1133 * lpfc_hb_timeout - The HBA-timer timeout handler 1134 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1135 * 1136 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1137 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1138 * work-port-events bitmap and the worker thread is notified. This timeout 1139 * event will be used by the worker thread to invoke the actual timeout 1140 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1141 * be performed in the timeout handler and the HBA timeout event bit shall 1142 * be cleared by the worker thread after it has taken the event bitmap out. 
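 *
 * For reference, the timer is (re)armed elsewhere in this file with:
 *
 *   mod_timer(&phba->hb_tmofunc,
 *	       jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));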
1143 **/ 1144 static void 1145 lpfc_hb_timeout(struct timer_list *t) 1146 { 1147 struct lpfc_hba *phba; 1148 uint32_t tmo_posted; 1149 unsigned long iflag; 1150 1151 phba = from_timer(phba, t, hb_tmofunc); 1152 1153 /* Check for heart beat timeout conditions */ 1154 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1155 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1156 if (!tmo_posted) 1157 phba->pport->work_port_events |= WORKER_HB_TMO; 1158 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1159 1160 /* Tell the worker thread there is work to do */ 1161 if (!tmo_posted) 1162 lpfc_worker_wake_up(phba); 1163 return; 1164 } 1165 1166 /** 1167 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1168 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1169 * 1170 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1171 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1172 * work-port-events bitmap and the worker thread is notified. This timeout 1173 * event will be used by the worker thread to invoke the actual timeout 1174 * handler routine, lpfc_rrq_handler. Any periodical operations will 1175 * be performed in the timeout handler and the RRQ timeout event bit shall 1176 * be cleared by the worker thread after it has taken the event bitmap out. 1177 **/ 1178 static void 1179 lpfc_rrq_timeout(struct timer_list *t) 1180 { 1181 struct lpfc_hba *phba; 1182 unsigned long iflag; 1183 1184 phba = from_timer(phba, t, rrq_tmr); 1185 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1186 if (!(phba->pport->load_flag & FC_UNLOADING)) 1187 phba->hba_flag |= HBA_RRQ_ACTIVE; 1188 else 1189 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1190 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1191 1192 if (!(phba->pport->load_flag & FC_UNLOADING)) 1193 lpfc_worker_wake_up(phba); 1194 } 1195 1196 /** 1197 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1198 * @phba: pointer to lpfc hba data structure. 1199 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1200 * 1201 * This is the callback function to the lpfc heart-beat mailbox command. 1202 * If configured, the lpfc driver issues the heart-beat mailbox command to 1203 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1204 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1205 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1206 * heart-beat outstanding state. Once the mailbox command comes back and 1207 * no error conditions detected, the heart-beat mailbox command timer is 1208 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1209 * state is cleared for the next heart-beat. If the timer expired with the 1210 * heart-beat outstanding state set, the driver will put the HBA offline. 
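 *
 * For reference, lpfc_hb_timeout_handler() below issues the command as:
 *
 *   lpfc_heart_beat(phba, pmboxq);
 *   pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
 *   pmboxq->vport = phba->pport;
 *   retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);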
1211 **/ 1212 static void 1213 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1214 { 1215 unsigned long drvr_flag; 1216 1217 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1218 phba->hb_outstanding = 0; 1219 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1220 1221 /* Check and reset heart-beat timer is necessary */ 1222 mempool_free(pmboxq, phba->mbox_mem_pool); 1223 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1224 !(phba->link_state == LPFC_HBA_ERROR) && 1225 !(phba->pport->load_flag & FC_UNLOADING)) 1226 mod_timer(&phba->hb_tmofunc, 1227 jiffies + 1228 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1229 return; 1230 } 1231 1232 /** 1233 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1234 * @phba: pointer to lpfc hba data structure. 1235 * 1236 * This is the actual HBA-timer timeout handler to be invoked by the worker 1237 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1238 * handler performs any periodic operations needed for the device. If such 1239 * periodic event has already been attended to either in the interrupt handler 1240 * or by processing slow-ring or fast-ring events within the HBA-timer 1241 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1242 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1243 * is configured and there is no heart-beat mailbox command outstanding, a 1244 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1245 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1246 * to offline. 1247 **/ 1248 void 1249 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1250 { 1251 struct lpfc_vport **vports; 1252 LPFC_MBOXQ_t *pmboxq; 1253 struct lpfc_dmabuf *buf_ptr; 1254 int retval, i; 1255 struct lpfc_sli *psli = &phba->sli; 1256 LIST_HEAD(completions); 1257 struct lpfc_queue *qp; 1258 unsigned long time_elapsed; 1259 uint32_t tick_cqe, max_cqe, val; 1260 uint64_t tot, data1, data2, data3; 1261 struct lpfc_nvmet_tgtport *tgtp; 1262 struct lpfc_register reg_data; 1263 void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr; 1264 1265 vports = lpfc_create_vport_work_array(phba); 1266 if (vports != NULL) 1267 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1268 lpfc_rcv_seq_check_edtov(vports[i]); 1269 lpfc_fdmi_num_disc_check(vports[i]); 1270 } 1271 lpfc_destroy_vport_work_array(phba, vports); 1272 1273 if ((phba->link_state == LPFC_HBA_ERROR) || 1274 (phba->pport->load_flag & FC_UNLOADING) || 1275 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1276 return; 1277 1278 if (phba->cfg_auto_imax) { 1279 if (!phba->last_eqdelay_time) { 1280 phba->last_eqdelay_time = jiffies; 1281 goto skip_eqdelay; 1282 } 1283 time_elapsed = jiffies - phba->last_eqdelay_time; 1284 phba->last_eqdelay_time = jiffies; 1285 1286 tot = 0xffff; 1287 /* Check outstanding IO count */ 1288 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1289 if (phba->nvmet_support) { 1290 tgtp = phba->targetport->private; 1291 /* Calculate outstanding IOs */ 1292 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); 1293 tot += atomic_read(&tgtp->xmt_fcp_release); 1294 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; 1295 } else { 1296 tot = atomic_read(&phba->fc4NvmeIoCmpls); 1297 data1 = atomic_read( 1298 &phba->fc4NvmeInputRequests); 1299 data2 = atomic_read( 1300 &phba->fc4NvmeOutputRequests); 1301 data3 = atomic_read( 1302 &phba->fc4NvmeControlRequests); 1303 tot = (data1 + data2 + data3) - tot; 1304 } 1305 } 1306 1307 /* Interrupts per sec per EQ 
*/ 1308 val = phba->cfg_fcp_imax / phba->io_channel_irqs; 1309 tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */ 1310 1311 /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */ 1312 max_cqe = time_elapsed * tick_cqe; 1313 1314 for (i = 0; i < phba->io_channel_irqs; i++) { 1315 /* Fast-path EQ */ 1316 qp = phba->sli4_hba.hba_eq[i]; 1317 if (!qp) 1318 continue; 1319 1320 /* Use no EQ delay if we don't have many outstanding 1321 * IOs, or if we are only processing 1 CQE/ISR or less. 1322 * Otherwise, assume we can process up to lpfc_fcp_imax 1323 * interrupts per HBA. 1324 */ 1325 if (tot < LPFC_NODELAY_MAX_IO || 1326 qp->EQ_cqe_cnt <= max_cqe) 1327 val = 0; 1328 else 1329 val = phba->cfg_fcp_imax; 1330 1331 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { 1332 /* Use EQ Delay Register method */ 1333 1334 /* Convert for EQ Delay register */ 1335 if (val) { 1336 /* First, interrupts per sec per EQ */ 1337 val = phba->cfg_fcp_imax / 1338 phba->io_channel_irqs; 1339 1340 /* us delay between each interrupt */ 1341 val = LPFC_SEC_TO_USEC / val; 1342 } 1343 if (val != qp->q_mode) { 1344 reg_data.word0 = 0; 1345 bf_set(lpfc_sliport_eqdelay_id, 1346 ®_data, qp->queue_id); 1347 bf_set(lpfc_sliport_eqdelay_delay, 1348 ®_data, val); 1349 writel(reg_data.word0, eqdreg); 1350 } 1351 } else { 1352 /* Use mbox command method */ 1353 if (val != qp->q_mode) 1354 lpfc_modify_hba_eq_delay(phba, i, 1355 1, val); 1356 } 1357 1358 /* 1359 * val is cfg_fcp_imax or 0 for mbox delay or us delay 1360 * between interrupts for EQDR. 1361 */ 1362 qp->q_mode = val; 1363 qp->EQ_cqe_cnt = 0; 1364 } 1365 } 1366 1367 skip_eqdelay: 1368 spin_lock_irq(&phba->pport->work_port_lock); 1369 1370 if (time_after(phba->last_completion_time + 1371 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1372 jiffies)) { 1373 spin_unlock_irq(&phba->pport->work_port_lock); 1374 if (!phba->hb_outstanding) 1375 mod_timer(&phba->hb_tmofunc, 1376 jiffies + 1377 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1378 else 1379 mod_timer(&phba->hb_tmofunc, 1380 jiffies + 1381 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1382 return; 1383 } 1384 spin_unlock_irq(&phba->pport->work_port_lock); 1385 1386 if (phba->elsbuf_cnt && 1387 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1388 spin_lock_irq(&phba->hbalock); 1389 list_splice_init(&phba->elsbuf, &completions); 1390 phba->elsbuf_cnt = 0; 1391 phba->elsbuf_prev_cnt = 0; 1392 spin_unlock_irq(&phba->hbalock); 1393 1394 while (!list_empty(&completions)) { 1395 list_remove_head(&completions, buf_ptr, 1396 struct lpfc_dmabuf, list); 1397 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1398 kfree(buf_ptr); 1399 } 1400 } 1401 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1402 1403 /* If there is no heart beat outstanding, issue a heartbeat command */ 1404 if (phba->cfg_enable_hba_heartbeat) { 1405 if (!phba->hb_outstanding) { 1406 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1407 (list_empty(&psli->mboxq))) { 1408 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1409 GFP_KERNEL); 1410 if (!pmboxq) { 1411 mod_timer(&phba->hb_tmofunc, 1412 jiffies + 1413 msecs_to_jiffies(1000 * 1414 LPFC_HB_MBOX_INTERVAL)); 1415 return; 1416 } 1417 1418 lpfc_heart_beat(phba, pmboxq); 1419 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1420 pmboxq->vport = phba->pport; 1421 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1422 MBX_NOWAIT); 1423 1424 if (retval != MBX_BUSY && 1425 retval != MBX_SUCCESS) { 1426 mempool_free(pmboxq, 1427 phba->mbox_mem_pool); 1428 mod_timer(&phba->hb_tmofunc, 1429 jiffies + 1430 msecs_to_jiffies(1000 * 
1431 LPFC_HB_MBOX_INTERVAL)); 1432 return; 1433 } 1434 phba->skipped_hb = 0; 1435 phba->hb_outstanding = 1; 1436 } else if (time_before_eq(phba->last_completion_time, 1437 phba->skipped_hb)) { 1438 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1439 "2857 Last completion time not " 1440 " updated in %d ms\n", 1441 jiffies_to_msecs(jiffies 1442 - phba->last_completion_time)); 1443 } else 1444 phba->skipped_hb = jiffies; 1445 1446 mod_timer(&phba->hb_tmofunc, 1447 jiffies + 1448 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1449 return; 1450 } else { 1451 /* 1452 * If heart beat timeout called with hb_outstanding set 1453 * we need to give the hb mailbox cmd a chance to 1454 * complete or TMO. 1455 */ 1456 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1457 "0459 Adapter heartbeat still out" 1458 "standing:last compl time was %d ms.\n", 1459 jiffies_to_msecs(jiffies 1460 - phba->last_completion_time)); 1461 mod_timer(&phba->hb_tmofunc, 1462 jiffies + 1463 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1464 } 1465 } else { 1466 mod_timer(&phba->hb_tmofunc, 1467 jiffies + 1468 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1469 } 1470 } 1471 1472 /** 1473 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1474 * @phba: pointer to lpfc hba data structure. 1475 * 1476 * This routine is called to bring the HBA offline when HBA hardware error 1477 * other than Port Error 6 has been detected. 1478 **/ 1479 static void 1480 lpfc_offline_eratt(struct lpfc_hba *phba) 1481 { 1482 struct lpfc_sli *psli = &phba->sli; 1483 1484 spin_lock_irq(&phba->hbalock); 1485 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1486 spin_unlock_irq(&phba->hbalock); 1487 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1488 1489 lpfc_offline(phba); 1490 lpfc_reset_barrier(phba); 1491 spin_lock_irq(&phba->hbalock); 1492 lpfc_sli_brdreset(phba); 1493 spin_unlock_irq(&phba->hbalock); 1494 lpfc_hba_down_post(phba); 1495 lpfc_sli_brdready(phba, HS_MBRDY); 1496 lpfc_unblock_mgmt_io(phba); 1497 phba->link_state = LPFC_HBA_ERROR; 1498 return; 1499 } 1500 1501 /** 1502 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1503 * @phba: pointer to lpfc hba data structure. 1504 * 1505 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1506 * other than Port Error 6 has been detected. 1507 **/ 1508 void 1509 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1510 { 1511 spin_lock_irq(&phba->hbalock); 1512 phba->link_state = LPFC_HBA_ERROR; 1513 spin_unlock_irq(&phba->hbalock); 1514 1515 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1516 lpfc_offline(phba); 1517 lpfc_hba_down_post(phba); 1518 lpfc_unblock_mgmt_io(phba); 1519 } 1520 1521 /** 1522 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1523 * @phba: pointer to lpfc hba data structure. 1524 * 1525 * This routine is invoked to handle the deferred HBA hardware error 1526 * conditions. This type of error is indicated by HBA by setting ER1 1527 * and another ER bit in the host status register. The driver will 1528 * wait until the ER1 bit clears before handling the error condition. 1529 **/ 1530 static void 1531 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1532 { 1533 uint32_t old_host_status = phba->work_hs; 1534 struct lpfc_sli *psli = &phba->sli; 1535 1536 /* If the pci channel is offline, ignore possible errors, 1537 * since we cannot communicate with the pci card anyway. 
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it has triggered an error attention. That can
	 * cause I/Os to be dropped by the firmware. Error the iocbs (I/O) on
	 * the txcmplq and let the SCSI layer retry them after re-establishing
	 * link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 */
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered an error attention
		 * with HS_FFER6. That can cause I/Os to be dropped by the
		 * firmware. Error the iocbs (I/O) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
1724 */ 1725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1726 "0457 Adapter Hardware Error " 1727 "Data: x%x x%x x%x\n", 1728 phba->work_hs, 1729 phba->work_status[0], phba->work_status[1]); 1730 1731 event_data = FC_REG_DUMP_EVENT; 1732 shost = lpfc_shost_from_vport(vport); 1733 fc_host_post_vendor_event(shost, fc_get_event_number(), 1734 sizeof(event_data), (char *) &event_data, 1735 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1736 1737 lpfc_offline_eratt(phba); 1738 } 1739 return; 1740 } 1741 1742 /** 1743 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1744 * @phba: pointer to lpfc hba data structure. 1745 * @mbx_action: flag for mailbox shutdown action. 1746 * 1747 * This routine is invoked to perform an SLI4 port PCI function reset in 1748 * response to port status register polling attention. It waits for port 1749 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1750 * During this process, interrupt vectors are freed and later requested 1751 * for handling possible port resource change. 1752 **/ 1753 static int 1754 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1755 bool en_rn_msg) 1756 { 1757 int rc; 1758 uint32_t intr_mode; 1759 1760 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1761 LPFC_SLI_INTF_IF_TYPE_2) { 1762 /* 1763 * On error status condition, driver need to wait for port 1764 * ready before performing reset. 1765 */ 1766 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1767 if (rc) 1768 return rc; 1769 } 1770 1771 /* need reset: attempt for port recovery */ 1772 if (en_rn_msg) 1773 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1774 "2887 Reset Needed: Attempting Port " 1775 "Recovery...\n"); 1776 lpfc_offline_prep(phba, mbx_action); 1777 lpfc_offline(phba); 1778 /* release interrupt for possible resource change */ 1779 lpfc_sli4_disable_intr(phba); 1780 lpfc_sli_brdrestart(phba); 1781 /* request and enable interrupt */ 1782 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1783 if (intr_mode == LPFC_INTR_ERROR) { 1784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1785 "3175 Failed to enable interrupt\n"); 1786 return -EIO; 1787 } 1788 phba->intr_mode = intr_mode; 1789 rc = lpfc_online(phba); 1790 if (rc == 0) 1791 lpfc_unblock_mgmt_io(phba); 1792 1793 return rc; 1794 } 1795 1796 /** 1797 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1798 * @phba: pointer to lpfc hba data structure. 1799 * 1800 * This routine is invoked to handle the SLI4 HBA hardware error attention 1801 * conditions. 1802 **/ 1803 static void 1804 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1805 { 1806 struct lpfc_vport *vport = phba->pport; 1807 uint32_t event_data; 1808 struct Scsi_Host *shost; 1809 uint32_t if_type; 1810 struct lpfc_register portstat_reg = {0}; 1811 uint32_t reg_err1, reg_err2; 1812 uint32_t uerrlo_reg, uemasklo_reg; 1813 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1814 bool en_rn_msg = true; 1815 struct temp_event temp_event_data; 1816 struct lpfc_register portsmphr_reg; 1817 int rc, i; 1818 1819 /* If the pci channel is offline, ignore possible errors, since 1820 * we cannot communicate with the pci card anyway. 
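 *
 * Note: for if_type 0 ports the recoverable-UE handling below polls the
 * port semaphore register once per second, and the number of polls is
 * derived from ue_to_sr, which is consumed here as a budget in
 * milliseconds:
 *
 *	seconds_to_wait = phba->sli4_hba.ue_to_sr / 1000;
 *
 * (seconds_to_wait is only an illustrative name for this sketch, not a
 * driver field.)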
1821 */ 1822 if (pci_channel_offline(phba->pcidev)) 1823 return; 1824 1825 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1826 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1827 switch (if_type) { 1828 case LPFC_SLI_INTF_IF_TYPE_0: 1829 pci_rd_rc1 = lpfc_readl( 1830 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1831 &uerrlo_reg); 1832 pci_rd_rc2 = lpfc_readl( 1833 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1834 &uemasklo_reg); 1835 /* consider PCI bus read error as pci_channel_offline */ 1836 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1837 return; 1838 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1839 lpfc_sli4_offline_eratt(phba); 1840 return; 1841 } 1842 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1843 "7623 Checking UE recoverable"); 1844 1845 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1846 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1847 &portsmphr_reg.word0)) 1848 continue; 1849 1850 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1851 &portsmphr_reg); 1852 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1853 LPFC_PORT_SEM_UE_RECOVERABLE) 1854 break; 1855 /*Sleep for 1Sec, before checking SEMAPHORE */ 1856 msleep(1000); 1857 } 1858 1859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1860 "4827 smphr_port_status x%x : Waited %dSec", 1861 smphr_port_status, i); 1862 1863 /* Recoverable UE, reset the HBA device */ 1864 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1865 LPFC_PORT_SEM_UE_RECOVERABLE) { 1866 for (i = 0; i < 20; i++) { 1867 msleep(1000); 1868 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1869 &portsmphr_reg.word0) && 1870 (LPFC_POST_STAGE_PORT_READY == 1871 bf_get(lpfc_port_smphr_port_status, 1872 &portsmphr_reg))) { 1873 rc = lpfc_sli4_port_sta_fn_reset(phba, 1874 LPFC_MBX_NO_WAIT, en_rn_msg); 1875 if (rc == 0) 1876 return; 1877 lpfc_printf_log(phba, 1878 KERN_ERR, LOG_INIT, 1879 "4215 Failed to recover UE"); 1880 break; 1881 } 1882 } 1883 } 1884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1885 "7624 Firmware not ready: Failing UE recovery," 1886 " waited %dSec", i); 1887 lpfc_sli4_offline_eratt(phba); 1888 break; 1889 1890 case LPFC_SLI_INTF_IF_TYPE_2: 1891 pci_rd_rc1 = lpfc_readl( 1892 phba->sli4_hba.u.if_type2.STATUSregaddr, 1893 &portstat_reg.word0); 1894 /* consider PCI bus read error as pci_channel_offline */ 1895 if (pci_rd_rc1 == -EIO) { 1896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1897 "3151 PCI bus read access failure: x%x\n", 1898 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1899 return; 1900 } 1901 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1902 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1903 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1905 "2889 Port Overtemperature event, " 1906 "taking port offline Data: x%x x%x\n", 1907 reg_err1, reg_err2); 1908 1909 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1910 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1911 temp_event_data.event_code = LPFC_CRIT_TEMP; 1912 temp_event_data.data = 0xFFFFFFFF; 1913 1914 shost = lpfc_shost_from_vport(phba->pport); 1915 fc_host_post_vendor_event(shost, fc_get_event_number(), 1916 sizeof(temp_event_data), 1917 (char *)&temp_event_data, 1918 SCSI_NL_VID_TYPE_PCI 1919 | PCI_VENDOR_ID_EMULEX); 1920 1921 spin_lock_irq(&phba->hbalock); 1922 phba->over_temp_state = HBA_OVER_TEMP; 1923 spin_unlock_irq(&phba->hbalock); 1924 lpfc_sli4_offline_eratt(phba); 1925 return; 1926 } 1927 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1928 reg_err2 == 
SLIPORT_ERR2_REG_FW_RESTART) { 1929 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1930 "3143 Port Down: Firmware Update " 1931 "Detected\n"); 1932 en_rn_msg = false; 1933 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1934 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1936 "3144 Port Down: Debug Dump\n"); 1937 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1938 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1940 "3145 Port Down: Provisioning\n"); 1941 1942 /* If resets are disabled then leave the HBA alone and return */ 1943 if (!phba->cfg_enable_hba_reset) 1944 return; 1945 1946 /* Check port status register for function reset */ 1947 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1948 en_rn_msg); 1949 if (rc == 0) { 1950 /* don't report event on forced debug dump */ 1951 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1952 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1953 return; 1954 else 1955 break; 1956 } 1957 /* fall through for not able to recover */ 1958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1959 "3152 Unrecoverable error, bring the port " 1960 "offline\n"); 1961 lpfc_sli4_offline_eratt(phba); 1962 break; 1963 case LPFC_SLI_INTF_IF_TYPE_1: 1964 default: 1965 break; 1966 } 1967 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1968 "3123 Report dump event to upper layer\n"); 1969 /* Send an internal error event to mgmt application */ 1970 lpfc_board_errevt_to_mgmt(phba); 1971 1972 event_data = FC_REG_DUMP_EVENT; 1973 shost = lpfc_shost_from_vport(vport); 1974 fc_host_post_vendor_event(shost, fc_get_event_number(), 1975 sizeof(event_data), (char *) &event_data, 1976 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1977 } 1978 1979 /** 1980 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1981 * @phba: pointer to lpfc HBA data structure. 1982 * 1983 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1984 * routine from the API jump table function pointer from the lpfc_hba struct. 1985 * 1986 * Return codes 1987 * 0 - success. 1988 * Any other value - error. 1989 **/ 1990 void 1991 lpfc_handle_eratt(struct lpfc_hba *phba) 1992 { 1993 (*phba->lpfc_handle_eratt)(phba); 1994 } 1995 1996 /** 1997 * lpfc_handle_latt - The HBA link event handler 1998 * @phba: pointer to lpfc hba data structure. 1999 * 2000 * This routine is invoked from the worker thread to handle a HBA host 2001 * attention link event. SLI3 only. 
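 *
 * On success it issues a READ_TOPOLOGY mailbox command (completed later by
 * lpfc_mbx_cmpl_read_topology) and clears the link attention bit in the
 * host attention register. On any allocation or mailbox failure it unwinds
 * in reverse order, re-enables link attention interrupts and marks the HBA
 * as being in error; a condensed sketch of that unwind (label names
 * abbreviated from the goto ladder in the body):
 *
 *	free_mbuf: lpfc_mbuf_free(phba, mp->virt, mp->phys);
 *	free_mp:   kfree(mp);
 *	free_pmb:  mempool_free(pmb, phba->mbox_mem_pool);
 *	err_exit:  re-enable HC_LAINT_ENA, clear HA_LATT, lpfc_linkdown(phba),
 *	           phba->link_state = LPFC_HBA_ERROR;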
2002 **/ 2003 void 2004 lpfc_handle_latt(struct lpfc_hba *phba) 2005 { 2006 struct lpfc_vport *vport = phba->pport; 2007 struct lpfc_sli *psli = &phba->sli; 2008 LPFC_MBOXQ_t *pmb; 2009 volatile uint32_t control; 2010 struct lpfc_dmabuf *mp; 2011 int rc = 0; 2012 2013 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2014 if (!pmb) { 2015 rc = 1; 2016 goto lpfc_handle_latt_err_exit; 2017 } 2018 2019 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2020 if (!mp) { 2021 rc = 2; 2022 goto lpfc_handle_latt_free_pmb; 2023 } 2024 2025 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2026 if (!mp->virt) { 2027 rc = 3; 2028 goto lpfc_handle_latt_free_mp; 2029 } 2030 2031 /* Cleanup any outstanding ELS commands */ 2032 lpfc_els_flush_all_cmd(phba); 2033 2034 psli->slistat.link_event++; 2035 lpfc_read_topology(phba, pmb, mp); 2036 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2037 pmb->vport = vport; 2038 /* Block ELS IOCBs until we have processed this mbox command */ 2039 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2040 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2041 if (rc == MBX_NOT_FINISHED) { 2042 rc = 4; 2043 goto lpfc_handle_latt_free_mbuf; 2044 } 2045 2046 /* Clear Link Attention in HA REG */ 2047 spin_lock_irq(&phba->hbalock); 2048 writel(HA_LATT, phba->HAregaddr); 2049 readl(phba->HAregaddr); /* flush */ 2050 spin_unlock_irq(&phba->hbalock); 2051 2052 return; 2053 2054 lpfc_handle_latt_free_mbuf: 2055 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2056 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2057 lpfc_handle_latt_free_mp: 2058 kfree(mp); 2059 lpfc_handle_latt_free_pmb: 2060 mempool_free(pmb, phba->mbox_mem_pool); 2061 lpfc_handle_latt_err_exit: 2062 /* Enable Link attention interrupts */ 2063 spin_lock_irq(&phba->hbalock); 2064 psli->sli_flag |= LPFC_PROCESS_LA; 2065 control = readl(phba->HCregaddr); 2066 control |= HC_LAINT_ENA; 2067 writel(control, phba->HCregaddr); 2068 readl(phba->HCregaddr); /* flush */ 2069 2070 /* Clear Link Attention in HA REG */ 2071 writel(HA_LATT, phba->HAregaddr); 2072 readl(phba->HAregaddr); /* flush */ 2073 spin_unlock_irq(&phba->hbalock); 2074 lpfc_linkdown(phba); 2075 phba->link_state = LPFC_HBA_ERROR; 2076 2077 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2078 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2079 2080 return; 2081 } 2082 2083 /** 2084 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2085 * @phba: pointer to lpfc hba data structure. 2086 * @vpd: pointer to the vital product data. 2087 * @len: length of the vital product data in bytes. 2088 * 2089 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2090 * an array of characters. In this routine, the ModelName, ProgramType, and 2091 * ModelDesc, etc. fields of the phba data structure will be populated. 
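 *
 * The parser walks standard PCI VPD resource tags: 0x82 (identifier
 * string) and 0x91 are skipped by their two byte length, 0x90 introduces
 * the read-only area that is scanned for keyword records, and 0x78 is the
 * end tag. Within the read-only area each record is two ASCII keyword
 * bytes ("SN", "V1".."V4", ...), one length byte and then the data, so a
 * minimal sketch of stepping over one record (illustrative names only)
 * looks like:
 *
 *	kw_len  = vpd[index + 2];
 *	kw_data = &vpd[index + 3];
 *	index  += 3 + kw_len;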
2092 * 2093 * Return codes 2094 * 0 - pointer to the VPD passed in is NULL 2095 * 1 - success 2096 **/ 2097 int 2098 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2099 { 2100 uint8_t lenlo, lenhi; 2101 int Length; 2102 int i, j; 2103 int finished = 0; 2104 int index = 0; 2105 2106 if (!vpd) 2107 return 0; 2108 2109 /* Vital Product */ 2110 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2111 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2112 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2113 (uint32_t) vpd[3]); 2114 while (!finished && (index < (len - 4))) { 2115 switch (vpd[index]) { 2116 case 0x82: 2117 case 0x91: 2118 index += 1; 2119 lenlo = vpd[index]; 2120 index += 1; 2121 lenhi = vpd[index]; 2122 index += 1; 2123 i = ((((unsigned short)lenhi) << 8) + lenlo); 2124 index += i; 2125 break; 2126 case 0x90: 2127 index += 1; 2128 lenlo = vpd[index]; 2129 index += 1; 2130 lenhi = vpd[index]; 2131 index += 1; 2132 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2133 if (Length > len - index) 2134 Length = len - index; 2135 while (Length > 0) { 2136 /* Look for Serial Number */ 2137 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2138 index += 2; 2139 i = vpd[index]; 2140 index += 1; 2141 j = 0; 2142 Length -= (3+i); 2143 while(i--) { 2144 phba->SerialNumber[j++] = vpd[index++]; 2145 if (j == 31) 2146 break; 2147 } 2148 phba->SerialNumber[j] = 0; 2149 continue; 2150 } 2151 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2152 phba->vpd_flag |= VPD_MODEL_DESC; 2153 index += 2; 2154 i = vpd[index]; 2155 index += 1; 2156 j = 0; 2157 Length -= (3+i); 2158 while(i--) { 2159 phba->ModelDesc[j++] = vpd[index++]; 2160 if (j == 255) 2161 break; 2162 } 2163 phba->ModelDesc[j] = 0; 2164 continue; 2165 } 2166 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2167 phba->vpd_flag |= VPD_MODEL_NAME; 2168 index += 2; 2169 i = vpd[index]; 2170 index += 1; 2171 j = 0; 2172 Length -= (3+i); 2173 while(i--) { 2174 phba->ModelName[j++] = vpd[index++]; 2175 if (j == 79) 2176 break; 2177 } 2178 phba->ModelName[j] = 0; 2179 continue; 2180 } 2181 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2182 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2183 index += 2; 2184 i = vpd[index]; 2185 index += 1; 2186 j = 0; 2187 Length -= (3+i); 2188 while(i--) { 2189 phba->ProgramType[j++] = vpd[index++]; 2190 if (j == 255) 2191 break; 2192 } 2193 phba->ProgramType[j] = 0; 2194 continue; 2195 } 2196 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2197 phba->vpd_flag |= VPD_PORT; 2198 index += 2; 2199 i = vpd[index]; 2200 index += 1; 2201 j = 0; 2202 Length -= (3+i); 2203 while(i--) { 2204 if ((phba->sli_rev == LPFC_SLI_REV4) && 2205 (phba->sli4_hba.pport_name_sta == 2206 LPFC_SLI4_PPNAME_GET)) { 2207 j++; 2208 index++; 2209 } else 2210 phba->Port[j++] = vpd[index++]; 2211 if (j == 19) 2212 break; 2213 } 2214 if ((phba->sli_rev != LPFC_SLI_REV4) || 2215 (phba->sli4_hba.pport_name_sta == 2216 LPFC_SLI4_PPNAME_NON)) 2217 phba->Port[j] = 0; 2218 continue; 2219 } 2220 else { 2221 index += 2; 2222 i = vpd[index]; 2223 index += 1; 2224 index += i; 2225 Length -= (3 + i); 2226 } 2227 } 2228 finished = 0; 2229 break; 2230 case 0x78: 2231 finished = 1; 2232 break; 2233 default: 2234 index ++; 2235 break; 2236 } 2237 } 2238 2239 return(1); 2240 } 2241 2242 /** 2243 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2244 * @phba: pointer to lpfc hba data structure. 2245 * @mdp: pointer to the data structure to hold the derived model name. 
2246 * @descp: pointer to the data structure to hold the derived description. 2247 * 2248 * This routine retrieves HBA's description based on its registered PCI device 2249 * ID. The @descp passed into this function points to an array of 256 chars. It 2250 * shall be returned with the model name, maximum speed, and the host bus type. 2251 * The @mdp passed into this function points to an array of 80 chars. When the 2252 * function returns, the @mdp will be filled with the model name. 2253 **/ 2254 static void 2255 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2256 { 2257 lpfc_vpd_t *vp; 2258 uint16_t dev_id = phba->pcidev->device; 2259 int max_speed; 2260 int GE = 0; 2261 int oneConnect = 0; /* default is not a oneConnect */ 2262 struct { 2263 char *name; 2264 char *bus; 2265 char *function; 2266 } m = {"<Unknown>", "", ""}; 2267 2268 if (mdp && mdp[0] != '\0' 2269 && descp && descp[0] != '\0') 2270 return; 2271 2272 if (phba->lmt & LMT_32Gb) 2273 max_speed = 32; 2274 else if (phba->lmt & LMT_16Gb) 2275 max_speed = 16; 2276 else if (phba->lmt & LMT_10Gb) 2277 max_speed = 10; 2278 else if (phba->lmt & LMT_8Gb) 2279 max_speed = 8; 2280 else if (phba->lmt & LMT_4Gb) 2281 max_speed = 4; 2282 else if (phba->lmt & LMT_2Gb) 2283 max_speed = 2; 2284 else if (phba->lmt & LMT_1Gb) 2285 max_speed = 1; 2286 else 2287 max_speed = 0; 2288 2289 vp = &phba->vpd; 2290 2291 switch (dev_id) { 2292 case PCI_DEVICE_ID_FIREFLY: 2293 m = (typeof(m)){"LP6000", "PCI", 2294 "Obsolete, Unsupported Fibre Channel Adapter"}; 2295 break; 2296 case PCI_DEVICE_ID_SUPERFLY: 2297 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2298 m = (typeof(m)){"LP7000", "PCI", ""}; 2299 else 2300 m = (typeof(m)){"LP7000E", "PCI", ""}; 2301 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2302 break; 2303 case PCI_DEVICE_ID_DRAGONFLY: 2304 m = (typeof(m)){"LP8000", "PCI", 2305 "Obsolete, Unsupported Fibre Channel Adapter"}; 2306 break; 2307 case PCI_DEVICE_ID_CENTAUR: 2308 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2309 m = (typeof(m)){"LP9002", "PCI", ""}; 2310 else 2311 m = (typeof(m)){"LP9000", "PCI", ""}; 2312 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2313 break; 2314 case PCI_DEVICE_ID_RFLY: 2315 m = (typeof(m)){"LP952", "PCI", 2316 "Obsolete, Unsupported Fibre Channel Adapter"}; 2317 break; 2318 case PCI_DEVICE_ID_PEGASUS: 2319 m = (typeof(m)){"LP9802", "PCI-X", 2320 "Obsolete, Unsupported Fibre Channel Adapter"}; 2321 break; 2322 case PCI_DEVICE_ID_THOR: 2323 m = (typeof(m)){"LP10000", "PCI-X", 2324 "Obsolete, Unsupported Fibre Channel Adapter"}; 2325 break; 2326 case PCI_DEVICE_ID_VIPER: 2327 m = (typeof(m)){"LPX1000", "PCI-X", 2328 "Obsolete, Unsupported Fibre Channel Adapter"}; 2329 break; 2330 case PCI_DEVICE_ID_PFLY: 2331 m = (typeof(m)){"LP982", "PCI-X", 2332 "Obsolete, Unsupported Fibre Channel Adapter"}; 2333 break; 2334 case PCI_DEVICE_ID_TFLY: 2335 m = (typeof(m)){"LP1050", "PCI-X", 2336 "Obsolete, Unsupported Fibre Channel Adapter"}; 2337 break; 2338 case PCI_DEVICE_ID_HELIOS: 2339 m = (typeof(m)){"LP11000", "PCI-X2", 2340 "Obsolete, Unsupported Fibre Channel Adapter"}; 2341 break; 2342 case PCI_DEVICE_ID_HELIOS_SCSP: 2343 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2344 "Obsolete, Unsupported Fibre Channel Adapter"}; 2345 break; 2346 case PCI_DEVICE_ID_HELIOS_DCSP: 2347 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2348 "Obsolete, Unsupported Fibre Channel Adapter"}; 2349 break; 2350 case PCI_DEVICE_ID_NEPTUNE: 2351 m = (typeof(m)){"LPe1000", "PCIe", 2352 
"Obsolete, Unsupported Fibre Channel Adapter"}; 2353 break; 2354 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2355 m = (typeof(m)){"LPe1000-SP", "PCIe", 2356 "Obsolete, Unsupported Fibre Channel Adapter"}; 2357 break; 2358 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2359 m = (typeof(m)){"LPe1002-SP", "PCIe", 2360 "Obsolete, Unsupported Fibre Channel Adapter"}; 2361 break; 2362 case PCI_DEVICE_ID_BMID: 2363 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2364 break; 2365 case PCI_DEVICE_ID_BSMB: 2366 m = (typeof(m)){"LP111", "PCI-X2", 2367 "Obsolete, Unsupported Fibre Channel Adapter"}; 2368 break; 2369 case PCI_DEVICE_ID_ZEPHYR: 2370 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2371 break; 2372 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2373 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2374 break; 2375 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2376 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2377 GE = 1; 2378 break; 2379 case PCI_DEVICE_ID_ZMID: 2380 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2381 break; 2382 case PCI_DEVICE_ID_ZSMB: 2383 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2384 break; 2385 case PCI_DEVICE_ID_LP101: 2386 m = (typeof(m)){"LP101", "PCI-X", 2387 "Obsolete, Unsupported Fibre Channel Adapter"}; 2388 break; 2389 case PCI_DEVICE_ID_LP10000S: 2390 m = (typeof(m)){"LP10000-S", "PCI", 2391 "Obsolete, Unsupported Fibre Channel Adapter"}; 2392 break; 2393 case PCI_DEVICE_ID_LP11000S: 2394 m = (typeof(m)){"LP11000-S", "PCI-X2", 2395 "Obsolete, Unsupported Fibre Channel Adapter"}; 2396 break; 2397 case PCI_DEVICE_ID_LPE11000S: 2398 m = (typeof(m)){"LPe11000-S", "PCIe", 2399 "Obsolete, Unsupported Fibre Channel Adapter"}; 2400 break; 2401 case PCI_DEVICE_ID_SAT: 2402 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2403 break; 2404 case PCI_DEVICE_ID_SAT_MID: 2405 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2406 break; 2407 case PCI_DEVICE_ID_SAT_SMB: 2408 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2409 break; 2410 case PCI_DEVICE_ID_SAT_DCSP: 2411 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2412 break; 2413 case PCI_DEVICE_ID_SAT_SCSP: 2414 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2415 break; 2416 case PCI_DEVICE_ID_SAT_S: 2417 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2418 break; 2419 case PCI_DEVICE_ID_HORNET: 2420 m = (typeof(m)){"LP21000", "PCIe", 2421 "Obsolete, Unsupported FCoE Adapter"}; 2422 GE = 1; 2423 break; 2424 case PCI_DEVICE_ID_PROTEUS_VF: 2425 m = (typeof(m)){"LPev12000", "PCIe IOV", 2426 "Obsolete, Unsupported Fibre Channel Adapter"}; 2427 break; 2428 case PCI_DEVICE_ID_PROTEUS_PF: 2429 m = (typeof(m)){"LPev12000", "PCIe IOV", 2430 "Obsolete, Unsupported Fibre Channel Adapter"}; 2431 break; 2432 case PCI_DEVICE_ID_PROTEUS_S: 2433 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2434 "Obsolete, Unsupported Fibre Channel Adapter"}; 2435 break; 2436 case PCI_DEVICE_ID_TIGERSHARK: 2437 oneConnect = 1; 2438 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2439 break; 2440 case PCI_DEVICE_ID_TOMCAT: 2441 oneConnect = 1; 2442 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2443 break; 2444 case PCI_DEVICE_ID_FALCON: 2445 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2446 "EmulexSecure Fibre"}; 2447 break; 2448 case PCI_DEVICE_ID_BALIUS: 2449 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2450 "Obsolete, Unsupported Fibre Channel Adapter"}; 2451 break; 2452 case PCI_DEVICE_ID_LANCER_FC: 2453 m = (typeof(m)){"LPe16000", "PCIe", 
"Fibre Channel Adapter"}; 2454 break; 2455 case PCI_DEVICE_ID_LANCER_FC_VF: 2456 m = (typeof(m)){"LPe16000", "PCIe", 2457 "Obsolete, Unsupported Fibre Channel Adapter"}; 2458 break; 2459 case PCI_DEVICE_ID_LANCER_FCOE: 2460 oneConnect = 1; 2461 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2462 break; 2463 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2464 oneConnect = 1; 2465 m = (typeof(m)){"OCe15100", "PCIe", 2466 "Obsolete, Unsupported FCoE"}; 2467 break; 2468 case PCI_DEVICE_ID_LANCER_G6_FC: 2469 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2470 break; 2471 case PCI_DEVICE_ID_SKYHAWK: 2472 case PCI_DEVICE_ID_SKYHAWK_VF: 2473 oneConnect = 1; 2474 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2475 break; 2476 default: 2477 m = (typeof(m)){"Unknown", "", ""}; 2478 break; 2479 } 2480 2481 if (mdp && mdp[0] == '\0') 2482 snprintf(mdp, 79,"%s", m.name); 2483 /* 2484 * oneConnect hba requires special processing, they are all initiators 2485 * and we put the port number on the end 2486 */ 2487 if (descp && descp[0] == '\0') { 2488 if (oneConnect) 2489 snprintf(descp, 255, 2490 "Emulex OneConnect %s, %s Initiator %s", 2491 m.name, m.function, 2492 phba->Port); 2493 else if (max_speed == 0) 2494 snprintf(descp, 255, 2495 "Emulex %s %s %s", 2496 m.name, m.bus, m.function); 2497 else 2498 snprintf(descp, 255, 2499 "Emulex %s %d%s %s %s", 2500 m.name, max_speed, (GE) ? "GE" : "Gb", 2501 m.bus, m.function); 2502 } 2503 } 2504 2505 /** 2506 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2507 * @phba: pointer to lpfc hba data structure. 2508 * @pring: pointer to a IOCB ring. 2509 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2510 * 2511 * This routine posts a given number of IOCBs with the associated DMA buffer 2512 * descriptors specified by the cnt argument to the given IOCB ring. 2513 * 2514 * Return codes 2515 * The number of IOCBs NOT able to be posted to the IOCB ring. 
2516 **/ 2517 int 2518 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2519 { 2520 IOCB_t *icmd; 2521 struct lpfc_iocbq *iocb; 2522 struct lpfc_dmabuf *mp1, *mp2; 2523 2524 cnt += pring->missbufcnt; 2525 2526 /* While there are buffers to post */ 2527 while (cnt > 0) { 2528 /* Allocate buffer for command iocb */ 2529 iocb = lpfc_sli_get_iocbq(phba); 2530 if (iocb == NULL) { 2531 pring->missbufcnt = cnt; 2532 return cnt; 2533 } 2534 icmd = &iocb->iocb; 2535 2536 /* 2 buffers can be posted per command */ 2537 /* Allocate buffer to post */ 2538 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2539 if (mp1) 2540 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2541 if (!mp1 || !mp1->virt) { 2542 kfree(mp1); 2543 lpfc_sli_release_iocbq(phba, iocb); 2544 pring->missbufcnt = cnt; 2545 return cnt; 2546 } 2547 2548 INIT_LIST_HEAD(&mp1->list); 2549 /* Allocate buffer to post */ 2550 if (cnt > 1) { 2551 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2552 if (mp2) 2553 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2554 &mp2->phys); 2555 if (!mp2 || !mp2->virt) { 2556 kfree(mp2); 2557 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2558 kfree(mp1); 2559 lpfc_sli_release_iocbq(phba, iocb); 2560 pring->missbufcnt = cnt; 2561 return cnt; 2562 } 2563 2564 INIT_LIST_HEAD(&mp2->list); 2565 } else { 2566 mp2 = NULL; 2567 } 2568 2569 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2570 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2571 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2572 icmd->ulpBdeCount = 1; 2573 cnt--; 2574 if (mp2) { 2575 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2576 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2577 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2578 cnt--; 2579 icmd->ulpBdeCount = 2; 2580 } 2581 2582 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2583 icmd->ulpLe = 1; 2584 2585 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2586 IOCB_ERROR) { 2587 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2588 kfree(mp1); 2589 cnt++; 2590 if (mp2) { 2591 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2592 kfree(mp2); 2593 cnt++; 2594 } 2595 lpfc_sli_release_iocbq(phba, iocb); 2596 pring->missbufcnt = cnt; 2597 return cnt; 2598 } 2599 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2600 if (mp2) 2601 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2602 } 2603 pring->missbufcnt = 0; 2604 return 0; 2605 } 2606 2607 /** 2608 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2609 * @phba: pointer to lpfc hba data structure. 2610 * 2611 * This routine posts initial receive IOCB buffers to the ELS ring. The 2612 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2613 * set to 64 IOCBs. SLI3 only. 2614 * 2615 * Return codes 2616 * 0 - success (currently always success) 2617 **/ 2618 static int 2619 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2620 { 2621 struct lpfc_sli *psli = &phba->sli; 2622 2623 /* Ring 0, ELS / CT buffers */ 2624 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2625 /* Ring 2 - FCP no buffers needed */ 2626 2627 return 0; 2628 } 2629 2630 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2631 2632 /** 2633 * lpfc_sha_init - Set up initial array of hash table entries 2634 * @HashResultPointer: pointer to an array as hash table. 2635 * 2636 * This routine sets up the initial values to the array of hash table entries 2637 * for the LC HBAs. 
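 *
 * The five values written here are the standard SHA-1 initial hash words
 * H0..H4, and the S(N,V) macro defined above is a 32-bit left rotation,
 * i.e. rotl32(v, n):
 *
 *	S(n, v) == ((v << n) | (v >> (32 - n)))
 *
 * lpfc_sha_iterate() below then runs a single SHA-1 compression over the
 * 80-word message schedule held in its HashWorkingPointer argument.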
2638 **/ 2639 static void 2640 lpfc_sha_init(uint32_t * HashResultPointer) 2641 { 2642 HashResultPointer[0] = 0x67452301; 2643 HashResultPointer[1] = 0xEFCDAB89; 2644 HashResultPointer[2] = 0x98BADCFE; 2645 HashResultPointer[3] = 0x10325476; 2646 HashResultPointer[4] = 0xC3D2E1F0; 2647 } 2648 2649 /** 2650 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2651 * @HashResultPointer: pointer to an initial/result hash table. 2652 * @HashWorkingPointer: pointer to a working hash table. 2653 * 2654 * This routine iterates an initial hash table pointed to by @HashResultPointer 2655 * with the values from the working hash table pointed to by @HashWorkingPointer. 2656 * The results are put back into the initial hash table and returned through 2657 * the @HashResultPointer as the result hash table. 2658 **/ 2659 static void 2660 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2661 { 2662 int t; 2663 uint32_t TEMP; 2664 uint32_t A, B, C, D, E; 2665 t = 16; 2666 do { 2667 HashWorkingPointer[t] = 2668 S(1, 2669 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2670 8] ^ 2671 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2672 } while (++t <= 79); 2673 t = 0; 2674 A = HashResultPointer[0]; 2675 B = HashResultPointer[1]; 2676 C = HashResultPointer[2]; 2677 D = HashResultPointer[3]; 2678 E = HashResultPointer[4]; 2679 2680 do { 2681 if (t < 20) { 2682 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2683 } else if (t < 40) { 2684 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2685 } else if (t < 60) { 2686 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2687 } else { 2688 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2689 } 2690 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2691 E = D; 2692 D = C; 2693 C = S(30, B); 2694 B = A; 2695 A = TEMP; 2696 } while (++t <= 79); 2697 2698 HashResultPointer[0] += A; 2699 HashResultPointer[1] += B; 2700 HashResultPointer[2] += C; 2701 HashResultPointer[3] += D; 2702 HashResultPointer[4] += E; 2703 2704 } 2705 2706 /** 2707 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2708 * @RandomChallenge: pointer to the entry of host challenge random number array. 2709 * @HashWorking: pointer to the entry of the working hash array. 2710 * 2711 * This routine calculates the working hash array referred to by @HashWorking 2712 * from the challenge random numbers associated with the host, referred to by 2713 * @RandomChallenge. The result is put into the entry of the working hash 2714 * array and returned by reference through @HashWorking. 2715 **/ 2716 static void 2717 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2718 { 2719 *HashWorking = (*RandomChallenge ^ *HashWorking); 2720 } 2721 2722 /** 2723 * lpfc_hba_init - Perform special handling for LC HBA initialization 2724 * @phba: pointer to lpfc hba data structure. 2725 * @hbainit: pointer to an array of unsigned 32-bit integers. 2726 * 2727 * This routine performs the special handling for LC HBA initialization.
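 *
 * The routine seeds an 80-word working array with the adapter WWNN (words
 * 0 and 1, mirrored into words 78 and 79), XORs the first seven words with
 * the host's RandomData challenge via lpfc_challenge_key(), and then uses
 * the SHA-1 helpers above to produce the five-word result in @hbainit.
 * A condensed sketch of the data flow:
 *
 *	HashWorking[0] = HashWorking[78] = wwnn[0];
 *	HashWorking[1] = HashWorking[79] = wwnn[1];
 *	for (t = 0; t < 7; t++)
 *		HashWorking[t] ^= phba->RandomData[t];
 *	lpfc_sha_init(hbainit);
 *	lpfc_sha_iterate(hbainit, HashWorking);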
2728 **/ 2729 void 2730 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2731 { 2732 int t; 2733 uint32_t *HashWorking; 2734 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2735 2736 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2737 if (!HashWorking) 2738 return; 2739 2740 HashWorking[0] = HashWorking[78] = *pwwnn++; 2741 HashWorking[1] = HashWorking[79] = *pwwnn; 2742 2743 for (t = 0; t < 7; t++) 2744 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2745 2746 lpfc_sha_init(hbainit); 2747 lpfc_sha_iterate(hbainit, HashWorking); 2748 kfree(HashWorking); 2749 } 2750 2751 /** 2752 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2753 * @vport: pointer to a virtual N_Port data structure. 2754 * 2755 * This routine performs the necessary cleanups before deleting the @vport. 2756 * It invokes the discovery state machine to perform necessary state 2757 * transitions and to release the ndlps associated with the @vport. Note, 2758 * the physical port is treated as @vport 0. 2759 **/ 2760 void 2761 lpfc_cleanup(struct lpfc_vport *vport) 2762 { 2763 struct lpfc_hba *phba = vport->phba; 2764 struct lpfc_nodelist *ndlp, *next_ndlp; 2765 int i = 0; 2766 2767 if (phba->link_state > LPFC_LINK_DOWN) 2768 lpfc_port_link_failure(vport); 2769 2770 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2771 if (!NLP_CHK_NODE_ACT(ndlp)) { 2772 ndlp = lpfc_enable_node(vport, ndlp, 2773 NLP_STE_UNUSED_NODE); 2774 if (!ndlp) 2775 continue; 2776 spin_lock_irq(&phba->ndlp_lock); 2777 NLP_SET_FREE_REQ(ndlp); 2778 spin_unlock_irq(&phba->ndlp_lock); 2779 /* Trigger the release of the ndlp memory */ 2780 lpfc_nlp_put(ndlp); 2781 continue; 2782 } 2783 spin_lock_irq(&phba->ndlp_lock); 2784 if (NLP_CHK_FREE_REQ(ndlp)) { 2785 /* The ndlp should not be in memory free mode already */ 2786 spin_unlock_irq(&phba->ndlp_lock); 2787 continue; 2788 } else 2789 /* Indicate request for freeing ndlp memory */ 2790 NLP_SET_FREE_REQ(ndlp); 2791 spin_unlock_irq(&phba->ndlp_lock); 2792 2793 if (vport->port_type != LPFC_PHYSICAL_PORT && 2794 ndlp->nlp_DID == Fabric_DID) { 2795 /* Just free up ndlp with Fabric_DID for vports */ 2796 lpfc_nlp_put(ndlp); 2797 continue; 2798 } 2799 2800 /* take care of nodes in unused state before the state 2801 * machine taking action. 2802 */ 2803 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2804 lpfc_nlp_put(ndlp); 2805 continue; 2806 } 2807 2808 if (ndlp->nlp_type & NLP_FABRIC) 2809 lpfc_disc_state_machine(vport, ndlp, NULL, 2810 NLP_EVT_DEVICE_RECOVERY); 2811 2812 lpfc_disc_state_machine(vport, ndlp, NULL, 2813 NLP_EVT_DEVICE_RM); 2814 } 2815 2816 /* At this point, ALL ndlp's should be gone 2817 * because of the previous NLP_EVT_DEVICE_RM. 2818 * Lets wait for this to happen, if needed. 2819 */ 2820 while (!list_empty(&vport->fc_nodes)) { 2821 if (i++ > 3000) { 2822 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2823 "0233 Nodelist not empty\n"); 2824 list_for_each_entry_safe(ndlp, next_ndlp, 2825 &vport->fc_nodes, nlp_listp) { 2826 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2827 LOG_NODE, 2828 "0282 did:x%x ndlp:x%p " 2829 "usgmap:x%x refcnt:%d\n", 2830 ndlp->nlp_DID, (void *)ndlp, 2831 ndlp->nlp_usg_map, 2832 kref_read(&ndlp->kref)); 2833 } 2834 break; 2835 } 2836 2837 /* Wait for any activity on ndlps to settle */ 2838 msleep(10); 2839 } 2840 lpfc_cleanup_vports_rrqs(vport, NULL); 2841 } 2842 2843 /** 2844 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2845 * @vport: pointer to a virtual N_Port data structure. 
2846 * 2847 * This routine stops all the timers associated with a @vport. This function 2848 * is invoked before disabling or deleting a @vport. Note that the physical 2849 * port is treated as @vport 0. 2850 **/ 2851 void 2852 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2853 { 2854 del_timer_sync(&vport->els_tmofunc); 2855 del_timer_sync(&vport->delayed_disc_tmo); 2856 lpfc_can_disctmo(vport); 2857 return; 2858 } 2859 2860 /** 2861 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2862 * @phba: pointer to lpfc hba data structure. 2863 * 2864 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2865 * caller of this routine should already hold the host lock. 2866 **/ 2867 void 2868 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2869 { 2870 /* Clear pending FCF rediscovery wait flag */ 2871 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2872 2873 /* Now, try to stop the timer */ 2874 del_timer(&phba->fcf.redisc_wait); 2875 } 2876 2877 /** 2878 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2879 * @phba: pointer to lpfc hba data structure. 2880 * 2881 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2882 * checks whether the FCF rediscovery wait timer is pending with the host 2883 * lock held before proceeding with disabling the timer and clearing the 2884 * wait timer pendig flag. 2885 **/ 2886 void 2887 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2888 { 2889 spin_lock_irq(&phba->hbalock); 2890 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2891 /* FCF rediscovery timer already fired or stopped */ 2892 spin_unlock_irq(&phba->hbalock); 2893 return; 2894 } 2895 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2896 /* Clear failover in progress flags */ 2897 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2898 spin_unlock_irq(&phba->hbalock); 2899 } 2900 2901 /** 2902 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2903 * @phba: pointer to lpfc hba data structure. 2904 * 2905 * This routine stops all the timers associated with a HBA. This function is 2906 * invoked before either putting a HBA offline or unloading the driver. 2907 **/ 2908 void 2909 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2910 { 2911 lpfc_stop_vport_timers(phba->pport); 2912 del_timer_sync(&phba->sli.mbox_tmo); 2913 del_timer_sync(&phba->fabric_block_timer); 2914 del_timer_sync(&phba->eratt_poll); 2915 del_timer_sync(&phba->hb_tmofunc); 2916 if (phba->sli_rev == LPFC_SLI_REV4) { 2917 del_timer_sync(&phba->rrq_tmr); 2918 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2919 } 2920 phba->hb_outstanding = 0; 2921 2922 switch (phba->pci_dev_grp) { 2923 case LPFC_PCI_DEV_LP: 2924 /* Stop any LightPulse device specific driver timers */ 2925 del_timer_sync(&phba->fcp_poll_timer); 2926 break; 2927 case LPFC_PCI_DEV_OC: 2928 /* Stop any OneConnect device sepcific driver timers */ 2929 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2930 break; 2931 default: 2932 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2933 "0297 Invalid device group (x%x)\n", 2934 phba->pci_dev_grp); 2935 break; 2936 } 2937 return; 2938 } 2939 2940 /** 2941 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2942 * @phba: pointer to lpfc hba data structure. 2943 * 2944 * This routine marks a HBA's management interface as blocked. Once the HBA's 2945 * management interface is marked as blocked, all the user space access to 2946 * the HBA, whether they are from sysfs interface or libdfc interface will 2947 * all be blocked. 
The HBA is set to block the management interface when the 2948 * driver prepares the HBA interface for online or offline. 2949 **/ 2950 static void 2951 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2952 { 2953 unsigned long iflag; 2954 uint8_t actcmd = MBX_HEARTBEAT; 2955 unsigned long timeout; 2956 2957 spin_lock_irqsave(&phba->hbalock, iflag); 2958 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2959 spin_unlock_irqrestore(&phba->hbalock, iflag); 2960 if (mbx_action == LPFC_MBX_NO_WAIT) 2961 return; 2962 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2963 spin_lock_irqsave(&phba->hbalock, iflag); 2964 if (phba->sli.mbox_active) { 2965 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2966 /* Determine how long we might wait for the active mailbox 2967 * command to be gracefully completed by firmware. 2968 */ 2969 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2970 phba->sli.mbox_active) * 1000) + jiffies; 2971 } 2972 spin_unlock_irqrestore(&phba->hbalock, iflag); 2973 2974 /* Wait for the outstnading mailbox command to complete */ 2975 while (phba->sli.mbox_active) { 2976 /* Check active mailbox complete status every 2ms */ 2977 msleep(2); 2978 if (time_after(jiffies, timeout)) { 2979 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2980 "2813 Mgmt IO is Blocked %x " 2981 "- mbox cmd %x still active\n", 2982 phba->sli.sli_flag, actcmd); 2983 break; 2984 } 2985 } 2986 } 2987 2988 /** 2989 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 2990 * @phba: pointer to lpfc hba data structure. 2991 * 2992 * Allocate RPIs for all active remote nodes. This is needed whenever 2993 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 2994 * is to fixup the temporary rpi assignments. 2995 **/ 2996 void 2997 lpfc_sli4_node_prep(struct lpfc_hba *phba) 2998 { 2999 struct lpfc_nodelist *ndlp, *next_ndlp; 3000 struct lpfc_vport **vports; 3001 int i, rpi; 3002 unsigned long flags; 3003 3004 if (phba->sli_rev != LPFC_SLI_REV4) 3005 return; 3006 3007 vports = lpfc_create_vport_work_array(phba); 3008 if (vports == NULL) 3009 return; 3010 3011 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3012 if (vports[i]->load_flag & FC_UNLOADING) 3013 continue; 3014 3015 list_for_each_entry_safe(ndlp, next_ndlp, 3016 &vports[i]->fc_nodes, 3017 nlp_listp) { 3018 if (!NLP_CHK_NODE_ACT(ndlp)) 3019 continue; 3020 rpi = lpfc_sli4_alloc_rpi(phba); 3021 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3022 spin_lock_irqsave(&phba->ndlp_lock, flags); 3023 NLP_CLR_NODE_ACT(ndlp); 3024 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3025 continue; 3026 } 3027 ndlp->nlp_rpi = rpi; 3028 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3029 "0009 rpi:%x DID:%x " 3030 "flg:%x map:%x %p\n", ndlp->nlp_rpi, 3031 ndlp->nlp_DID, ndlp->nlp_flag, 3032 ndlp->nlp_usg_map, ndlp); 3033 } 3034 } 3035 lpfc_destroy_vport_work_array(phba, vports); 3036 } 3037 3038 /** 3039 * lpfc_online - Initialize and bring a HBA online 3040 * @phba: pointer to lpfc hba data structure. 3041 * 3042 * This routine initializes the HBA and brings a HBA online. During this 3043 * process, the management interface is blocked to prevent user space access 3044 * to the HBA interfering with the driver initialization. 
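 *
 * The routine blocks the management interface itself while the port is
 * being brought up and unblocks it again before returning, on both the
 * success and the failure path. A minimal usage sketch:
 *
 *	if (lpfc_online(phba))
 *		return 1;
 *
 * where a non-zero return means the bring-up failed and the port is left
 * offline.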
3045 * 3046 * Return codes 3047 * 0 - successful 3048 * 1 - failed 3049 **/ 3050 int 3051 lpfc_online(struct lpfc_hba *phba) 3052 { 3053 struct lpfc_vport *vport; 3054 struct lpfc_vport **vports; 3055 int i, error = 0; 3056 bool vpis_cleared = false; 3057 3058 if (!phba) 3059 return 0; 3060 vport = phba->pport; 3061 3062 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3063 return 0; 3064 3065 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3066 "0458 Bring Adapter online\n"); 3067 3068 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3069 3070 if (phba->sli_rev == LPFC_SLI_REV4) { 3071 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3072 lpfc_unblock_mgmt_io(phba); 3073 return 1; 3074 } 3075 spin_lock_irq(&phba->hbalock); 3076 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3077 vpis_cleared = true; 3078 spin_unlock_irq(&phba->hbalock); 3079 3080 /* Reestablish the local initiator port. 3081 * The offline process destroyed the previous lport. 3082 */ 3083 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3084 !phba->nvmet_support) { 3085 error = lpfc_nvme_create_localport(phba->pport); 3086 if (error) 3087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3088 "6132 NVME restore reg failed " 3089 "on nvmei error x%x\n", error); 3090 } 3091 } else { 3092 lpfc_sli_queue_init(phba); 3093 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3094 lpfc_unblock_mgmt_io(phba); 3095 return 1; 3096 } 3097 } 3098 3099 vports = lpfc_create_vport_work_array(phba); 3100 if (vports != NULL) { 3101 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3102 struct Scsi_Host *shost; 3103 shost = lpfc_shost_from_vport(vports[i]); 3104 spin_lock_irq(shost->host_lock); 3105 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3106 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3107 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3108 if (phba->sli_rev == LPFC_SLI_REV4) { 3109 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3110 if ((vpis_cleared) && 3111 (vports[i]->port_type != 3112 LPFC_PHYSICAL_PORT)) 3113 vports[i]->vpi = 0; 3114 } 3115 spin_unlock_irq(shost->host_lock); 3116 } 3117 } 3118 lpfc_destroy_vport_work_array(phba, vports); 3119 3120 lpfc_unblock_mgmt_io(phba); 3121 return 0; 3122 } 3123 3124 /** 3125 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3126 * @phba: pointer to lpfc hba data structure. 3127 * 3128 * This routine marks a HBA's management interface as not blocked. Once the 3129 * HBA's management interface is marked as not blocked, all the user space 3130 * access to the HBA, whether they are from sysfs interface or libdfc 3131 * interface will be allowed. The HBA is set to block the management interface 3132 * when the driver prepares the HBA interface for online or offline and then 3133 * set to unblock the management interface afterwards. 3134 **/ 3135 void 3136 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3137 { 3138 unsigned long iflag; 3139 3140 spin_lock_irqsave(&phba->hbalock, iflag); 3141 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3142 spin_unlock_irqrestore(&phba->hbalock, iflag); 3143 } 3144 3145 /** 3146 * lpfc_offline_prep - Prepare a HBA to be brought offline 3147 * @phba: pointer to lpfc hba data structure. 3148 * 3149 * This routine is invoked to prepare a HBA to be brought offline. It performs 3150 * unregistration login to all the nodes on all vports and flushes the mailbox 3151 * queue to make it ready to be brought offline. 
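 *
 * The mbx_action argument selects how the mailbox subsystem is quiesced:
 * LPFC_MBX_WAIT lets an active mailbox command drain first, while
 * LPFC_MBX_NO_WAIT shuts it down without waiting. For every active node on
 * every vport the body roughly does:
 *
 *	if (ndlp->nlp_type & NLP_FABRIC)
 *		... run DEVICE_RECOVERY and DEVICE_RM through the disc state machine ...
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
 *	lpfc_unreg_rpi(vports[i], ndlp);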
3152 **/ 3153 void 3154 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3155 { 3156 struct lpfc_vport *vport = phba->pport; 3157 struct lpfc_nodelist *ndlp, *next_ndlp; 3158 struct lpfc_vport **vports; 3159 struct Scsi_Host *shost; 3160 int i; 3161 3162 if (vport->fc_flag & FC_OFFLINE_MODE) 3163 return; 3164 3165 lpfc_block_mgmt_io(phba, mbx_action); 3166 3167 lpfc_linkdown(phba); 3168 3169 /* Issue an unreg_login to all nodes on all vports */ 3170 vports = lpfc_create_vport_work_array(phba); 3171 if (vports != NULL) { 3172 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3173 if (vports[i]->load_flag & FC_UNLOADING) 3174 continue; 3175 shost = lpfc_shost_from_vport(vports[i]); 3176 spin_lock_irq(shost->host_lock); 3177 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3178 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3179 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3180 spin_unlock_irq(shost->host_lock); 3181 3182 shost = lpfc_shost_from_vport(vports[i]); 3183 list_for_each_entry_safe(ndlp, next_ndlp, 3184 &vports[i]->fc_nodes, 3185 nlp_listp) { 3186 if (!NLP_CHK_NODE_ACT(ndlp)) 3187 continue; 3188 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3189 continue; 3190 if (ndlp->nlp_type & NLP_FABRIC) { 3191 lpfc_disc_state_machine(vports[i], ndlp, 3192 NULL, NLP_EVT_DEVICE_RECOVERY); 3193 lpfc_disc_state_machine(vports[i], ndlp, 3194 NULL, NLP_EVT_DEVICE_RM); 3195 } 3196 spin_lock_irq(shost->host_lock); 3197 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3198 spin_unlock_irq(shost->host_lock); 3199 /* 3200 * Whenever an SLI4 port goes offline, free the 3201 * RPI. Get a new RPI when the adapter port 3202 * comes back online. 3203 */ 3204 if (phba->sli_rev == LPFC_SLI_REV4) { 3205 lpfc_printf_vlog(ndlp->vport, 3206 KERN_INFO, LOG_NODE, 3207 "0011 lpfc_offline: " 3208 "ndlp:x%p did %x " 3209 "usgmap:x%x rpi:%x\n", 3210 ndlp, ndlp->nlp_DID, 3211 ndlp->nlp_usg_map, 3212 ndlp->nlp_rpi); 3213 3214 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3215 } 3216 lpfc_unreg_rpi(vports[i], ndlp); 3217 } 3218 } 3219 } 3220 lpfc_destroy_vport_work_array(phba, vports); 3221 3222 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3223 3224 if (phba->wq) 3225 flush_workqueue(phba->wq); 3226 } 3227 3228 /** 3229 * lpfc_offline - Bring a HBA offline 3230 * @phba: pointer to lpfc hba data structure. 3231 * 3232 * This routine actually brings a HBA offline. It stops all the timers 3233 * associated with the HBA, brings down the SLI layer, and eventually 3234 * marks the HBA as in offline state for the upper layer protocol. 3235 **/ 3236 void 3237 lpfc_offline(struct lpfc_hba *phba) 3238 { 3239 struct Scsi_Host *shost; 3240 struct lpfc_vport **vports; 3241 int i; 3242 3243 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3244 return; 3245 3246 /* stop port and all timers associated with this hba */ 3247 lpfc_stop_port(phba); 3248 3249 /* Tear down the local and target port registrations. The 3250 * nvme transports need to cleanup. 3251 */ 3252 lpfc_nvmet_destroy_targetport(phba); 3253 lpfc_nvme_destroy_localport(phba->pport); 3254 3255 vports = lpfc_create_vport_work_array(phba); 3256 if (vports != NULL) 3257 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3258 lpfc_stop_vport_timers(vports[i]); 3259 lpfc_destroy_vport_work_array(phba, vports); 3260 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3261 "0460 Bring Adapter offline\n"); 3262 /* Bring down the SLI Layer and cleanup. The HBA is offline 3263 now. 
*/ 3264 lpfc_sli_hba_down(phba); 3265 spin_lock_irq(&phba->hbalock); 3266 phba->work_ha = 0; 3267 spin_unlock_irq(&phba->hbalock); 3268 vports = lpfc_create_vport_work_array(phba); 3269 if (vports != NULL) 3270 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3271 shost = lpfc_shost_from_vport(vports[i]); 3272 spin_lock_irq(shost->host_lock); 3273 vports[i]->work_port_events = 0; 3274 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3275 spin_unlock_irq(shost->host_lock); 3276 } 3277 lpfc_destroy_vport_work_array(phba, vports); 3278 } 3279 3280 /** 3281 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3282 * @phba: pointer to lpfc hba data structure. 3283 * 3284 * This routine is to free all the SCSI buffers and IOCBs from the driver 3285 * list back to kernel. It is called from lpfc_pci_remove_one to free 3286 * the internal resources before the device is removed from the system. 3287 **/ 3288 static void 3289 lpfc_scsi_free(struct lpfc_hba *phba) 3290 { 3291 struct lpfc_scsi_buf *sb, *sb_next; 3292 3293 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3294 return; 3295 3296 spin_lock_irq(&phba->hbalock); 3297 3298 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3299 3300 spin_lock(&phba->scsi_buf_list_put_lock); 3301 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3302 list) { 3303 list_del(&sb->list); 3304 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3305 sb->dma_handle); 3306 kfree(sb); 3307 phba->total_scsi_bufs--; 3308 } 3309 spin_unlock(&phba->scsi_buf_list_put_lock); 3310 3311 spin_lock(&phba->scsi_buf_list_get_lock); 3312 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3313 list) { 3314 list_del(&sb->list); 3315 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3316 sb->dma_handle); 3317 kfree(sb); 3318 phba->total_scsi_bufs--; 3319 } 3320 spin_unlock(&phba->scsi_buf_list_get_lock); 3321 spin_unlock_irq(&phba->hbalock); 3322 } 3323 /** 3324 * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists 3325 * @phba: pointer to lpfc hba data structure. 3326 * 3327 * This routine is to free all the NVME buffers and IOCBs from the driver 3328 * list back to kernel. It is called from lpfc_pci_remove_one to free 3329 * the internal resources before the device is removed from the system. 3330 **/ 3331 static void 3332 lpfc_nvme_free(struct lpfc_hba *phba) 3333 { 3334 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; 3335 3336 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 3337 return; 3338 3339 spin_lock_irq(&phba->hbalock); 3340 3341 /* Release all the lpfc_nvme_bufs maintained by this host. 
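 *
 * Buffers normally circulate between a "get" list (consumed by the I/O
 * submission path) and a "put" list (refilled on completion), each guarded
 * by its own lock so allocation and free do not contend. When the get list
 * runs dry, the put list is spliced across under both list locks, roughly:
 *
 *	list_splice_init(&phba->lpfc_nvme_buf_list_put,
 *			 &phba->lpfc_nvme_buf_list_get);
 *
 * Tear-down therefore has to walk and free both lists, which is what the
 * code below does.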
*/ 3342 spin_lock(&phba->nvme_buf_list_put_lock); 3343 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3344 &phba->lpfc_nvme_buf_list_put, list) { 3345 list_del(&lpfc_ncmd->list); 3346 phba->put_nvme_bufs--; 3347 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, 3348 lpfc_ncmd->dma_handle); 3349 kfree(lpfc_ncmd); 3350 phba->total_nvme_bufs--; 3351 } 3352 spin_unlock(&phba->nvme_buf_list_put_lock); 3353 3354 spin_lock(&phba->nvme_buf_list_get_lock); 3355 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3356 &phba->lpfc_nvme_buf_list_get, list) { 3357 list_del(&lpfc_ncmd->list); 3358 phba->get_nvme_bufs--; 3359 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, 3360 lpfc_ncmd->dma_handle); 3361 kfree(lpfc_ncmd); 3362 phba->total_nvme_bufs--; 3363 } 3364 spin_unlock(&phba->nvme_buf_list_get_lock); 3365 spin_unlock_irq(&phba->hbalock); 3366 } 3367 /** 3368 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3369 * @phba: pointer to lpfc hba data structure. 3370 * 3371 * This routine first calculates the sizes of the current els and allocated 3372 * scsi sgl lists, and then goes through all sgls to updates the physical 3373 * XRIs assigned due to port function reset. During port initialization, the 3374 * current els and allocated scsi sgl lists are 0s. 3375 * 3376 * Return codes 3377 * 0 - successful (for now, it always returns 0) 3378 **/ 3379 int 3380 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3381 { 3382 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3383 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3384 LIST_HEAD(els_sgl_list); 3385 int rc; 3386 3387 /* 3388 * update on pci function's els xri-sgl list 3389 */ 3390 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3391 3392 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3393 /* els xri-sgl expanded */ 3394 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3395 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3396 "3157 ELS xri-sgl count increased from " 3397 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3398 els_xri_cnt); 3399 /* allocate the additional els sgls */ 3400 for (i = 0; i < xri_cnt; i++) { 3401 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3402 GFP_KERNEL); 3403 if (sglq_entry == NULL) { 3404 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3405 "2562 Failure to allocate an " 3406 "ELS sgl entry:%d\n", i); 3407 rc = -ENOMEM; 3408 goto out_free_mem; 3409 } 3410 sglq_entry->buff_type = GEN_BUFF_TYPE; 3411 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3412 &sglq_entry->phys); 3413 if (sglq_entry->virt == NULL) { 3414 kfree(sglq_entry); 3415 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3416 "2563 Failure to allocate an " 3417 "ELS mbuf:%d\n", i); 3418 rc = -ENOMEM; 3419 goto out_free_mem; 3420 } 3421 sglq_entry->sgl = sglq_entry->virt; 3422 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3423 sglq_entry->state = SGL_FREED; 3424 list_add_tail(&sglq_entry->list, &els_sgl_list); 3425 } 3426 spin_lock_irq(&phba->hbalock); 3427 spin_lock(&phba->sli4_hba.sgl_list_lock); 3428 list_splice_init(&els_sgl_list, 3429 &phba->sli4_hba.lpfc_els_sgl_list); 3430 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3431 spin_unlock_irq(&phba->hbalock); 3432 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3433 /* els xri-sgl shrinked */ 3434 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3435 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3436 "3158 ELS xri-sgl count decreased from " 3437 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3438 els_xri_cnt); 3439 spin_lock_irq(&phba->hbalock); 3440 
spin_lock(&phba->sli4_hba.sgl_list_lock); 3441 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, 3442 &els_sgl_list); 3443 /* release extra els sgls from list */ 3444 for (i = 0; i < xri_cnt; i++) { 3445 list_remove_head(&els_sgl_list, 3446 sglq_entry, struct lpfc_sglq, list); 3447 if (sglq_entry) { 3448 __lpfc_mbuf_free(phba, sglq_entry->virt, 3449 sglq_entry->phys); 3450 kfree(sglq_entry); 3451 } 3452 } 3453 list_splice_init(&els_sgl_list, 3454 &phba->sli4_hba.lpfc_els_sgl_list); 3455 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3456 spin_unlock_irq(&phba->hbalock); 3457 } else 3458 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3459 "3163 ELS xri-sgl count unchanged: %d\n", 3460 els_xri_cnt); 3461 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 3462 3463 /* update xris to els sgls on the list */ 3464 sglq_entry = NULL; 3465 sglq_entry_next = NULL; 3466 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3467 &phba->sli4_hba.lpfc_els_sgl_list, list) { 3468 lxri = lpfc_sli4_next_xritag(phba); 3469 if (lxri == NO_XRI) { 3470 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3471 "2400 Failed to allocate xri for " 3472 "ELS sgl\n"); 3473 rc = -ENOMEM; 3474 goto out_free_mem; 3475 } 3476 sglq_entry->sli4_lxritag = lxri; 3477 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3478 } 3479 return 0; 3480 3481 out_free_mem: 3482 lpfc_free_els_sgl_list(phba); 3483 return rc; 3484 } 3485 3486 /** 3487 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping 3488 * @phba: pointer to lpfc hba data structure. 3489 * 3490 * This routine first calculates the sizes of the current els and allocated 3491 * scsi sgl lists, and then goes through all sgls to updates the physical 3492 * XRIs assigned due to port function reset. During port initialization, the 3493 * current els and allocated scsi sgl lists are 0s. 
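 *
 * For the NVMET function, every XRI that is not reserved for ELS exchanges
 * is dedicated to target I/O, i.e. the target pool below is sized as:
 *
 *	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
 *
 * and the grow/shrink/re-tag handling mirrors lpfc_sli4_els_sgl_update()
 * above.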
3494 * 3495 * Return codes 3496 * 0 - successful (for now, it always returns 0) 3497 **/ 3498 int 3499 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 3500 { 3501 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3502 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3503 uint16_t nvmet_xri_cnt; 3504 LIST_HEAD(nvmet_sgl_list); 3505 int rc; 3506 3507 /* 3508 * update on pci function's nvmet xri-sgl list 3509 */ 3510 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3511 3512 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 3513 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3514 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3515 /* els xri-sgl expanded */ 3516 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 3517 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3518 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 3519 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 3520 /* allocate the additional nvmet sgls */ 3521 for (i = 0; i < xri_cnt; i++) { 3522 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3523 GFP_KERNEL); 3524 if (sglq_entry == NULL) { 3525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3526 "6303 Failure to allocate an " 3527 "NVMET sgl entry:%d\n", i); 3528 rc = -ENOMEM; 3529 goto out_free_mem; 3530 } 3531 sglq_entry->buff_type = NVMET_BUFF_TYPE; 3532 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 3533 &sglq_entry->phys); 3534 if (sglq_entry->virt == NULL) { 3535 kfree(sglq_entry); 3536 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3537 "6304 Failure to allocate an " 3538 "NVMET buf:%d\n", i); 3539 rc = -ENOMEM; 3540 goto out_free_mem; 3541 } 3542 sglq_entry->sgl = sglq_entry->virt; 3543 memset(sglq_entry->sgl, 0, 3544 phba->cfg_sg_dma_buf_size); 3545 sglq_entry->state = SGL_FREED; 3546 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3547 } 3548 spin_lock_irq(&phba->hbalock); 3549 spin_lock(&phba->sli4_hba.sgl_list_lock); 3550 list_splice_init(&nvmet_sgl_list, 3551 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3552 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3553 spin_unlock_irq(&phba->hbalock); 3554 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3555 /* nvmet xri-sgl shrunk */ 3556 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3557 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3558 "6305 NVMET xri-sgl count decreased from " 3559 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3560 nvmet_xri_cnt); 3561 spin_lock_irq(&phba->hbalock); 3562 spin_lock(&phba->sli4_hba.sgl_list_lock); 3563 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3564 &nvmet_sgl_list); 3565 /* release extra nvmet sgls from list */ 3566 for (i = 0; i < xri_cnt; i++) { 3567 list_remove_head(&nvmet_sgl_list, 3568 sglq_entry, struct lpfc_sglq, list); 3569 if (sglq_entry) { 3570 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3571 sglq_entry->phys); 3572 kfree(sglq_entry); 3573 } 3574 } 3575 list_splice_init(&nvmet_sgl_list, 3576 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3577 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3578 spin_unlock_irq(&phba->hbalock); 3579 } else 3580 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3581 "6306 NVMET xri-sgl count unchanged: %d\n", 3582 nvmet_xri_cnt); 3583 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3584 3585 /* update xris to nvmet sgls on the list */ 3586 sglq_entry = NULL; 3587 sglq_entry_next = NULL; 3588 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3589 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3590 lxri = lpfc_sli4_next_xritag(phba); 3591 if (lxri == NO_XRI) { 3592 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3593 "6307 Failed 
to allocate xri for "
3594 "NVMET sgl\n");
3595 rc = -ENOMEM;
3596 goto out_free_mem;
3597 }
3598 sglq_entry->sli4_lxritag = lxri;
3599 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3600 }
3601 return 0;
3602
3603 out_free_mem:
3604 lpfc_free_nvmet_sgl_list(phba);
3605 return rc;
3606 }
3607
3608 /**
3609 * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
3610 * @phba: pointer to lpfc hba data structure.
3611 *
3612 * This routine first calculates the sizes of the current els and allocated
3613 * scsi sgl lists, and then goes through all sgls to update the physical
3614 * XRIs assigned due to port function reset. During port initialization, the
3615 * current els and allocated scsi sgl lists are 0s.
3616 *
3617 * Return codes
3618 * 0 - successful (for now, it always returns 0)
3619 **/
3620 int
3621 lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
3622 {
3623 struct lpfc_scsi_buf *psb, *psb_next;
3624 uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
3625 LIST_HEAD(scsi_sgl_list);
3626 int rc;
3627
3628 /*
3629 * update on pci function's els xri-sgl list
3630 */
3631 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3632 phba->total_scsi_bufs = 0;
3633
3634 /*
3635 * update on pci function's allocated scsi xri-sgl list
3636 */
3637 /* maximum number of xris available for scsi buffers */
3638 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3639 els_xri_cnt;
3640
3641 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3642 return 0;
3643
3644 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3645 phba->sli4_hba.scsi_xri_max = /* Split them up */
3646 (phba->sli4_hba.scsi_xri_max *
3647 phba->cfg_xri_split) / 100;
3648
3649 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3650 spin_lock(&phba->scsi_buf_list_put_lock);
3651 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3652 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3653 spin_unlock(&phba->scsi_buf_list_put_lock);
3654 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3655
3656 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3657 "6060 Current allocated SCSI xri-sgl count:%d, "
3658 "maximum SCSI xri count:%d (split:%d)\n",
3659 phba->sli4_hba.scsi_xri_cnt,
3660 phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
3661
3662 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
3663 /* max scsi xri shrunk below the allocated scsi buffers */
3664 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3665 phba->sli4_hba.scsi_xri_max;
3666 /* release the extra allocated scsi buffers */
3667 for (i = 0; i < scsi_xri_cnt; i++) {
3668 list_remove_head(&scsi_sgl_list, psb,
3669 struct lpfc_scsi_buf, list);
3670 if (psb) {
3671 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3672 psb->data, psb->dma_handle);
3673 kfree(psb);
3674 }
3675 }
3676 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3677 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
3678 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3679 }
3680
3681 /* update xris associated to remaining allocated scsi buffers */
3682 psb = NULL;
3683 psb_next = NULL;
3684 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3685 lxri = lpfc_sli4_next_xritag(phba);
3686 if (lxri == NO_XRI) {
3687 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3688 "2560 Failed to allocate xri for "
3689 "scsi buffer\n");
3690 rc = -ENOMEM;
3691 goto out_free_mem;
3692 }
3693 psb->cur_iocbq.sli4_lxritag = lxri;
3694 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3695 }
3696 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3697
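/* Note: the put-list lock is nested inside the get-list lock here, as in the splice above, with interrupts disabled on the outer lock. */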
spin_lock(&phba->scsi_buf_list_put_lock);
3698 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3699 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3700 spin_unlock(&phba->scsi_buf_list_put_lock);
3701 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3702 return 0;
3703
3704 out_free_mem:
3705 lpfc_scsi_free(phba);
3706 return rc;
3707 }
3708
3709 static uint64_t
3710 lpfc_get_wwpn(struct lpfc_hba *phba)
3711 {
3712 uint64_t wwn;
3713 int rc;
3714 LPFC_MBOXQ_t *mboxq;
3715 MAILBOX_t *mb;
3716
3717 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3718 GFP_KERNEL);
3719 if (!mboxq)
3720 return (uint64_t)-1;
3721
3722 /* First get WWN of HBA instance */
3723 lpfc_read_nv(phba, mboxq);
3724 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3725 if (rc != MBX_SUCCESS) {
3726 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3727 "6019 Mailbox failed, mbxCmd x%x "
3728 "READ_NV, mbxStatus x%x\n",
3729 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
3730 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
3731 mempool_free(mboxq, phba->mbox_mem_pool);
3732 return (uint64_t) -1;
3733 }
3734 mb = &mboxq->u.mb;
3735 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
3736 /* wwn is WWPN of HBA instance */
3737 mempool_free(mboxq, phba->mbox_mem_pool);
3738 if (phba->sli_rev == LPFC_SLI_REV4)
3739 return be64_to_cpu(wwn);
3740 else
3741 return rol64(wwn, 32);
3742 }
3743
3744 /**
3745 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
3746 * @phba: pointer to lpfc hba data structure.
3747 *
3748 * This routine first calculates the sizes of the current els and allocated
3749 * nvme sgl lists, and then goes through all sgls to update the physical
3750 * XRIs assigned due to port function reset. During port initialization, the
3751 * current els and allocated nvme sgl lists are 0s.
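 *
 * As an illustration of the accounting done below (numbers are
 * hypothetical): with max_xri = 1024, els_xri_cnt = 256 and
 * scsi_xri_max = 384, the NVME pool is sized as
 * nvme_xri_max = 1024 - 256 - 384 = 384 XRIs; the buffer list is then
 * trimmed to that limit if needed and every remaining buffer is re-tagged
 * with a freshly allocated XRI.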
3752 * 3753 * Return codes 3754 * 0 - successful (for now, it always returns 0) 3755 **/ 3756 int 3757 lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba) 3758 { 3759 struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 3760 uint16_t i, lxri, els_xri_cnt; 3761 uint16_t nvme_xri_cnt, nvme_xri_max; 3762 LIST_HEAD(nvme_sgl_list); 3763 int rc, cnt; 3764 3765 phba->total_nvme_bufs = 0; 3766 phba->get_nvme_bufs = 0; 3767 phba->put_nvme_bufs = 0; 3768 3769 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 3770 return 0; 3771 /* 3772 * update on pci function's allocated nvme xri-sgl list 3773 */ 3774 3775 /* maximum number of xris available for nvme buffers */ 3776 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3777 nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3778 phba->sli4_hba.nvme_xri_max = nvme_xri_max; 3779 phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max; 3780 3781 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3782 "6074 Current allocated NVME xri-sgl count:%d, " 3783 "maximum NVME xri count:%d\n", 3784 phba->sli4_hba.nvme_xri_cnt, 3785 phba->sli4_hba.nvme_xri_max); 3786 3787 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3788 spin_lock(&phba->nvme_buf_list_put_lock); 3789 list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list); 3790 list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list); 3791 cnt = phba->get_nvme_bufs + phba->put_nvme_bufs; 3792 phba->get_nvme_bufs = 0; 3793 phba->put_nvme_bufs = 0; 3794 spin_unlock(&phba->nvme_buf_list_put_lock); 3795 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3796 3797 if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) { 3798 /* max nvme xri shrunk below the allocated nvme buffers */ 3799 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3800 nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt - 3801 phba->sli4_hba.nvme_xri_max; 3802 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3803 /* release the extra allocated nvme buffers */ 3804 for (i = 0; i < nvme_xri_cnt; i++) { 3805 list_remove_head(&nvme_sgl_list, lpfc_ncmd, 3806 struct lpfc_nvme_buf, list); 3807 if (lpfc_ncmd) { 3808 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3809 lpfc_ncmd->data, 3810 lpfc_ncmd->dma_handle); 3811 kfree(lpfc_ncmd); 3812 } 3813 } 3814 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3815 phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt; 3816 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3817 } 3818 3819 /* update xris associated to remaining allocated nvme buffers */ 3820 lpfc_ncmd = NULL; 3821 lpfc_ncmd_next = NULL; 3822 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3823 &nvme_sgl_list, list) { 3824 lxri = lpfc_sli4_next_xritag(phba); 3825 if (lxri == NO_XRI) { 3826 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3827 "6075 Failed to allocate xri for " 3828 "nvme buffer\n"); 3829 rc = -ENOMEM; 3830 goto out_free_mem; 3831 } 3832 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 3833 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3834 } 3835 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3836 spin_lock(&phba->nvme_buf_list_put_lock); 3837 list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get); 3838 phba->get_nvme_bufs = cnt; 3839 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); 3840 spin_unlock(&phba->nvme_buf_list_put_lock); 3841 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3842 return 0; 3843 3844 out_free_mem: 3845 lpfc_nvme_free(phba); 3846 return rc; 3847 } 3848 3849 /** 3850 * lpfc_create_port - Create an FC port 3851 * @phba: pointer to lpfc hba data structure. 
3852 * @instance: a unique integer ID to this FC port. 3853 * @dev: pointer to the device data structure. 3854 * 3855 * This routine creates a FC port for the upper layer protocol. The FC port 3856 * can be created on top of either a physical port or a virtual port provided 3857 * by the HBA. This routine also allocates a SCSI host data structure (shost) 3858 * and associates the FC port created before adding the shost into the SCSI 3859 * layer. 3860 * 3861 * Return codes 3862 * @vport - pointer to the virtual N_Port data structure. 3863 * NULL - port create failed. 3864 **/ 3865 struct lpfc_vport * 3866 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 3867 { 3868 struct lpfc_vport *vport; 3869 struct Scsi_Host *shost = NULL; 3870 int error = 0; 3871 int i; 3872 uint64_t wwn; 3873 bool use_no_reset_hba = false; 3874 int rc; 3875 3876 if (lpfc_no_hba_reset_cnt) { 3877 if (phba->sli_rev < LPFC_SLI_REV4 && 3878 dev == &phba->pcidev->dev) { 3879 /* Reset the port first */ 3880 lpfc_sli_brdrestart(phba); 3881 rc = lpfc_sli_chipset_init(phba); 3882 if (rc) 3883 return NULL; 3884 } 3885 wwn = lpfc_get_wwpn(phba); 3886 } 3887 3888 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 3889 if (wwn == lpfc_no_hba_reset[i]) { 3890 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3891 "6020 Setting use_no_reset port=%llx\n", 3892 wwn); 3893 use_no_reset_hba = true; 3894 break; 3895 } 3896 } 3897 3898 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 3899 if (dev != &phba->pcidev->dev) { 3900 shost = scsi_host_alloc(&lpfc_vport_template, 3901 sizeof(struct lpfc_vport)); 3902 } else { 3903 if (!use_no_reset_hba) 3904 shost = scsi_host_alloc(&lpfc_template, 3905 sizeof(struct lpfc_vport)); 3906 else 3907 shost = scsi_host_alloc(&lpfc_template_no_hr, 3908 sizeof(struct lpfc_vport)); 3909 } 3910 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 3911 shost = scsi_host_alloc(&lpfc_template_nvme, 3912 sizeof(struct lpfc_vport)); 3913 } 3914 if (!shost) 3915 goto out; 3916 3917 vport = (struct lpfc_vport *) shost->hostdata; 3918 vport->phba = phba; 3919 vport->load_flag |= FC_LOADING; 3920 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3921 vport->fc_rscn_flush = 0; 3922 lpfc_get_vport_cfgparam(vport); 3923 3924 shost->unique_id = instance; 3925 shost->max_id = LPFC_MAX_TARGET; 3926 shost->max_lun = vport->cfg_max_luns; 3927 shost->this_id = -1; 3928 shost->max_cmd_len = 16; 3929 shost->nr_hw_queues = phba->cfg_fcp_io_channel; 3930 if (phba->sli_rev == LPFC_SLI_REV4) { 3931 shost->dma_boundary = 3932 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 3933 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 3934 } 3935 3936 /* 3937 * Set initial can_queue value since 0 is no longer supported and 3938 * scsi_add_host will fail. This will be adjusted later based on the 3939 * max xri value determined in hba setup. 3940 */ 3941 shost->can_queue = phba->cfg_hba_queue_depth - 10; 3942 if (dev != &phba->pcidev->dev) { 3943 shost->transportt = lpfc_vport_transport_template; 3944 vport->port_type = LPFC_NPIV_PORT; 3945 } else { 3946 shost->transportt = lpfc_transport_template; 3947 vport->port_type = LPFC_PHYSICAL_PORT; 3948 } 3949 3950 /* Initialize all internally managed lists. 
*/ 3951 INIT_LIST_HEAD(&vport->fc_nodes); 3952 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3953 spin_lock_init(&vport->work_port_lock); 3954 3955 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 3956 3957 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 3958 3959 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 3960 3961 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3962 if (error) 3963 goto out_put_shost; 3964 3965 spin_lock_irq(&phba->hbalock); 3966 list_add_tail(&vport->listentry, &phba->port_list); 3967 spin_unlock_irq(&phba->hbalock); 3968 return vport; 3969 3970 out_put_shost: 3971 scsi_host_put(shost); 3972 out: 3973 return NULL; 3974 } 3975 3976 /** 3977 * destroy_port - destroy an FC port 3978 * @vport: pointer to an lpfc virtual N_Port data structure. 3979 * 3980 * This routine destroys a FC port from the upper layer protocol. All the 3981 * resources associated with the port are released. 3982 **/ 3983 void 3984 destroy_port(struct lpfc_vport *vport) 3985 { 3986 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3987 struct lpfc_hba *phba = vport->phba; 3988 3989 lpfc_debugfs_terminate(vport); 3990 fc_remove_host(shost); 3991 scsi_remove_host(shost); 3992 3993 spin_lock_irq(&phba->hbalock); 3994 list_del_init(&vport->listentry); 3995 spin_unlock_irq(&phba->hbalock); 3996 3997 lpfc_cleanup(vport); 3998 return; 3999 } 4000 4001 /** 4002 * lpfc_get_instance - Get a unique integer ID 4003 * 4004 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4005 * uses the kernel idr facility to perform the task. 4006 * 4007 * Return codes: 4008 * instance - a unique integer ID allocated as the new instance. 4009 * -1 - lpfc get instance failed. 4010 **/ 4011 int 4012 lpfc_get_instance(void) 4013 { 4014 int ret; 4015 4016 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4017 return ret < 0 ? -1 : ret; 4018 } 4019 4020 /** 4021 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4022 * @shost: pointer to SCSI host data structure. 4023 * @time: elapsed time of the scan in jiffies. 4024 * 4025 * This routine is called by the SCSI layer with a SCSI host to determine 4026 * whether the scan host is finished. 4027 * 4028 * Note: there is no scan_start function as adapter initialization will have 4029 * asynchronously kicked off the link initialization. 4030 * 4031 * Return codes 4032 * 0 - SCSI host scan is not over yet. 4033 * 1 - SCSI host scan is over. 4034 **/ 4035 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4036 { 4037 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4038 struct lpfc_hba *phba = vport->phba; 4039 int stat = 0; 4040 4041 spin_lock_irq(shost->host_lock); 4042 4043 if (vport->load_flag & FC_UNLOADING) { 4044 stat = 1; 4045 goto finished; 4046 } 4047 if (time >= msecs_to_jiffies(30 * 1000)) { 4048 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4049 "0461 Scanning longer than 30 " 4050 "seconds. Continuing initialization\n"); 4051 stat = 1; 4052 goto finished; 4053 } 4054 if (time >= msecs_to_jiffies(15 * 1000) && 4055 phba->link_state <= LPFC_LINK_DOWN) { 4056 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4057 "0465 Link down longer than 15 " 4058 "seconds. 
Continuing initialization\n"); 4059 stat = 1; 4060 goto finished; 4061 } 4062 4063 if (vport->port_state != LPFC_VPORT_READY) 4064 goto finished; 4065 if (vport->num_disc_nodes || vport->fc_prli_sent) 4066 goto finished; 4067 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4068 goto finished; 4069 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4070 goto finished; 4071 4072 stat = 1; 4073 4074 finished: 4075 spin_unlock_irq(shost->host_lock); 4076 return stat; 4077 } 4078 4079 /** 4080 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4081 * @shost: pointer to SCSI host data structure. 4082 * 4083 * This routine initializes a given SCSI host attributes on a FC port. The 4084 * SCSI host can be either on top of a physical port or a virtual port. 4085 **/ 4086 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4087 { 4088 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4089 struct lpfc_hba *phba = vport->phba; 4090 /* 4091 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 4092 */ 4093 4094 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4095 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4096 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4097 4098 memset(fc_host_supported_fc4s(shost), 0, 4099 sizeof(fc_host_supported_fc4s(shost))); 4100 fc_host_supported_fc4s(shost)[2] = 1; 4101 fc_host_supported_fc4s(shost)[7] = 1; 4102 4103 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4104 sizeof fc_host_symbolic_name(shost)); 4105 4106 fc_host_supported_speeds(shost) = 0; 4107 if (phba->lmt & LMT_32Gb) 4108 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4109 if (phba->lmt & LMT_16Gb) 4110 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4111 if (phba->lmt & LMT_10Gb) 4112 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4113 if (phba->lmt & LMT_8Gb) 4114 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4115 if (phba->lmt & LMT_4Gb) 4116 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4117 if (phba->lmt & LMT_2Gb) 4118 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4119 if (phba->lmt & LMT_1Gb) 4120 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4121 4122 fc_host_maxframe_size(shost) = 4123 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4124 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4125 4126 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4127 4128 /* This value is also unchanging */ 4129 memset(fc_host_active_fc4s(shost), 0, 4130 sizeof(fc_host_active_fc4s(shost))); 4131 fc_host_active_fc4s(shost)[2] = 1; 4132 fc_host_active_fc4s(shost)[7] = 1; 4133 4134 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4135 spin_lock_irq(shost->host_lock); 4136 vport->load_flag &= ~FC_LOADING; 4137 spin_unlock_irq(shost->host_lock); 4138 } 4139 4140 /** 4141 * lpfc_stop_port_s3 - Stop SLI3 device port 4142 * @phba: pointer to lpfc hba data structure. 4143 * 4144 * This routine is invoked to stop an SLI3 device port, it stops the device 4145 * from generating interrupts and stops the device driver's timers for the 4146 * device. 
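 *
 * Concretely, the SLI3 variant below masks all interrupt enable conditions
 * by writing 0 to the Host Control register, clears any pending Host
 * Attention bits by writing all-ones to the Host Attention register, and
 * then stops the driver timers.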
4147 **/
4148 static void
4149 lpfc_stop_port_s3(struct lpfc_hba *phba)
4150 {
4151 /* Clear all interrupt enable conditions */
4152 writel(0, phba->HCregaddr);
4153 readl(phba->HCregaddr); /* flush */
4154 /* Clear all pending interrupts */
4155 writel(0xffffffff, phba->HAregaddr);
4156 readl(phba->HAregaddr); /* flush */
4157
4158 /* Reset some HBA SLI setup states */
4159 lpfc_stop_hba_timers(phba);
4160 phba->pport->work_port_events = 0;
4161 }
4162
4163 /**
4164 * lpfc_stop_port_s4 - Stop SLI4 device port
4165 * @phba: pointer to lpfc hba data structure.
4166 *
4167 * This routine is invoked to stop an SLI4 device port, it stops the device
4168 * from generating interrupts and stops the device driver's timers for the
4169 * device.
4170 **/
4171 static void
4172 lpfc_stop_port_s4(struct lpfc_hba *phba)
4173 {
4174 /* Reset some HBA SLI4 setup states */
4175 lpfc_stop_hba_timers(phba);
4176 phba->pport->work_port_events = 0;
4177 phba->sli4_hba.intr_enable = 0;
4178 }
4179
4180 /**
4181 * lpfc_stop_port - Wrapper function for stopping hba port
4182 * @phba: Pointer to HBA context object.
4183 *
4184 * This routine wraps the actual SLI3 or SLI4 hba stop port routine through
4185 * the API jump table function pointer in the lpfc_hba struct.
4186 **/
4187 void
4188 lpfc_stop_port(struct lpfc_hba *phba)
4189 {
4190 phba->lpfc_stop_port(phba);
4191
4192 if (phba->wq)
4193 flush_workqueue(phba->wq);
4194 }
4195
4196 /**
4197 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4198 * @phba: Pointer to hba for which this call is being executed.
4199 *
4200 * This routine starts the timer waiting for the FCF rediscovery to complete.
4201 **/
4202 void
4203 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4204 {
4205 unsigned long fcf_redisc_wait_tmo =
4206 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4207 /* Start fcf rediscovery wait period timer */
4208 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4209 spin_lock_irq(&phba->hbalock);
4210 /* Allow action to new fcf asynchronous event */
4211 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4212 /* Mark the FCF rediscovery pending state */
4213 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4214 spin_unlock_irq(&phba->hbalock);
4215 }
4216
4217 /**
4218 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4219 * @t: pointer to the fcf.redisc_wait timer embedded in the lpfc_hba structure.
4220 *
4221 * This routine is invoked when the wait for FCF table rediscovery has
4222 * timed out. If new FCF record(s) have been discovered during the
4223 * wait period, a new FCF event shall be added to the FCOE async event
4224 * list, and the worker thread shall be woken up to process it from the
4225 * worker thread context.
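 *
 * A minimal sketch of how this timer is armed (see
 * lpfc_fcf_redisc_wait_start_timer() above), using the driver constant
 * LPFC_FCF_REDISCOVER_WAIT_TMO:
 *
 *   mod_timer(&phba->fcf.redisc_wait,
 *             jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
 *   phba->fcf.fcf_flag |= FCF_REDISC_PEND;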
4226 **/ 4227 static void 4228 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4229 { 4230 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4231 4232 /* Don't send FCF rediscovery event if timer cancelled */ 4233 spin_lock_irq(&phba->hbalock); 4234 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4235 spin_unlock_irq(&phba->hbalock); 4236 return; 4237 } 4238 /* Clear FCF rediscovery timer pending flag */ 4239 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4240 /* FCF rediscovery event to worker thread */ 4241 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4242 spin_unlock_irq(&phba->hbalock); 4243 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4244 "2776 FCF rediscover quiescent timer expired\n"); 4245 /* wake up worker thread */ 4246 lpfc_worker_wake_up(phba); 4247 } 4248 4249 /** 4250 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4251 * @phba: pointer to lpfc hba data structure. 4252 * @acqe_link: pointer to the async link completion queue entry. 4253 * 4254 * This routine is to parse the SLI4 link-attention link fault code and 4255 * translate it into the base driver's read link attention mailbox command 4256 * status. 4257 * 4258 * Return: Link-attention status in terms of base driver's coding. 4259 **/ 4260 static uint16_t 4261 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4262 struct lpfc_acqe_link *acqe_link) 4263 { 4264 uint16_t latt_fault; 4265 4266 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4267 case LPFC_ASYNC_LINK_FAULT_NONE: 4268 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4269 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4270 latt_fault = 0; 4271 break; 4272 default: 4273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4274 "0398 Invalid link fault code: x%x\n", 4275 bf_get(lpfc_acqe_link_fault, acqe_link)); 4276 latt_fault = MBXERR_ERROR; 4277 break; 4278 } 4279 return latt_fault; 4280 } 4281 4282 /** 4283 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4284 * @phba: pointer to lpfc hba data structure. 4285 * @acqe_link: pointer to the async link completion queue entry. 4286 * 4287 * This routine is to parse the SLI4 link attention type and translate it 4288 * into the base driver's link attention type coding. 4289 * 4290 * Return: Link attention type in terms of base driver's coding. 4291 **/ 4292 static uint8_t 4293 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4294 struct lpfc_acqe_link *acqe_link) 4295 { 4296 uint8_t att_type; 4297 4298 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4299 case LPFC_ASYNC_LINK_STATUS_DOWN: 4300 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4301 att_type = LPFC_ATT_LINK_DOWN; 4302 break; 4303 case LPFC_ASYNC_LINK_STATUS_UP: 4304 /* Ignore physical link up events - wait for logical link up */ 4305 att_type = LPFC_ATT_RESERVED; 4306 break; 4307 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4308 att_type = LPFC_ATT_LINK_UP; 4309 break; 4310 default: 4311 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4312 "0399 Invalid link attention type: x%x\n", 4313 bf_get(lpfc_acqe_link_status, acqe_link)); 4314 att_type = LPFC_ATT_RESERVED; 4315 break; 4316 } 4317 return att_type; 4318 } 4319 4320 /** 4321 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4322 * @phba: pointer to lpfc hba data structure. 4323 * 4324 * This routine is to get an SLI3 FC port's link speed in Mbps. 4325 * 4326 * Return: link speed in terms of Mbps. 
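 *
 * For example, an SLI3 port reporting LPFC_LINK_SPEED_8GHZ yields 8000,
 * while an SLI4 port returns the logical link speed when one is reported
 * and falls back to the physical link speed otherwise.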
4327 **/ 4328 uint32_t 4329 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4330 { 4331 uint32_t link_speed; 4332 4333 if (!lpfc_is_link_up(phba)) 4334 return 0; 4335 4336 if (phba->sli_rev <= LPFC_SLI_REV3) { 4337 switch (phba->fc_linkspeed) { 4338 case LPFC_LINK_SPEED_1GHZ: 4339 link_speed = 1000; 4340 break; 4341 case LPFC_LINK_SPEED_2GHZ: 4342 link_speed = 2000; 4343 break; 4344 case LPFC_LINK_SPEED_4GHZ: 4345 link_speed = 4000; 4346 break; 4347 case LPFC_LINK_SPEED_8GHZ: 4348 link_speed = 8000; 4349 break; 4350 case LPFC_LINK_SPEED_10GHZ: 4351 link_speed = 10000; 4352 break; 4353 case LPFC_LINK_SPEED_16GHZ: 4354 link_speed = 16000; 4355 break; 4356 default: 4357 link_speed = 0; 4358 } 4359 } else { 4360 if (phba->sli4_hba.link_state.logical_speed) 4361 link_speed = 4362 phba->sli4_hba.link_state.logical_speed; 4363 else 4364 link_speed = phba->sli4_hba.link_state.speed; 4365 } 4366 return link_speed; 4367 } 4368 4369 /** 4370 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 4371 * @phba: pointer to lpfc hba data structure. 4372 * @evt_code: asynchronous event code. 4373 * @speed_code: asynchronous event link speed code. 4374 * 4375 * This routine is to parse the giving SLI4 async event link speed code into 4376 * value of Mbps for the link speed. 4377 * 4378 * Return: link speed in terms of Mbps. 4379 **/ 4380 static uint32_t 4381 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4382 uint8_t speed_code) 4383 { 4384 uint32_t port_speed; 4385 4386 switch (evt_code) { 4387 case LPFC_TRAILER_CODE_LINK: 4388 switch (speed_code) { 4389 case LPFC_ASYNC_LINK_SPEED_ZERO: 4390 port_speed = 0; 4391 break; 4392 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4393 port_speed = 10; 4394 break; 4395 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4396 port_speed = 100; 4397 break; 4398 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4399 port_speed = 1000; 4400 break; 4401 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4402 port_speed = 10000; 4403 break; 4404 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4405 port_speed = 20000; 4406 break; 4407 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4408 port_speed = 25000; 4409 break; 4410 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4411 port_speed = 40000; 4412 break; 4413 default: 4414 port_speed = 0; 4415 } 4416 break; 4417 case LPFC_TRAILER_CODE_FC: 4418 switch (speed_code) { 4419 case LPFC_FC_LA_SPEED_UNKNOWN: 4420 port_speed = 0; 4421 break; 4422 case LPFC_FC_LA_SPEED_1G: 4423 port_speed = 1000; 4424 break; 4425 case LPFC_FC_LA_SPEED_2G: 4426 port_speed = 2000; 4427 break; 4428 case LPFC_FC_LA_SPEED_4G: 4429 port_speed = 4000; 4430 break; 4431 case LPFC_FC_LA_SPEED_8G: 4432 port_speed = 8000; 4433 break; 4434 case LPFC_FC_LA_SPEED_10G: 4435 port_speed = 10000; 4436 break; 4437 case LPFC_FC_LA_SPEED_16G: 4438 port_speed = 16000; 4439 break; 4440 case LPFC_FC_LA_SPEED_32G: 4441 port_speed = 32000; 4442 break; 4443 default: 4444 port_speed = 0; 4445 } 4446 break; 4447 default: 4448 port_speed = 0; 4449 } 4450 return port_speed; 4451 } 4452 4453 /** 4454 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4455 * @phba: pointer to lpfc hba data structure. 4456 * @acqe_link: pointer to the async link completion queue entry. 4457 * 4458 * This routine is to handle the SLI4 asynchronous FCoE link event. 
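 *
 * For FC ports the routine issues a real READ_TOPOLOGY mailbox command,
 * while for FCoE ports it fills in the link state from the ACQE and invokes
 * the READ_TOPOLOGY completion handler directly. Note that the logical link
 * speed in the ACQE is reported in units of 10 Mbps, so a raw value of 1000
 * is stored as 10000 Mbps.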
4459 **/ 4460 static void 4461 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 4462 struct lpfc_acqe_link *acqe_link) 4463 { 4464 struct lpfc_dmabuf *mp; 4465 LPFC_MBOXQ_t *pmb; 4466 MAILBOX_t *mb; 4467 struct lpfc_mbx_read_top *la; 4468 uint8_t att_type; 4469 int rc; 4470 4471 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 4472 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 4473 return; 4474 phba->fcoe_eventtag = acqe_link->event_tag; 4475 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4476 if (!pmb) { 4477 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4478 "0395 The mboxq allocation failed\n"); 4479 return; 4480 } 4481 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4482 if (!mp) { 4483 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4484 "0396 The lpfc_dmabuf allocation failed\n"); 4485 goto out_free_pmb; 4486 } 4487 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4488 if (!mp->virt) { 4489 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4490 "0397 The mbuf allocation failed\n"); 4491 goto out_free_dmabuf; 4492 } 4493 4494 /* Cleanup any outstanding ELS commands */ 4495 lpfc_els_flush_all_cmd(phba); 4496 4497 /* Block ELS IOCBs until we have done process link event */ 4498 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4499 4500 /* Update link event statistics */ 4501 phba->sli.slistat.link_event++; 4502 4503 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4504 lpfc_read_topology(phba, pmb, mp); 4505 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4506 pmb->vport = phba->pport; 4507 4508 /* Keep the link status for extra SLI4 state machine reference */ 4509 phba->sli4_hba.link_state.speed = 4510 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 4511 bf_get(lpfc_acqe_link_speed, acqe_link)); 4512 phba->sli4_hba.link_state.duplex = 4513 bf_get(lpfc_acqe_link_duplex, acqe_link); 4514 phba->sli4_hba.link_state.status = 4515 bf_get(lpfc_acqe_link_status, acqe_link); 4516 phba->sli4_hba.link_state.type = 4517 bf_get(lpfc_acqe_link_type, acqe_link); 4518 phba->sli4_hba.link_state.number = 4519 bf_get(lpfc_acqe_link_number, acqe_link); 4520 phba->sli4_hba.link_state.fault = 4521 bf_get(lpfc_acqe_link_fault, acqe_link); 4522 phba->sli4_hba.link_state.logical_speed = 4523 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 4524 4525 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4526 "2900 Async FC/FCoE Link event - Speed:%dGBit " 4527 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 4528 "Logical speed:%dMbps Fault:%d\n", 4529 phba->sli4_hba.link_state.speed, 4530 phba->sli4_hba.link_state.topology, 4531 phba->sli4_hba.link_state.status, 4532 phba->sli4_hba.link_state.type, 4533 phba->sli4_hba.link_state.number, 4534 phba->sli4_hba.link_state.logical_speed, 4535 phba->sli4_hba.link_state.fault); 4536 /* 4537 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 4538 * topology info. Note: Optional for non FC-AL ports. 4539 */ 4540 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4541 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4542 if (rc == MBX_NOT_FINISHED) 4543 goto out_free_dmabuf; 4544 return; 4545 } 4546 /* 4547 * For FCoE Mode: fill in all the topology information we need and call 4548 * the READ_TOPOLOGY completion routine to continue without actually 4549 * sending the READ_TOPOLOGY mailbox command to the port. 
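 * The attention type and link speed parsed from the ACQE are copied into the
 * faked mailbox below, while the remaining topology fields (topology forced
 * to point-to-point, loop/ALPA bits cleared) carry no real information and
 * exist only to satisfy lpfc_mbx_cmpl_read_topology().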
4550 */
4551 /* Parse and translate status field */
4552 mb = &pmb->u.mb;
4553 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
4554
4555 /* Parse and translate link attention fields */
4556 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4557 la->eventTag = acqe_link->event_tag;
4558 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4559 bf_set(lpfc_mbx_read_top_link_spd, la,
4560 (bf_get(lpfc_acqe_link_speed, acqe_link)));
4561
4562 /* Fake the following irrelevant fields */
4563 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4564 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4565 bf_set(lpfc_mbx_read_top_il, la, 0);
4566 bf_set(lpfc_mbx_read_top_pb, la, 0);
4567 bf_set(lpfc_mbx_read_top_fa, la, 0);
4568 bf_set(lpfc_mbx_read_top_mm, la, 0);
4569
4570 /* Invoke the lpfc_handle_latt mailbox command callback function */
4571 lpfc_mbx_cmpl_read_topology(phba, pmb);
4572
4573 return;
4574
4575 out_free_dmabuf:
4576 kfree(mp);
4577 out_free_pmb:
4578 mempool_free(pmb, phba->mbox_mem_pool);
4579 }
4580
4581 /**
4582 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
4583 * @phba: pointer to lpfc hba data structure.
4584 * @acqe_fc: pointer to the async fc completion queue entry.
4585 *
4586 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
4587 * that the event was received and then issue a read_topology mailbox command so
4588 * that the rest of the driver will treat it the same as SLI3.
4589 **/
4590 static void
4591 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
4592 {
4593 struct lpfc_dmabuf *mp;
4594 LPFC_MBOXQ_t *pmb;
4595 MAILBOX_t *mb;
4596 struct lpfc_mbx_read_top *la;
4597 int rc;
4598
4599 if (bf_get(lpfc_trailer_type, acqe_fc) !=
4600 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
4601 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4602 "2895 Non FC link Event detected.(%d)\n",
4603 bf_get(lpfc_trailer_type, acqe_fc));
4604 return;
4605 }
4606 /* Keep the link status for extra SLI4 state machine reference */
4607 phba->sli4_hba.link_state.speed =
4608 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
4609 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
4610 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
4611 phba->sli4_hba.link_state.topology =
4612 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
4613 phba->sli4_hba.link_state.status =
4614 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
4615 phba->sli4_hba.link_state.type =
4616 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
4617 phba->sli4_hba.link_state.number =
4618 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
4619 phba->sli4_hba.link_state.fault =
4620 bf_get(lpfc_acqe_link_fault, acqe_fc);
4621 phba->sli4_hba.link_state.logical_speed =
4622 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
4623 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4624 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
4625 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
4626 "%dMbps Fault:%d\n",
4627 phba->sli4_hba.link_state.speed,
4628 phba->sli4_hba.link_state.topology,
4629 phba->sli4_hba.link_state.status,
4630 phba->sli4_hba.link_state.type,
4631 phba->sli4_hba.link_state.number,
4632 phba->sli4_hba.link_state.logical_speed,
4633 phba->sli4_hba.link_state.fault);
4634 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4635 if (!pmb) {
4636 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4637 "2897 The mboxq allocation failed\n");
4638 return;
4639 }
4640 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4641 if (!mp) {
4642
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4643 "2898 The lpfc_dmabuf allocation failed\n"); 4644 goto out_free_pmb; 4645 } 4646 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4647 if (!mp->virt) { 4648 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4649 "2899 The mbuf allocation failed\n"); 4650 goto out_free_dmabuf; 4651 } 4652 4653 /* Cleanup any outstanding ELS commands */ 4654 lpfc_els_flush_all_cmd(phba); 4655 4656 /* Block ELS IOCBs until we have done process link event */ 4657 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4658 4659 /* Update link event statistics */ 4660 phba->sli.slistat.link_event++; 4661 4662 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4663 lpfc_read_topology(phba, pmb, mp); 4664 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4665 pmb->vport = phba->pport; 4666 4667 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 4668 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 4669 4670 switch (phba->sli4_hba.link_state.status) { 4671 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 4672 phba->link_flag |= LS_MDS_LINK_DOWN; 4673 break; 4674 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 4675 phba->link_flag |= LS_MDS_LOOPBACK; 4676 break; 4677 default: 4678 break; 4679 } 4680 4681 /* Parse and translate status field */ 4682 mb = &pmb->u.mb; 4683 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, 4684 (void *)acqe_fc); 4685 4686 /* Parse and translate link attention fields */ 4687 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 4688 la->eventTag = acqe_fc->event_tag; 4689 4690 if (phba->sli4_hba.link_state.status == 4691 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 4692 bf_set(lpfc_mbx_read_top_att_type, la, 4693 LPFC_FC_LA_TYPE_UNEXP_WWPN); 4694 } else { 4695 bf_set(lpfc_mbx_read_top_att_type, la, 4696 LPFC_FC_LA_TYPE_LINK_DOWN); 4697 } 4698 /* Invoke the mailbox command callback function */ 4699 lpfc_mbx_cmpl_read_topology(phba, pmb); 4700 4701 return; 4702 } 4703 4704 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4705 if (rc == MBX_NOT_FINISHED) 4706 goto out_free_dmabuf; 4707 return; 4708 4709 out_free_dmabuf: 4710 kfree(mp); 4711 out_free_pmb: 4712 mempool_free(pmb, phba->mbox_mem_pool); 4713 } 4714 4715 /** 4716 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 4717 * @phba: pointer to lpfc hba data structure. 4718 * @acqe_fc: pointer to the async SLI completion queue entry. 4719 * 4720 * This routine is to handle the SLI4 asynchronous SLI events. 
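 *
 * The event types handled below are over/normal temperature notifications
 * (forwarded to the FC transport as vendor events), misconfigured optics
 * (decoded per link number into a human readable message), and the remote
 * D_Port test indication; unrecognized types are simply logged.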
4721 **/ 4722 static void 4723 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 4724 { 4725 char port_name; 4726 char message[128]; 4727 uint8_t status; 4728 uint8_t evt_type; 4729 uint8_t operational = 0; 4730 struct temp_event temp_event_data; 4731 struct lpfc_acqe_misconfigured_event *misconfigured; 4732 struct Scsi_Host *shost; 4733 4734 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 4735 4736 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4737 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 4738 "x%08x SLI Event Type:%d\n", 4739 acqe_sli->event_data1, acqe_sli->event_data2, 4740 evt_type); 4741 4742 port_name = phba->Port[0]; 4743 if (port_name == 0x00) 4744 port_name = '?'; /* get port name is empty */ 4745 4746 switch (evt_type) { 4747 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 4748 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4749 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 4750 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4751 4752 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4753 "3190 Over Temperature:%d Celsius- Port Name %c\n", 4754 acqe_sli->event_data1, port_name); 4755 4756 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 4757 shost = lpfc_shost_from_vport(phba->pport); 4758 fc_host_post_vendor_event(shost, fc_get_event_number(), 4759 sizeof(temp_event_data), 4760 (char *)&temp_event_data, 4761 SCSI_NL_VID_TYPE_PCI 4762 | PCI_VENDOR_ID_EMULEX); 4763 break; 4764 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 4765 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4766 temp_event_data.event_code = LPFC_NORMAL_TEMP; 4767 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4768 4769 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4770 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 4771 acqe_sli->event_data1, port_name); 4772 4773 shost = lpfc_shost_from_vport(phba->pport); 4774 fc_host_post_vendor_event(shost, fc_get_event_number(), 4775 sizeof(temp_event_data), 4776 (char *)&temp_event_data, 4777 SCSI_NL_VID_TYPE_PCI 4778 | PCI_VENDOR_ID_EMULEX); 4779 break; 4780 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 4781 misconfigured = (struct lpfc_acqe_misconfigured_event *) 4782 &acqe_sli->event_data1; 4783 4784 /* fetch the status for this port */ 4785 switch (phba->sli4_hba.lnk_info.lnk_no) { 4786 case LPFC_LINK_NUMBER_0: 4787 status = bf_get(lpfc_sli_misconfigured_port0_state, 4788 &misconfigured->theEvent); 4789 operational = bf_get(lpfc_sli_misconfigured_port0_op, 4790 &misconfigured->theEvent); 4791 break; 4792 case LPFC_LINK_NUMBER_1: 4793 status = bf_get(lpfc_sli_misconfigured_port1_state, 4794 &misconfigured->theEvent); 4795 operational = bf_get(lpfc_sli_misconfigured_port1_op, 4796 &misconfigured->theEvent); 4797 break; 4798 case LPFC_LINK_NUMBER_2: 4799 status = bf_get(lpfc_sli_misconfigured_port2_state, 4800 &misconfigured->theEvent); 4801 operational = bf_get(lpfc_sli_misconfigured_port2_op, 4802 &misconfigured->theEvent); 4803 break; 4804 case LPFC_LINK_NUMBER_3: 4805 status = bf_get(lpfc_sli_misconfigured_port3_state, 4806 &misconfigured->theEvent); 4807 operational = bf_get(lpfc_sli_misconfigured_port3_op, 4808 &misconfigured->theEvent); 4809 break; 4810 default: 4811 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4812 "3296 " 4813 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 4814 "event: Invalid link %d", 4815 phba->sli4_hba.lnk_info.lnk_no); 4816 return; 4817 } 4818 4819 /* Skip if optic state unchanged */ 4820 if (phba->sli4_hba.lnk_info.optic_state == status) 4821 return; 4822 4823 switch (status) { 4824 case 
LPFC_SLI_EVENT_STATUS_VALID: 4825 sprintf(message, "Physical Link is functional"); 4826 break; 4827 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 4828 sprintf(message, "Optics faulted/incorrectly " 4829 "installed/not installed - Reseat optics, " 4830 "if issue not resolved, replace."); 4831 break; 4832 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 4833 sprintf(message, 4834 "Optics of two types installed - Remove one " 4835 "optic or install matching pair of optics."); 4836 break; 4837 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 4838 sprintf(message, "Incompatible optics - Replace with " 4839 "compatible optics for card to function."); 4840 break; 4841 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 4842 sprintf(message, "Unqualified optics - Replace with " 4843 "Avago optics for Warranty and Technical " 4844 "Support - Link is%s operational", 4845 (operational) ? " not" : ""); 4846 break; 4847 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 4848 sprintf(message, "Uncertified optics - Replace with " 4849 "Avago-certified optics to enable link " 4850 "operation - Link is%s operational", 4851 (operational) ? " not" : ""); 4852 break; 4853 default: 4854 /* firmware is reporting a status we don't know about */ 4855 sprintf(message, "Unknown event status x%02x", status); 4856 break; 4857 } 4858 phba->sli4_hba.lnk_info.optic_state = status; 4859 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4860 "3176 Port Name %c %s\n", port_name, message); 4861 break; 4862 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 4863 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4864 "3192 Remote DPort Test Initiated - " 4865 "Event Data1:x%08x Event Data2: x%08x\n", 4866 acqe_sli->event_data1, acqe_sli->event_data2); 4867 break; 4868 default: 4869 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4870 "3193 Async SLI event - Event Data1:x%08x Event Data2:" 4871 "x%08x SLI Event Type:%d\n", 4872 acqe_sli->event_data1, acqe_sli->event_data2, 4873 evt_type); 4874 break; 4875 } 4876 } 4877 4878 /** 4879 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 4880 * @vport: pointer to vport data structure. 4881 * 4882 * This routine is to perform Clear Virtual Link (CVL) on a vport in 4883 * response to a CVL event. 4884 * 4885 * Return the pointer to the ndlp with the vport if successful, otherwise 4886 * return NULL. 
4887 **/
4888 static struct lpfc_nodelist *
4889 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4890 {
4891 struct lpfc_nodelist *ndlp;
4892 struct Scsi_Host *shost;
4893 struct lpfc_hba *phba;
4894
4895 if (!vport)
4896 return NULL;
4897 phba = vport->phba;
4898 if (!phba)
4899 return NULL;
4900 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4901 if (!ndlp) {
4902 /* Cannot find existing Fabric ndlp, so allocate a new one */
4903 ndlp = lpfc_nlp_init(vport, Fabric_DID);
4904 if (!ndlp)
4905 return NULL;
4906 /* Set the node type */
4907 ndlp->nlp_type |= NLP_FABRIC;
4908 /* Put ndlp onto node list */
4909 lpfc_enqueue_node(vport, ndlp);
4910 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4911 /* re-setup ndlp without removing from node list */
4912 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4913 if (!ndlp)
4914 return NULL;
4915 }
4916 if ((phba->pport->port_state < LPFC_FLOGI) &&
4917 (phba->pport->port_state != LPFC_VPORT_FAILED))
4918 return NULL;
4919 /* If virtual link is not yet instantiated ignore CVL */
4920 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4921 && (vport->port_state != LPFC_VPORT_FAILED))
4922 return NULL;
4923 shost = lpfc_shost_from_vport(vport);
4924 if (!shost)
4925 return NULL;
4926 lpfc_linkdown_port(vport);
4927 lpfc_cleanup_pending_mbox(vport);
4928 spin_lock_irq(shost->host_lock);
4929 vport->fc_flag |= FC_VPORT_CVL_RCVD;
4930 spin_unlock_irq(shost->host_lock);
4931
4932 return ndlp;
4933 }
4934
4935 /**
4936 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4937 * @phba: pointer to lpfc hba data structure.
4938 *
4939 * This routine is to perform Clear Virtual Link (CVL) on all vports in
4940 * response to an FCF dead event.
4941 **/
4942 static void
4943 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4944 {
4945 struct lpfc_vport **vports;
4946 int i;
4947
4948 vports = lpfc_create_vport_work_array(phba);
4949 if (vports)
4950 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4951 lpfc_sli4_perform_vport_cvl(vports[i]);
4952 lpfc_destroy_vport_work_array(phba, vports);
4953 }
4954
4955 /**
4956 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
4957 * @phba: pointer to lpfc hba data structure.
4958 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
4959 *
4960 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
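 *
 * The FIP event types handled below are: new FCF and FCF parameter modified
 * (which trigger an FCF table scan when no discovery is already in
 * progress), FCF table full, FCF dead, and Clear Virtual Link, the last two
 * of which start the FCF fast failover logic.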
4961 **/ 4962 static void 4963 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 4964 struct lpfc_acqe_fip *acqe_fip) 4965 { 4966 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 4967 int rc; 4968 struct lpfc_vport *vport; 4969 struct lpfc_nodelist *ndlp; 4970 struct Scsi_Host *shost; 4971 int active_vlink_present; 4972 struct lpfc_vport **vports; 4973 int i; 4974 4975 phba->fc_eventTag = acqe_fip->event_tag; 4976 phba->fcoe_eventtag = acqe_fip->event_tag; 4977 switch (event_type) { 4978 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 4979 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 4980 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 4981 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4982 LOG_DISCOVERY, 4983 "2546 New FCF event, evt_tag:x%x, " 4984 "index:x%x\n", 4985 acqe_fip->event_tag, 4986 acqe_fip->index); 4987 else 4988 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 4989 LOG_DISCOVERY, 4990 "2788 FCF param modified event, " 4991 "evt_tag:x%x, index:x%x\n", 4992 acqe_fip->event_tag, 4993 acqe_fip->index); 4994 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4995 /* 4996 * During period of FCF discovery, read the FCF 4997 * table record indexed by the event to update 4998 * FCF roundrobin failover eligible FCF bmask. 4999 */ 5000 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5001 LOG_DISCOVERY, 5002 "2779 Read FCF (x%x) for updating " 5003 "roundrobin FCF failover bmask\n", 5004 acqe_fip->index); 5005 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5006 } 5007 5008 /* If the FCF discovery is in progress, do nothing. */ 5009 spin_lock_irq(&phba->hbalock); 5010 if (phba->hba_flag & FCF_TS_INPROG) { 5011 spin_unlock_irq(&phba->hbalock); 5012 break; 5013 } 5014 /* If fast FCF failover rescan event is pending, do nothing */ 5015 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 5016 spin_unlock_irq(&phba->hbalock); 5017 break; 5018 } 5019 5020 /* If the FCF has been in discovered state, do nothing. */ 5021 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5022 spin_unlock_irq(&phba->hbalock); 5023 break; 5024 } 5025 spin_unlock_irq(&phba->hbalock); 5026 5027 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5028 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5029 "2770 Start FCF table scan per async FCF " 5030 "event, evt_tag:x%x, index:x%x\n", 5031 acqe_fip->event_tag, acqe_fip->index); 5032 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5033 LPFC_FCOE_FCF_GET_FIRST); 5034 if (rc) 5035 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5036 "2547 Issue FCF scan read FCF mailbox " 5037 "command failed (x%x)\n", rc); 5038 break; 5039 5040 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5041 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5042 "2548 FCF Table full count 0x%x tag 0x%x\n", 5043 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5044 acqe_fip->event_tag); 5045 break; 5046 5047 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5048 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5049 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5050 "2549 FCF (x%x) disconnected from network, " 5051 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 5052 /* 5053 * If we are in the middle of FCF failover process, clear 5054 * the corresponding FCF bit in the roundrobin bitmap. 
5055 */
5056 spin_lock_irq(&phba->hbalock);
5057 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5058 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
5059 spin_unlock_irq(&phba->hbalock);
5060 /* Update FLOGI FCF failover eligible FCF bmask */
5061 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
5062 break;
5063 }
5064 spin_unlock_irq(&phba->hbalock);
5065
5066 /* If the event is not for currently used fcf do nothing */
5067 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
5068 break;
5069
5070 /*
5071 * Otherwise, request the port to rediscover the entire FCF
5072 * table for a fast recovery from the case that the current FCF
5073 * is no longer valid as we are not in the middle of FCF
5074 * failover process already.
5075 */
5076 spin_lock_irq(&phba->hbalock);
5077 /* Mark the fast failover process in progress */
5078 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5079 spin_unlock_irq(&phba->hbalock);
5080
5081 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5082 "2771 Start FCF fast failover process due to "
5083 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5084 "\n", acqe_fip->event_tag, acqe_fip->index);
5085 rc = lpfc_sli4_redisc_fcf_table(phba);
5086 if (rc) {
5087 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5088 LOG_DISCOVERY,
5089 "2772 Issue FCF rediscover mailbox "
5090 "command failed, fail through to FCF "
5091 "dead event\n");
5092 spin_lock_irq(&phba->hbalock);
5093 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5094 spin_unlock_irq(&phba->hbalock);
5095 /*
5096 * Last resort will fail over by treating this
5097 * as a link down to FCF registration.
5098 */
5099 lpfc_sli4_fcf_dead_failthrough(phba);
5100 } else {
5101 /* Reset FCF roundrobin bmask for new discovery */
5102 lpfc_sli4_clear_fcf_rr_bmask(phba);
5103 /*
5104 * Handling fast FCF failover to a DEAD FCF event is
5105 * considered equivalent to receiving CVL on all vports.
5106 */
5107 lpfc_sli4_perform_all_vport_cvl(phba);
5108 }
5109 break;
5110 case LPFC_FIP_EVENT_TYPE_CVL:
5111 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5112 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5113 "2718 Clear Virtual Link Received for VPI 0x%x"
5114 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
5115
5116 vport = lpfc_find_vport_by_vpid(phba,
5117 acqe_fip->index);
5118 ndlp = lpfc_sli4_perform_vport_cvl(vport);
5119 if (!ndlp)
5120 break;
5121 active_vlink_present = 0;
5122
5123 vports = lpfc_create_vport_work_array(phba);
5124 if (vports) {
5125 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5126 i++) {
5127 if ((!(vports[i]->fc_flag &
5128 FC_VPORT_CVL_RCVD)) &&
5129 (vports[i]->port_state > LPFC_FDISC)) {
5130 active_vlink_present = 1;
5131 break;
5132 }
5133 }
5134 lpfc_destroy_vport_work_array(phba, vports);
5135 }
5136
5137 /*
5138 * Don't re-instantiate if vport is marked for deletion.
5139 * If we are here first then vport_delete is going to wait
5140 * for discovery to complete.
5141 */
5142 if (!(vport->load_flag & FC_UNLOADING) &&
5143 active_vlink_present) {
5144 /*
5145 * If there are other active VLinks present,
5146 * re-instantiate the Vlink using FDISC.
5147 */
5148 mod_timer(&ndlp->nlp_delayfunc,
5149 jiffies + msecs_to_jiffies(1000));
5150 shost = lpfc_shost_from_vport(vport);
5151 spin_lock_irq(shost->host_lock);
5152 ndlp->nlp_flag |= NLP_DELAY_TMO;
5153 spin_unlock_irq(shost->host_lock);
5154 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5155 vport->port_state = LPFC_FDISC;
5156 } else {
5157 /*
5158 * Otherwise, we request port to rediscover
5159 * the entire FCF table for a fast recovery
5160 * from the possible case that the current FCF
5161 * is no longer valid if we are not already
5162 * in the FCF failover process.
5163 */
5164 spin_lock_irq(&phba->hbalock);
5165 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5166 spin_unlock_irq(&phba->hbalock);
5167 break;
5168 }
5169 /* Mark the fast failover process in progress */
5170 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
5171 spin_unlock_irq(&phba->hbalock);
5172 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5173 LOG_DISCOVERY,
5174 "2773 Start FCF failover per CVL, "
5175 "evt_tag:x%x\n", acqe_fip->event_tag);
5176 rc = lpfc_sli4_redisc_fcf_table(phba);
5177 if (rc) {
5178 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5179 LOG_DISCOVERY,
5180 "2774 Issue FCF rediscover "
5181 "mailbox command failed, "
5182 "fail through to CVL event\n");
5183 spin_lock_irq(&phba->hbalock);
5184 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
5185 spin_unlock_irq(&phba->hbalock);
5186 /*
5187 * Last resort will be to retry on the
5188 * currently registered FCF entry.
5189 */
5190 lpfc_retry_pport_discovery(phba);
5191 } else
5192 /*
5193 * Reset FCF roundrobin bmask for new
5194 * discovery.
5195 */
5196 lpfc_sli4_clear_fcf_rr_bmask(phba);
5197 }
5198 break;
5199 default:
5200 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5201 "0288 Unknown FCoE event type 0x%x event tag "
5202 "0x%x\n", event_type, acqe_fip->event_tag);
5203 break;
5204 }
5205 }
5206
5207 /**
5208 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
5209 * @phba: pointer to lpfc hba data structure.
5210 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
5211 *
5212 * This routine is to handle the SLI4 asynchronous dcbx event.
5213 **/
5214 static void
5215 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5216 struct lpfc_acqe_dcbx *acqe_dcbx)
5217 {
5218 phba->fc_eventTag = acqe_dcbx->event_tag;
5219 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5220 "0290 The SLI4 DCBX asynchronous event is not "
5221 "handled yet\n");
5222 }
5223
5224 /**
5225 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
5226 * @phba: pointer to lpfc hba data structure.
5227 * @acqe_grp5: pointer to the async grp5 completion queue entry.
5228 *
5229 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
5230 * is an asynchronous notification of a logical link speed change. The Port
5231 * reports the logical link speed in units of 10Mbps.
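 *
 * For example, a reported value of 100 corresponds to a logical link speed
 * of 1000 Mbps.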
5232 **/
5233 static void
5234 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5235 struct lpfc_acqe_grp5 *acqe_grp5)
5236 {
5237 uint16_t prev_ll_spd;
5238
5239 phba->fc_eventTag = acqe_grp5->event_tag;
5240 phba->fcoe_eventtag = acqe_grp5->event_tag;
5241 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5242 phba->sli4_hba.link_state.logical_speed =
5243 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
5244 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5245 "2789 GRP5 Async Event: Updating logical link speed "
5246 "from %dMbps to %dMbps\n", prev_ll_spd,
5247 phba->sli4_hba.link_state.logical_speed);
5248 }
5249
5250 /**
5251 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
5252 * @phba: pointer to lpfc hba data structure.
5253 *
5254 * This routine is invoked by the worker thread to process all the pending
5255 * SLI4 asynchronous events.
5256 **/
5257 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5258 {
5259 struct lpfc_cq_event *cq_event;
5260
5261 /* First, declare the async event has been handled */
5262 spin_lock_irq(&phba->hbalock);
5263 phba->hba_flag &= ~ASYNC_EVENT;
5264 spin_unlock_irq(&phba->hbalock);
5265 /* Now, handle all the async events */
5266 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5267 /* Get the first event from the head of the event queue */
5268 spin_lock_irq(&phba->hbalock);
5269 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5270 cq_event, struct lpfc_cq_event, list);
5271 spin_unlock_irq(&phba->hbalock);
5272 /* Process the asynchronous event */
5273 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5274 case LPFC_TRAILER_CODE_LINK:
5275 lpfc_sli4_async_link_evt(phba,
5276 &cq_event->cqe.acqe_link);
5277 break;
5278 case LPFC_TRAILER_CODE_FCOE:
5279 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
5280 break;
5281 case LPFC_TRAILER_CODE_DCBX:
5282 lpfc_sli4_async_dcbx_evt(phba,
5283 &cq_event->cqe.acqe_dcbx);
5284 break;
5285 case LPFC_TRAILER_CODE_GRP5:
5286 lpfc_sli4_async_grp5_evt(phba,
5287 &cq_event->cqe.acqe_grp5);
5288 break;
5289 case LPFC_TRAILER_CODE_FC:
5290 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5291 break;
5292 case LPFC_TRAILER_CODE_SLI:
5293 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5294 break;
5295 default:
5296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5297 "1804 Invalid asynchronous event code: "
5298 "x%x\n", bf_get(lpfc_trailer_code,
5299 &cq_event->cqe.mcqe_cmpl));
5300 break;
5301 }
5302 /* Free the completion event processed to the free pool */
5303 lpfc_sli4_cq_event_release(phba, cq_event);
5304 }
5305 }
5306
5307 /**
5308 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
5309 * @phba: pointer to lpfc hba data structure.
5310 *
5311 * This routine is invoked by the worker thread to process the pending FCF
5312 * table rediscovery completion event.
5313 **/ 5314 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5315 { 5316 int rc; 5317 5318 spin_lock_irq(&phba->hbalock); 5319 /* Clear FCF rediscovery timeout event */ 5320 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 5321 /* Clear driver fast failover FCF record flag */ 5322 phba->fcf.failover_rec.flag = 0; 5323 /* Set state for FCF fast failover */ 5324 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 5325 spin_unlock_irq(&phba->hbalock); 5326 5327 /* Scan FCF table from the first entry to re-discover SAN */ 5328 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5329 "2777 Start post-quiescent FCF table scan\n"); 5330 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5331 if (rc) 5332 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5333 "2747 Issue FCF scan read FCF mailbox " 5334 "command failed 0x%x\n", rc); 5335 } 5336 5337 /** 5338 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 5339 * @phba: pointer to lpfc hba data structure. 5340 * @dev_grp: The HBA PCI-Device group number. 5341 * 5342 * This routine is invoked to set up the per HBA PCI-Device group function 5343 * API jump table entries. 5344 * 5345 * Return: 0 if success, otherwise -ENODEV 5346 **/ 5347 int 5348 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5349 { 5350 int rc; 5351 5352 /* Set up lpfc PCI-device group */ 5353 phba->pci_dev_grp = dev_grp; 5354 5355 /* The LPFC_PCI_DEV_OC uses SLI4 */ 5356 if (dev_grp == LPFC_PCI_DEV_OC) 5357 phba->sli_rev = LPFC_SLI_REV4; 5358 5359 /* Set up device INIT API function jump table */ 5360 rc = lpfc_init_api_table_setup(phba, dev_grp); 5361 if (rc) 5362 return -ENODEV; 5363 /* Set up SCSI API function jump table */ 5364 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 5365 if (rc) 5366 return -ENODEV; 5367 /* Set up SLI API function jump table */ 5368 rc = lpfc_sli_api_table_setup(phba, dev_grp); 5369 if (rc) 5370 return -ENODEV; 5371 /* Set up MBOX API function jump table */ 5372 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 5373 if (rc) 5374 return -ENODEV; 5375 5376 return 0; 5377 } 5378 5379 /** 5380 * lpfc_log_intr_mode - Log the active interrupt mode 5381 * @phba: pointer to lpfc hba data structure. 5382 * @intr_mode: active interrupt mode adopted. 5383 * 5384 * This routine it invoked to log the currently used active interrupt mode 5385 * to the device. 5386 **/ 5387 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 5388 { 5389 switch (intr_mode) { 5390 case 0: 5391 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5392 "0470 Enable INTx interrupt mode.\n"); 5393 break; 5394 case 1: 5395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5396 "0481 Enabled MSI interrupt mode.\n"); 5397 break; 5398 case 2: 5399 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5400 "0480 Enabled MSI-X interrupt mode.\n"); 5401 break; 5402 default: 5403 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5404 "0482 Illegal interrupt mode.\n"); 5405 break; 5406 } 5407 return; 5408 } 5409 5410 /** 5411 * lpfc_enable_pci_dev - Enable a generic PCI device. 5412 * @phba: pointer to lpfc hba data structure. 5413 * 5414 * This routine is invoked to enable the PCI device that is common to all 5415 * PCI devices. 
5416 * 5417 * Return codes 5418 * 0 - successful 5419 * other values - error 5420 **/ 5421 static int 5422 lpfc_enable_pci_dev(struct lpfc_hba *phba) 5423 { 5424 struct pci_dev *pdev; 5425 5426 /* Obtain PCI device reference */ 5427 if (!phba->pcidev) 5428 goto out_error; 5429 else 5430 pdev = phba->pcidev; 5431 /* Enable PCI device */ 5432 if (pci_enable_device_mem(pdev)) 5433 goto out_error; 5434 /* Request PCI resource for the device */ 5435 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 5436 goto out_disable_device; 5437 /* Set up device as PCI master and save state for EEH */ 5438 pci_set_master(pdev); 5439 pci_try_set_mwi(pdev); 5440 pci_save_state(pdev); 5441 5442 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 5443 if (pci_is_pcie(pdev)) 5444 pdev->needs_freset = 1; 5445 5446 return 0; 5447 5448 out_disable_device: 5449 pci_disable_device(pdev); 5450 out_error: 5451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5452 "1401 Failed to enable pci device\n"); 5453 return -ENODEV; 5454 } 5455 5456 /** 5457 * lpfc_disable_pci_dev - Disable a generic PCI device. 5458 * @phba: pointer to lpfc hba data structure. 5459 * 5460 * This routine is invoked to disable the PCI device that is common to all 5461 * PCI devices. 5462 **/ 5463 static void 5464 lpfc_disable_pci_dev(struct lpfc_hba *phba) 5465 { 5466 struct pci_dev *pdev; 5467 5468 /* Obtain PCI device reference */ 5469 if (!phba->pcidev) 5470 return; 5471 else 5472 pdev = phba->pcidev; 5473 /* Release PCI resource and disable PCI device */ 5474 pci_release_mem_regions(pdev); 5475 pci_disable_device(pdev); 5476 5477 return; 5478 } 5479 5480 /** 5481 * lpfc_reset_hba - Reset a hba 5482 * @phba: pointer to lpfc hba data structure. 5483 * 5484 * This routine is invoked to reset a hba device. It brings the HBA 5485 * offline, performs a board restart, and then brings the board back 5486 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 5487 * any outstanding mailbox commands. 5488 **/ 5489 void 5490 lpfc_reset_hba(struct lpfc_hba *phba) 5491 { 5492 /* If resets are disabled then set error state and return. */ 5493 if (!phba->cfg_enable_hba_reset) { 5494 phba->link_state = LPFC_HBA_ERROR; 5495 return; 5496 } 5497 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 5498 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 5499 else 5500 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 5501 lpfc_offline(phba); 5502 lpfc_sli_brdrestart(phba); 5503 lpfc_online(phba); 5504 lpfc_unblock_mgmt_io(phba); 5505 } 5506 5507 /** 5508 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 5509 * @phba: pointer to lpfc hba data structure. 5510 * 5511 * This function reads the PCI SR-IOV extended capability of the physical 5512 * function to determine the maximum number of virtual functions (TotalVFs) 5513 * the device supports. Not all devices support SR-IOV; when the capability 5514 * is not present, the function reports that no virtual functions are 5515 * available.
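 * Return: the TotalVFs value read from the SR-IOV capability, or 0 when the
 * capability is not present.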
5516 **/ 5517 uint16_t 5518 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 5519 { 5520 struct pci_dev *pdev = phba->pcidev; 5521 uint16_t nr_virtfn; 5522 int pos; 5523 5524 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 5525 if (pos == 0) 5526 return 0; 5527 5528 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 5529 return nr_virtfn; 5530 } 5531 5532 /** 5533 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 5534 * @phba: pointer to lpfc hba data structure. 5535 * @nr_vfn: number of virtual functions to be enabled. 5536 * 5537 * This function enables the PCI SR-IOV virtual functions to a physical 5538 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 5539 * enable the number of virtual functions to the physical function. As 5540 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 5541 * API call is not considered an error condition for most devices. 5542 **/ 5543 int 5544 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 5545 { 5546 struct pci_dev *pdev = phba->pcidev; 5547 uint16_t max_nr_vfn; 5548 int rc; 5549 5550 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 5551 if (nr_vfn > max_nr_vfn) { 5552 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5553 "3057 Requested vfs (%d) greater than " 5554 "supported vfs (%d)", nr_vfn, max_nr_vfn); 5555 return -EINVAL; 5556 } 5557 5558 rc = pci_enable_sriov(pdev, nr_vfn); 5559 if (rc) { 5560 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5561 "2806 Failed to enable sriov on this device " 5562 "with vfn number nr_vf:%d, rc:%d\n", 5563 nr_vfn, rc); 5564 } else 5565 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5566 "2807 Successfully enabled sriov on this device " 5567 "with vfn number nr_vf:%d\n", nr_vfn); 5568 return rc; 5569 } 5570 5571 /** 5572 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources. 5573 * @phba: pointer to lpfc hba data structure. 5574 * 5575 * This routine is invoked to set up the driver internal resources before the 5576 * device specific resource setup to support the HBA device it attached to. 5577 * 5578 * Return codes 5579 * 0 - successful 5580 * other values - error 5581 **/ 5582 static int 5583 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 5584 { 5585 struct lpfc_sli *psli = &phba->sli; 5586 5587 /* 5588 * Driver resources common to all SLI revisions 5589 */ 5590 atomic_set(&phba->fast_event_count, 0); 5591 spin_lock_init(&phba->hbalock); 5592 5593 /* Initialize ndlp management spinlock */ 5594 spin_lock_init(&phba->ndlp_lock); 5595 5596 INIT_LIST_HEAD(&phba->port_list); 5597 INIT_LIST_HEAD(&phba->work_list); 5598 init_waitqueue_head(&phba->wait_4_mlo_m_q); 5599 5600 /* Initialize the wait queue head for the kernel thread */ 5601 init_waitqueue_head(&phba->work_waitq); 5602 5603 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5604 "1403 Protocols supported %s %s %s\n", 5605 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 5606 "SCSI" : " "), 5607 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 5608 "NVME" : " "), 5609 (phba->nvmet_support ?
"NVMET" : " ")); 5610 5611 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 5612 /* Initialize the scsi buffer list used by driver for scsi IO */ 5613 spin_lock_init(&phba->scsi_buf_list_get_lock); 5614 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 5615 spin_lock_init(&phba->scsi_buf_list_put_lock); 5616 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 5617 } 5618 5619 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 5620 (phba->nvmet_support == 0)) { 5621 /* Initialize the NVME buffer list used by driver for NVME IO */ 5622 spin_lock_init(&phba->nvme_buf_list_get_lock); 5623 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get); 5624 phba->get_nvme_bufs = 0; 5625 spin_lock_init(&phba->nvme_buf_list_put_lock); 5626 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); 5627 phba->put_nvme_bufs = 0; 5628 } 5629 5630 /* Initialize the fabric iocb list */ 5631 INIT_LIST_HEAD(&phba->fabric_iocb_list); 5632 5633 /* Initialize list to save ELS buffers */ 5634 INIT_LIST_HEAD(&phba->elsbuf); 5635 5636 /* Initialize FCF connection rec list */ 5637 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 5638 5639 /* Initialize OAS configuration list */ 5640 spin_lock_init(&phba->devicelock); 5641 INIT_LIST_HEAD(&phba->luns); 5642 5643 /* MBOX heartbeat timer */ 5644 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 5645 /* Fabric block timer */ 5646 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 5647 /* EA polling mode timer */ 5648 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 5649 /* Heartbeat timer */ 5650 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 5651 5652 return 0; 5653 } 5654 5655 /** 5656 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 5657 * @phba: pointer to lpfc hba data structure. 5658 * 5659 * This routine is invoked to set up the driver internal resources specific to 5660 * support the SLI-3 HBA device it attached to. 5661 * 5662 * Return codes 5663 * 0 - successful 5664 * other values - error 5665 **/ 5666 static int 5667 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 5668 { 5669 int rc; 5670 5671 /* 5672 * Initialize timers used by driver 5673 */ 5674 5675 /* FCP polling mode timer */ 5676 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 5677 5678 /* Host attention work mask setup */ 5679 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 5680 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 5681 5682 /* Get all the module params for configuring this host */ 5683 lpfc_get_cfgparam(phba); 5684 /* Set up phase-1 common device driver resources */ 5685 5686 rc = lpfc_setup_driver_resource_phase1(phba); 5687 if (rc) 5688 return -ENODEV; 5689 5690 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 5691 phba->menlo_flag |= HBA_MENLO_SUPPORT; 5692 /* check for menlo minimum sg count */ 5693 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 5694 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 5695 } 5696 5697 if (!phba->sli.sli3_ring) 5698 phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING * 5699 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 5700 if (!phba->sli.sli3_ring) 5701 return -ENOMEM; 5702 5703 /* 5704 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 5705 * used to create the sg_dma_buf_pool must be dynamically calculated. 5706 */ 5707 5708 /* Initialize the host templates the configured values. 
*/ 5709 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5710 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 5711 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5712 5713 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 5714 if (phba->cfg_enable_bg) { 5715 /* 5716 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 5717 * the FCP rsp, and a BDE for each. Since we have no control 5718 * over how many protection data segments the SCSI Layer 5719 * will hand us (ie: there could be one for every block 5720 * in the IO), we just allocate enough BDEs to accommodate 5721 * our max amount and we need to limit lpfc_sg_seg_cnt to 5722 * minimize the risk of running out. 5723 */ 5724 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5725 sizeof(struct fcp_rsp) + 5726 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); 5727 5728 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 5729 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 5730 5731 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 5732 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 5733 } else { 5734 /* 5735 * The scsi_buf for a regular I/O will hold the FCP cmnd, 5736 * the FCP rsp, a BDE for each, and a BDE for up to 5737 * cfg_sg_seg_cnt data segments. 5738 */ 5739 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5740 sizeof(struct fcp_rsp) + 5741 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 5742 5743 /* Total BDEs in BPL for scsi_sg_list */ 5744 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5745 } 5746 5747 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5748 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 5749 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5750 phba->cfg_total_seg_cnt); 5751 5752 phba->max_vpi = LPFC_MAX_VPI; 5753 /* This will be set to correct value after config_port mbox */ 5754 phba->max_vports = 0; 5755 5756 /* 5757 * Initialize the SLI Layer to run with lpfc HBAs. 5758 */ 5759 lpfc_sli_setup(phba); 5760 lpfc_sli_queue_init(phba); 5761 5762 /* Allocate device driver memory */ 5763 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 5764 return -ENOMEM; 5765 5766 /* 5767 * Enable sr-iov virtual functions if supported and configured 5768 * through the module parameter. 5769 */ 5770 if (phba->cfg_sriov_nr_virtfn > 0) { 5771 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 5772 phba->cfg_sriov_nr_virtfn); 5773 if (rc) { 5774 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5775 "2808 Requested number of SR-IOV " 5776 "virtual functions (%d) is not " 5777 "supported\n", 5778 phba->cfg_sriov_nr_virtfn); 5779 phba->cfg_sriov_nr_virtfn = 0; 5780 } 5781 } 5782 5783 return 0; 5784 } 5785 5786 /** 5787 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 5788 * @phba: pointer to lpfc hba data structure. 5789 * 5790 * This routine is invoked to unset the driver internal resources set up 5791 * specific for supporting the SLI-3 HBA device it attached to. 5792 **/ 5793 static void 5794 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 5795 { 5796 /* Free device driver memory allocated */ 5797 lpfc_mem_free_all(phba); 5798 5799 return; 5800 } 5801 5802 /** 5803 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 5804 * @phba: pointer to lpfc hba data structure. 5805 * 5806 * This routine is invoked to set up the driver internal resources specific to 5807 * support the SLI-4 HBA device it attached to.
5808 * 5809 * Return codes 5810 * 0 - successful 5811 * other values - error 5812 **/ 5813 static int 5814 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 5815 { 5816 LPFC_MBOXQ_t *mboxq; 5817 MAILBOX_t *mb; 5818 int rc, i, max_buf_size; 5819 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 5820 struct lpfc_mqe *mqe; 5821 int longs; 5822 int fof_vectors = 0; 5823 int extra; 5824 uint64_t wwn; 5825 5826 phba->sli4_hba.num_online_cpu = num_online_cpus(); 5827 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 5828 phba->sli4_hba.curr_disp_cpu = 0; 5829 5830 /* Get all the module params for configuring this host */ 5831 lpfc_get_cfgparam(phba); 5832 5833 /* Set up phase-1 common device driver resources */ 5834 rc = lpfc_setup_driver_resource_phase1(phba); 5835 if (rc) 5836 return -ENODEV; 5837 5838 /* Before proceeding, wait for POST done and device ready */ 5839 rc = lpfc_sli4_post_status_check(phba); 5840 if (rc) 5841 return -ENODEV; 5842 5843 /* 5844 * Initialize timers used by driver 5845 */ 5846 5847 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 5848 5849 /* FCF rediscover timer */ 5850 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 5851 5852 /* 5853 * Control structure for handling external multi-buffer mailbox 5854 * command pass-through. 5855 */ 5856 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 5857 sizeof(struct lpfc_mbox_ext_buf_ctx)); 5858 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 5859 5860 phba->max_vpi = LPFC_MAX_VPI; 5861 5862 /* This will be set to correct value after the read_config mbox */ 5863 phba->max_vports = 0; 5864 5865 /* Program the default value of vlan_id and fc_map */ 5866 phba->valid_vlan = 0; 5867 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5868 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5869 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5870 5871 /* 5872 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 5873 * we will associate a new ring for each EQ/CQ/WQ tuple. 5874 * The WQ create will allocate the ring. 5875 */ 5876 5877 /* 5878 * 1 for cmd, 1 for rsp, NVME adds an extra one 5879 * for boundary conditions in its max_sgl_segment template. 5880 */ 5881 extra = 2; 5882 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 5883 extra++; 5884 5885 /* 5886 * It doesn't matter what family our adapter is in, we are 5887 * limited to 2 Pages, 512 SGEs, for our SGL. 5888 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 5889 */ 5890 max_buf_size = (2 * SLI4_PAGE_SIZE); 5891 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra) 5892 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra; 5893 5894 /* 5895 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size 5896 * used to create the sg_dma_buf_pool must be calculated. 5897 */ 5898 if (phba->cfg_enable_bg) { 5899 /* 5900 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 5901 * the FCP rsp, and a SGE. Since we have no control 5902 * over how many protection segments the SCSI Layer 5903 * will hand us (ie: there could be one for every block 5904 * in the IO), just allocate enough SGEs to accommodate 5905 * our max amount and we need to limit lpfc_sg_seg_cnt 5906 * to minimize the risk of running out.
5907 */ 5908 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5909 sizeof(struct fcp_rsp) + max_buf_size; 5910 5911 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 5912 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 5913 5914 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) 5915 phba->cfg_sg_seg_cnt = 5916 LPFC_MAX_SG_SLI4_SEG_CNT_DIF; 5917 } else { 5918 /* 5919 * The scsi_buf for a regular I/O holds the FCP cmnd, 5920 * the FCP rsp, a SGE for each, and a SGE for up to 5921 * cfg_sg_seg_cnt data segments. 5922 */ 5923 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5924 sizeof(struct fcp_rsp) + 5925 ((phba->cfg_sg_seg_cnt + extra) * 5926 sizeof(struct sli4_sge)); 5927 5928 /* Total SGEs for scsi_sg_list */ 5929 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 5930 5931 /* 5932 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 5933 * need to post 1 page for the SGL. 5934 */ 5935 } 5936 5937 /* Initialize the host templates with the updated values. */ 5938 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5939 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5940 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 5941 5942 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 5943 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 5944 else 5945 phba->cfg_sg_dma_buf_size = 5946 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 5947 5948 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5949 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", 5950 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5951 phba->cfg_total_seg_cnt); 5952 5953 /* Initialize buffer queue management fields */ 5954 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 5955 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 5956 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 5957 5958 /* 5959 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 5960 */ 5961 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 5962 /* Initialize the Abort scsi buffer list used by driver */ 5963 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 5964 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 5965 } 5966 5967 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 5968 /* Initialize the Abort nvme buffer list used by driver */ 5969 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); 5970 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 5971 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 5972 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 5973 } 5974 5975 /* This abort list is used by the worker thread */ 5976 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 5977 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 5978 5979 /* 5980 * Initialize driver internal slow-path work queues 5981 */ 5982 5983 /* Driver internal slow-path CQ Event pool */ 5984 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 5985 /* Response IOCB work queue list */ 5986 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 5987 /* Asynchronous event CQ Event work queue list */ 5988 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 5989 /* Fast-path XRI aborted CQ Event work queue list */ 5990 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 5991 /* Slow-path XRI aborted CQ Event work queue list */ 5992 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 5993 /* Receive queue CQ Event work queue list */ 5994 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 5995 5996 /* Initialize extent block lists.
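 * These lists track the RPI, XRI, VFI and VPI resource extent blocks
 * obtained from the port when resource extents are in use.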
*/ 5997 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 5998 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 5999 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6000 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6001 6002 /* Initialize mboxq lists. If the early init routines fail 6003 * these lists need to be correctly initialized. 6004 */ 6005 INIT_LIST_HEAD(&phba->sli.mboxq); 6006 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6007 6008 /* initialize optic_state to 0xFF */ 6009 phba->sli4_hba.lnk_info.optic_state = 0xff; 6010 6011 /* Allocate device driver memory */ 6012 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6013 if (rc) 6014 return -ENOMEM; 6015 6016 /* IF Type 2 ports get initialized now. */ 6017 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6018 LPFC_SLI_INTF_IF_TYPE_2) { 6019 rc = lpfc_pci_function_reset(phba); 6020 if (unlikely(rc)) { 6021 rc = -ENODEV; 6022 goto out_free_mem; 6023 } 6024 phba->temp_sensor_support = 1; 6025 } 6026 6027 /* Create the bootstrap mailbox command */ 6028 rc = lpfc_create_bootstrap_mbox(phba); 6029 if (unlikely(rc)) 6030 goto out_free_mem; 6031 6032 /* Set up the host's endian order with the device. */ 6033 rc = lpfc_setup_endian_order(phba); 6034 if (unlikely(rc)) 6035 goto out_free_bsmbx; 6036 6037 /* Set up the hba's configuration parameters. */ 6038 rc = lpfc_sli4_read_config(phba); 6039 if (unlikely(rc)) 6040 goto out_free_bsmbx; 6041 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6042 if (unlikely(rc)) 6043 goto out_free_bsmbx; 6044 6045 /* IF Type 0 ports get initialized now. */ 6046 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6047 LPFC_SLI_INTF_IF_TYPE_0) { 6048 rc = lpfc_pci_function_reset(phba); 6049 if (unlikely(rc)) 6050 goto out_free_bsmbx; 6051 } 6052 6053 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6054 GFP_KERNEL); 6055 if (!mboxq) { 6056 rc = -ENOMEM; 6057 goto out_free_bsmbx; 6058 } 6059 6060 /* Check for NVMET being configured */ 6061 phba->nvmet_support = 0; 6062 if (lpfc_enable_nvmet_cnt) { 6063 6064 /* First get WWN of HBA instance */ 6065 lpfc_read_nv(phba, mboxq); 6066 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6067 if (rc != MBX_SUCCESS) { 6068 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6069 "6016 Mailbox failed , mbxCmd x%x " 6070 "READ_NV, mbxStatus x%x\n", 6071 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6072 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6073 mempool_free(mboxq, phba->mbox_mem_pool); 6074 rc = -EIO; 6075 goto out_free_bsmbx; 6076 } 6077 mb = &mboxq->u.mb; 6078 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6079 sizeof(uint64_t)); 6080 wwn = cpu_to_be64(wwn); 6081 phba->sli4_hba.wwnn.u.name = wwn; 6082 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6083 sizeof(uint64_t)); 6084 /* wwn is WWPN of HBA instance */ 6085 wwn = cpu_to_be64(wwn); 6086 phba->sli4_hba.wwpn.u.name = wwn; 6087 6088 /* Check to see if it matches any module parameter */ 6089 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6090 if (wwn == lpfc_enable_nvmet[i]) { 6091 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6092 if (lpfc_nvmet_mem_alloc(phba)) 6093 break; 6094 6095 phba->nvmet_support = 1; /* a match */ 6096 6097 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6098 "6017 NVME Target %016llx\n", 6099 wwn); 6100 #else 6101 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6102 "6021 Can't enable NVME Target." 
6103 " NVME_TARGET_FC infrastructure" 6104 " is not in kernel\n"); 6105 #endif 6106 break; 6107 } 6108 } 6109 } 6110 6111 lpfc_nvme_mod_param_dep(phba); 6112 6113 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 6114 lpfc_supported_pages(mboxq); 6115 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6116 if (!rc) { 6117 mqe = &mboxq->u.mqe; 6118 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 6119 LPFC_MAX_SUPPORTED_PAGES); 6120 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 6121 switch (pn_page[i]) { 6122 case LPFC_SLI4_PARAMETERS: 6123 phba->sli4_hba.pc_sli4_params.supported = 1; 6124 break; 6125 default: 6126 break; 6127 } 6128 } 6129 /* Read the port's SLI4 Parameters capabilities if supported. */ 6130 if (phba->sli4_hba.pc_sli4_params.supported) 6131 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6132 if (rc) { 6133 mempool_free(mboxq, phba->mbox_mem_pool); 6134 rc = -EIO; 6135 goto out_free_bsmbx; 6136 } 6137 } 6138 6139 /* 6140 * Get sli4 parameters that override parameters from Port capabilities. 6141 * If this call fails, it isn't critical unless the SLI4 parameters come 6142 * back in conflict. 6143 */ 6144 rc = lpfc_get_sli4_parameters(phba, mboxq); 6145 if (rc) { 6146 if (phba->sli4_hba.extents_in_use && 6147 phba->sli4_hba.rpi_hdrs_in_use) { 6148 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6149 "2999 Unsupported SLI4 Parameters " 6150 "Extents and RPI headers enabled.\n"); 6151 } 6152 mempool_free(mboxq, phba->mbox_mem_pool); 6153 rc = -EIO; 6154 goto out_free_bsmbx; 6155 } 6156 6157 mempool_free(mboxq, phba->mbox_mem_pool); 6158 6159 /* Verify OAS is supported */ 6160 lpfc_sli4_oas_verify(phba); 6161 if (phba->cfg_fof) 6162 fof_vectors = 1; 6163 6164 /* Verify all the SLI4 queues */ 6165 rc = lpfc_sli4_queue_verify(phba); 6166 if (rc) 6167 goto out_free_bsmbx; 6168 6169 /* Create driver internal CQE event pool */ 6170 rc = lpfc_sli4_cq_event_pool_create(phba); 6171 if (rc) 6172 goto out_free_bsmbx; 6173 6174 /* Initialize sgl lists per host */ 6175 lpfc_init_sgl_list(phba); 6176 6177 /* Allocate and initialize active sgl array */ 6178 rc = lpfc_init_active_sgl_array(phba); 6179 if (rc) { 6180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6181 "1430 Failed to initialize sgl list.\n"); 6182 goto out_destroy_cq_event_pool; 6183 } 6184 rc = lpfc_sli4_init_rpi_hdrs(phba); 6185 if (rc) { 6186 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6187 "1432 Failed to initialize rpi headers.\n"); 6188 goto out_free_active_sgl; 6189 } 6190 6191 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6192 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6193 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 6194 GFP_KERNEL); 6195 if (!phba->fcf.fcf_rr_bmask) { 6196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6197 "2759 Failed allocate memory for FCF round " 6198 "robin failover bmask\n"); 6199 rc = -ENOMEM; 6200 goto out_remove_rpi_hdrs; 6201 } 6202 6203 phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs, 6204 sizeof(struct lpfc_hba_eq_hdl), 6205 GFP_KERNEL); 6206 if (!phba->sli4_hba.hba_eq_hdl) { 6207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6208 "2572 Failed allocate memory for " 6209 "fast-path per-EQ handle array\n"); 6210 rc = -ENOMEM; 6211 goto out_free_fcf_rr_bmask; 6212 } 6213 6214 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu, 6215 sizeof(struct lpfc_vector_map_info), 6216 GFP_KERNEL); 6217 if (!phba->sli4_hba.cpu_map) { 6218 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6219 
"3327 Failed allocate memory for msi-x " 6220 "interrupt vector mapping\n"); 6221 rc = -ENOMEM; 6222 goto out_free_hba_eq_hdl; 6223 } 6224 if (lpfc_used_cpu == NULL) { 6225 lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t), 6226 GFP_KERNEL); 6227 if (!lpfc_used_cpu) { 6228 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6229 "3335 Failed allocate memory for msi-x " 6230 "interrupt vector mapping\n"); 6231 kfree(phba->sli4_hba.cpu_map); 6232 rc = -ENOMEM; 6233 goto out_free_hba_eq_hdl; 6234 } 6235 for (i = 0; i < lpfc_present_cpu; i++) 6236 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; 6237 } 6238 6239 /* 6240 * Enable sr-iov virtual functions if supported and configured 6241 * through the module parameter. 6242 */ 6243 if (phba->cfg_sriov_nr_virtfn > 0) { 6244 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6245 phba->cfg_sriov_nr_virtfn); 6246 if (rc) { 6247 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6248 "3020 Requested number of SR-IOV " 6249 "virtual functions (%d) is not " 6250 "supported\n", 6251 phba->cfg_sriov_nr_virtfn); 6252 phba->cfg_sriov_nr_virtfn = 0; 6253 } 6254 } 6255 6256 return 0; 6257 6258 out_free_hba_eq_hdl: 6259 kfree(phba->sli4_hba.hba_eq_hdl); 6260 out_free_fcf_rr_bmask: 6261 kfree(phba->fcf.fcf_rr_bmask); 6262 out_remove_rpi_hdrs: 6263 lpfc_sli4_remove_rpi_hdrs(phba); 6264 out_free_active_sgl: 6265 lpfc_free_active_sgl(phba); 6266 out_destroy_cq_event_pool: 6267 lpfc_sli4_cq_event_pool_destroy(phba); 6268 out_free_bsmbx: 6269 lpfc_destroy_bootstrap_mbox(phba); 6270 out_free_mem: 6271 lpfc_mem_free(phba); 6272 return rc; 6273 } 6274 6275 /** 6276 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 6277 * @phba: pointer to lpfc hba data structure. 6278 * 6279 * This routine is invoked to unset the driver internal resources set up 6280 * specific for supporting the SLI-4 HBA device it attached to. 6281 **/ 6282 static void 6283 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 6284 { 6285 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 6286 6287 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 6288 kfree(phba->sli4_hba.cpu_map); 6289 phba->sli4_hba.num_present_cpu = 0; 6290 phba->sli4_hba.num_online_cpu = 0; 6291 phba->sli4_hba.curr_disp_cpu = 0; 6292 6293 /* Free memory allocated for fast-path work queue handles */ 6294 kfree(phba->sli4_hba.hba_eq_hdl); 6295 6296 /* Free the allocated rpi headers. */ 6297 lpfc_sli4_remove_rpi_hdrs(phba); 6298 lpfc_sli4_remove_rpis(phba); 6299 6300 /* Free eligible FCF index bmask */ 6301 kfree(phba->fcf.fcf_rr_bmask); 6302 6303 /* Free the ELS sgl list */ 6304 lpfc_free_active_sgl(phba); 6305 lpfc_free_els_sgl_list(phba); 6306 lpfc_free_nvmet_sgl_list(phba); 6307 6308 /* Free the completion queue EQ event pool */ 6309 lpfc_sli4_cq_event_release_all(phba); 6310 lpfc_sli4_cq_event_pool_destroy(phba); 6311 6312 /* Release resource identifiers. */ 6313 lpfc_sli4_dealloc_resource_identifiers(phba); 6314 6315 /* Free the bsmbx region. */ 6316 lpfc_destroy_bootstrap_mbox(phba); 6317 6318 /* Free the SLI Layer memory with SLI4 HBAs */ 6319 lpfc_mem_free_all(phba); 6320 6321 /* Free the current connect table */ 6322 list_for_each_entry_safe(conn_entry, next_conn_entry, 6323 &phba->fcf_conn_rec_list, list) { 6324 list_del_init(&conn_entry->list); 6325 kfree(conn_entry); 6326 } 6327 6328 return; 6329 } 6330 6331 /** 6332 * lpfc_init_api_table_setup - Set up init api function jump table 6333 * @phba: The hba struct for which this call is being executed. 
6334 * @dev_grp: The HBA PCI-Device group number. 6335 * 6336 * This routine sets up the device INIT interface API function jump table 6337 * in @phba struct. 6338 * 6339 * Returns: 0 - success, -ENODEV - failure. 6340 **/ 6341 int 6342 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6343 { 6344 phba->lpfc_hba_init_link = lpfc_hba_init_link; 6345 phba->lpfc_hba_down_link = lpfc_hba_down_link; 6346 phba->lpfc_selective_reset = lpfc_selective_reset; 6347 switch (dev_grp) { 6348 case LPFC_PCI_DEV_LP: 6349 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 6350 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 6351 phba->lpfc_stop_port = lpfc_stop_port_s3; 6352 break; 6353 case LPFC_PCI_DEV_OC: 6354 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 6355 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 6356 phba->lpfc_stop_port = lpfc_stop_port_s4; 6357 break; 6358 default: 6359 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6360 "1431 Invalid HBA PCI-device group: 0x%x\n", 6361 dev_grp); 6362 return -ENODEV; 6363 break; 6364 } 6365 return 0; 6366 } 6367 6368 /** 6369 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 6370 * @phba: pointer to lpfc hba data structure. 6371 * 6372 * This routine is invoked to set up the driver internal resources after the 6373 * device specific resource setup to support the HBA device it attached to. 6374 * 6375 * Return codes 6376 * 0 - successful 6377 * other values - error 6378 **/ 6379 static int 6380 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 6381 { 6382 int error; 6383 6384 /* Startup the kernel thread for this host adapter. */ 6385 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6386 "lpfc_worker_%d", phba->brd_no); 6387 if (IS_ERR(phba->worker_thread)) { 6388 error = PTR_ERR(phba->worker_thread); 6389 return error; 6390 } 6391 6392 /* workqueue for deferred irq use */ 6393 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6394 6395 return 0; 6396 } 6397 6398 /** 6399 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 6400 * @phba: pointer to lpfc hba data structure. 6401 * 6402 * This routine is invoked to unset the driver internal resources set up after 6403 * the device specific resource setup for supporting the HBA device it 6404 * attached to. 6405 **/ 6406 static void 6407 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 6408 { 6409 if (phba->wq) { 6410 flush_workqueue(phba->wq); 6411 destroy_workqueue(phba->wq); 6412 phba->wq = NULL; 6413 } 6414 6415 /* Stop kernel worker thread */ 6416 kthread_stop(phba->worker_thread); 6417 } 6418 6419 /** 6420 * lpfc_free_iocb_list - Free iocb list. 6421 * @phba: pointer to lpfc hba data structure. 6422 * 6423 * This routine is invoked to free the driver's IOCB list and memory. 6424 **/ 6425 void 6426 lpfc_free_iocb_list(struct lpfc_hba *phba) 6427 { 6428 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 6429 6430 spin_lock_irq(&phba->hbalock); 6431 list_for_each_entry_safe(iocbq_entry, iocbq_next, 6432 &phba->lpfc_iocb_list, list) { 6433 list_del(&iocbq_entry->list); 6434 kfree(iocbq_entry); 6435 phba->total_iocbq_bufs--; 6436 } 6437 spin_unlock_irq(&phba->hbalock); 6438 6439 return; 6440 } 6441 6442 /** 6443 * lpfc_init_iocb_list - Allocate and initialize iocb list. 6444 * @phba: pointer to lpfc hba data structure. 6445 * 6446 * This routine is invoked to allocate and initizlize the driver's IOCB 6447 * list and set up the IOCB tag array accordingly. 
6448 * 6449 * Return codes 6450 * 0 - successful 6451 * other values - error 6452 **/ 6453 int 6454 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 6455 { 6456 struct lpfc_iocbq *iocbq_entry = NULL; 6457 uint16_t iotag; 6458 int i; 6459 6460 /* Initialize and populate the iocb list per host. */ 6461 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 6462 for (i = 0; i < iocb_count; i++) { 6463 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 6464 if (iocbq_entry == NULL) { 6465 printk(KERN_ERR "%s: only allocated %d iocbs of " 6466 "expected %d count. Unloading driver.\n", 6467 __func__, i, LPFC_IOCB_LIST_CNT); 6468 goto out_free_iocbq; 6469 } 6470 6471 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 6472 if (iotag == 0) { 6473 kfree(iocbq_entry); 6474 printk(KERN_ERR "%s: failed to allocate IOTAG. " 6475 "Unloading driver.\n", __func__); 6476 goto out_free_iocbq; 6477 } 6478 iocbq_entry->sli4_lxritag = NO_XRI; 6479 iocbq_entry->sli4_xritag = NO_XRI; 6480 6481 spin_lock_irq(&phba->hbalock); 6482 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 6483 phba->total_iocbq_bufs++; 6484 spin_unlock_irq(&phba->hbalock); 6485 } 6486 6487 return 0; 6488 6489 out_free_iocbq: 6490 lpfc_free_iocb_list(phba); 6491 6492 return -ENOMEM; 6493 } 6494 6495 /** 6496 * lpfc_free_sgl_list - Free a given sgl list. 6497 * @phba: pointer to lpfc hba data structure. 6498 * @sglq_list: pointer to the head of sgl list. 6499 * 6500 * This routine is invoked to free a give sgl list and memory. 6501 **/ 6502 void 6503 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 6504 { 6505 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6506 6507 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 6508 list_del(&sglq_entry->list); 6509 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 6510 kfree(sglq_entry); 6511 } 6512 } 6513 6514 /** 6515 * lpfc_free_els_sgl_list - Free els sgl list. 6516 * @phba: pointer to lpfc hba data structure. 6517 * 6518 * This routine is invoked to free the driver's els sgl list and memory. 6519 **/ 6520 static void 6521 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 6522 { 6523 LIST_HEAD(sglq_list); 6524 6525 /* Retrieve all els sgls from driver list */ 6526 spin_lock_irq(&phba->hbalock); 6527 spin_lock(&phba->sli4_hba.sgl_list_lock); 6528 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 6529 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6530 spin_unlock_irq(&phba->hbalock); 6531 6532 /* Now free the sgl list */ 6533 lpfc_free_sgl_list(phba, &sglq_list); 6534 } 6535 6536 /** 6537 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 6538 * @phba: pointer to lpfc hba data structure. 6539 * 6540 * This routine is invoked to free the driver's nvmet sgl list and memory. 
6541 **/ 6542 static void 6543 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 6544 { 6545 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6546 LIST_HEAD(sglq_list); 6547 6548 /* Retrieve all nvmet sgls from driver list */ 6549 spin_lock_irq(&phba->hbalock); 6550 spin_lock(&phba->sli4_hba.sgl_list_lock); 6551 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 6552 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6553 spin_unlock_irq(&phba->hbalock); 6554 6555 /* Now free the sgl list */ 6556 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 6557 list_del(&sglq_entry->list); 6558 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 6559 kfree(sglq_entry); 6560 } 6561 6562 /* Update the nvmet_xri_cnt to reflect no current sgls. 6563 * The next initialization cycle sets the count and allocates 6564 * the sgls over again. 6565 */ 6566 phba->sli4_hba.nvmet_xri_cnt = 0; 6567 } 6568 6569 /** 6570 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 6571 * @phba: pointer to lpfc hba data structure. 6572 * 6573 * This routine is invoked to allocate the driver's active sgl memory. 6574 * This array will hold the sglq_entry's for active IOs. 6575 **/ 6576 static int 6577 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 6578 { 6579 int size; 6580 size = sizeof(struct lpfc_sglq *); 6581 size *= phba->sli4_hba.max_cfg_param.max_xri; 6582 6583 phba->sli4_hba.lpfc_sglq_active_list = 6584 kzalloc(size, GFP_KERNEL); 6585 if (!phba->sli4_hba.lpfc_sglq_active_list) 6586 return -ENOMEM; 6587 return 0; 6588 } 6589 6590 /** 6591 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 6592 * @phba: pointer to lpfc hba data structure. 6593 * 6594 * This routine is invoked to walk through the array of active sglq entries 6595 * and free all of the resources. 6596 * This is just a place holder for now. 6597 **/ 6598 static void 6599 lpfc_free_active_sgl(struct lpfc_hba *phba) 6600 { 6601 kfree(phba->sli4_hba.lpfc_sglq_active_list); 6602 } 6603 6604 /** 6605 * lpfc_init_sgl_list - Allocate and initialize sgl list. 6606 * @phba: pointer to lpfc hba data structure. 6607 * 6608 * This routine is invoked to allocate and initizlize the driver's sgl 6609 * list and set up the sgl xritag tag array accordingly. 6610 * 6611 **/ 6612 static void 6613 lpfc_init_sgl_list(struct lpfc_hba *phba) 6614 { 6615 /* Initialize and populate the sglq list per host/VF. */ 6616 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 6617 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 6618 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 6619 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6620 6621 /* els xri-sgl book keeping */ 6622 phba->sli4_hba.els_xri_cnt = 0; 6623 6624 /* scsi xri-buffer book keeping */ 6625 phba->sli4_hba.scsi_xri_cnt = 0; 6626 6627 /* nvme xri-buffer book keeping */ 6628 phba->sli4_hba.nvme_xri_cnt = 0; 6629 } 6630 6631 /** 6632 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 6633 * @phba: pointer to lpfc hba data structure. 6634 * 6635 * This routine is invoked to post rpi header templates to the 6636 * port for those SLI4 ports that do not support extents. This routine 6637 * posts a PAGE_SIZE memory region to the port to hold up to 6638 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 6639 * and should be called only when interrupts are disabled. 6640 * 6641 * Return codes 6642 * 0 - successful 6643 * -ERROR - otherwise. 
6644 **/ 6645 int 6646 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 6647 { 6648 int rc = 0; 6649 struct lpfc_rpi_hdr *rpi_hdr; 6650 6651 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 6652 if (!phba->sli4_hba.rpi_hdrs_in_use) 6653 return rc; 6654 if (phba->sli4_hba.extents_in_use) 6655 return -EIO; 6656 6657 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 6658 if (!rpi_hdr) { 6659 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6660 "0391 Error during rpi post operation\n"); 6661 lpfc_sli4_remove_rpis(phba); 6662 rc = -ENODEV; 6663 } 6664 6665 return rc; 6666 } 6667 6668 /** 6669 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 6670 * @phba: pointer to lpfc hba data structure. 6671 * 6672 * This routine is invoked to allocate a single 4KB memory region to 6673 * support rpis and stores them in the phba. This single region 6674 * provides support for up to 64 rpis. The region is used globally 6675 * by the device. 6676 * 6677 * Returns: 6678 * A valid rpi hdr on success. 6679 * A NULL pointer on any failure. 6680 **/ 6681 struct lpfc_rpi_hdr * 6682 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 6683 { 6684 uint16_t rpi_limit, curr_rpi_range; 6685 struct lpfc_dmabuf *dmabuf; 6686 struct lpfc_rpi_hdr *rpi_hdr; 6687 6688 /* 6689 * If the SLI4 port supports extents, posting the rpi header isn't 6690 * required. Set the expected maximum count and let the actual value 6691 * get set when extents are fully allocated. 6692 */ 6693 if (!phba->sli4_hba.rpi_hdrs_in_use) 6694 return NULL; 6695 if (phba->sli4_hba.extents_in_use) 6696 return NULL; 6697 6698 /* The limit on the logical index is just the max_rpi count. */ 6699 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 6700 6701 spin_lock_irq(&phba->hbalock); 6702 /* 6703 * Establish the starting RPI in this header block. The starting 6704 * rpi is normalized to a zero base because the physical rpi is 6705 * port based. 6706 */ 6707 curr_rpi_range = phba->sli4_hba.next_rpi; 6708 spin_unlock_irq(&phba->hbalock); 6709 6710 /* Reached full RPI range */ 6711 if (curr_rpi_range == rpi_limit) 6712 return NULL; 6713 6714 /* 6715 * First allocate the protocol header region for the port. The 6716 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 6717 */ 6718 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6719 if (!dmabuf) 6720 return NULL; 6721 6722 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6723 LPFC_HDR_TEMPLATE_SIZE, 6724 &dmabuf->phys, GFP_KERNEL); 6725 if (!dmabuf->virt) { 6726 rpi_hdr = NULL; 6727 goto err_free_dmabuf; 6728 } 6729 6730 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 6731 rpi_hdr = NULL; 6732 goto err_free_coherent; 6733 } 6734 6735 /* Save the rpi header data for cleanup later. */ 6736 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 6737 if (!rpi_hdr) 6738 goto err_free_coherent; 6739 6740 rpi_hdr->dmabuf = dmabuf; 6741 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 6742 rpi_hdr->page_count = 1; 6743 spin_lock_irq(&phba->hbalock); 6744 6745 /* The rpi_hdr stores the logical index only. 
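 * The port-relative physical rpi is derived from this zero-based index
 * when the header is posted, per the normalization note above.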
*/ 6746 rpi_hdr->start_rpi = curr_rpi_range; 6747 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 6748 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 6749 6750 spin_unlock_irq(&phba->hbalock); 6751 return rpi_hdr; 6752 6753 err_free_coherent: 6754 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 6755 dmabuf->virt, dmabuf->phys); 6756 err_free_dmabuf: 6757 kfree(dmabuf); 6758 return NULL; 6759 } 6760 6761 /** 6762 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 6763 * @phba: pointer to lpfc hba data structure. 6764 * 6765 * This routine is invoked to remove all memory resources allocated 6766 * to support rpis for SLI4 ports not supporting extents. This routine 6767 * presumes the caller has released all rpis consumed by fabric or port 6768 * logins and is prepared to have the header pages removed. 6769 **/ 6770 void 6771 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 6772 { 6773 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 6774 6775 if (!phba->sli4_hba.rpi_hdrs_in_use) 6776 goto exit; 6777 6778 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 6779 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 6780 list_del(&rpi_hdr->list); 6781 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 6782 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 6783 kfree(rpi_hdr->dmabuf); 6784 kfree(rpi_hdr); 6785 } 6786 exit: 6787 /* There are no rpis available to the port now. */ 6788 phba->sli4_hba.next_rpi = 0; 6789 } 6790 6791 /** 6792 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 6793 * @pdev: pointer to pci device data structure. 6794 * 6795 * This routine is invoked to allocate the driver hba data structure for an 6796 * HBA device. If the allocation is successful, the phba reference to the 6797 * PCI device data structure is set. 6798 * 6799 * Return codes 6800 * pointer to @phba - successful 6801 * NULL - error 6802 **/ 6803 static struct lpfc_hba * 6804 lpfc_hba_alloc(struct pci_dev *pdev) 6805 { 6806 struct lpfc_hba *phba; 6807 6808 /* Allocate memory for HBA structure */ 6809 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 6810 if (!phba) { 6811 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 6812 return NULL; 6813 } 6814 6815 /* Set reference to PCI device in HBA structure */ 6816 phba->pcidev = pdev; 6817 6818 /* Assign an unused board number */ 6819 phba->brd_no = lpfc_get_instance(); 6820 if (phba->brd_no < 0) { 6821 kfree(phba); 6822 return NULL; 6823 } 6824 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 6825 6826 spin_lock_init(&phba->ct_ev_lock); 6827 INIT_LIST_HEAD(&phba->ct_ev_waiters); 6828 6829 return phba; 6830 } 6831 6832 /** 6833 * lpfc_hba_free - Free driver hba data structure with a device. 6834 * @phba: pointer to lpfc hba data structure. 6835 * 6836 * This routine is invoked to free the driver hba data structure with an 6837 * HBA device. 6838 **/ 6839 static void 6840 lpfc_hba_free(struct lpfc_hba *phba) 6841 { 6842 /* Release the driver assigned board number */ 6843 idr_remove(&lpfc_hba_index, phba->brd_no); 6844 6845 /* Free memory allocated with sli3 rings */ 6846 kfree(phba->sli.sli3_ring); 6847 phba->sli.sli3_ring = NULL; 6848 6849 kfree(phba); 6850 return; 6851 } 6852 6853 /** 6854 * lpfc_create_shost - Create hba physical port with associated scsi host. 6855 * @phba: pointer to lpfc hba data structure. 6856 * 6857 * This routine is invoked to create HBA physical port and associate a SCSI 6858 * host with it. 
6859 * 6860 * Return codes 6861 * 0 - successful 6862 * other values - error 6863 **/ 6864 static int 6865 lpfc_create_shost(struct lpfc_hba *phba) 6866 { 6867 struct lpfc_vport *vport; 6868 struct Scsi_Host *shost; 6869 6870 /* Initialize HBA FC structure */ 6871 phba->fc_edtov = FF_DEF_EDTOV; 6872 phba->fc_ratov = FF_DEF_RATOV; 6873 phba->fc_altov = FF_DEF_ALTOV; 6874 phba->fc_arbtov = FF_DEF_ARBTOV; 6875 6876 atomic_set(&phba->sdev_cnt, 0); 6877 atomic_set(&phba->fc4ScsiInputRequests, 0); 6878 atomic_set(&phba->fc4ScsiOutputRequests, 0); 6879 atomic_set(&phba->fc4ScsiControlRequests, 0); 6880 atomic_set(&phba->fc4ScsiIoCmpls, 0); 6881 atomic_set(&phba->fc4NvmeInputRequests, 0); 6882 atomic_set(&phba->fc4NvmeOutputRequests, 0); 6883 atomic_set(&phba->fc4NvmeControlRequests, 0); 6884 atomic_set(&phba->fc4NvmeIoCmpls, 0); 6885 atomic_set(&phba->fc4NvmeLsRequests, 0); 6886 atomic_set(&phba->fc4NvmeLsCmpls, 0); 6887 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6888 if (!vport) 6889 return -ENODEV; 6890 6891 shost = lpfc_shost_from_vport(vport); 6892 phba->pport = vport; 6893 6894 if (phba->nvmet_support) { 6895 /* Only 1 vport (pport) will support NVME target */ 6896 if (phba->txrdy_payload_pool == NULL) { 6897 phba->txrdy_payload_pool = dma_pool_create( 6898 "txrdy_pool", &phba->pcidev->dev, 6899 TXRDY_PAYLOAD_LEN, 16, 0); 6900 if (phba->txrdy_payload_pool) { 6901 phba->targetport = NULL; 6902 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 6903 lpfc_printf_log(phba, KERN_INFO, 6904 LOG_INIT | LOG_NVME_DISC, 6905 "6076 NVME Target Found\n"); 6906 } 6907 } 6908 } 6909 6910 lpfc_debugfs_initialize(vport); 6911 /* Put reference to SCSI host to driver's device private data */ 6912 pci_set_drvdata(phba->pcidev, shost); 6913 6914 /* 6915 * At this point we are fully registered with PSA. In addition, 6916 * any initial discovery should be completed. 6917 */ 6918 vport->load_flag |= FC_ALLOW_FDMI; 6919 if (phba->cfg_enable_SmartSAN || 6920 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 6921 6922 /* Setup appropriate attribute masks */ 6923 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 6924 if (phba->cfg_enable_SmartSAN) 6925 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 6926 else 6927 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 6928 } 6929 return 0; 6930 } 6931 6932 /** 6933 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 6934 * @phba: pointer to lpfc hba data structure. 6935 * 6936 * This routine is invoked to destroy HBA physical port and the associated 6937 * SCSI host. 6938 **/ 6939 static void 6940 lpfc_destroy_shost(struct lpfc_hba *phba) 6941 { 6942 struct lpfc_vport *vport = phba->pport; 6943 6944 /* Destroy physical port that associated with the SCSI host */ 6945 destroy_port(vport); 6946 6947 return; 6948 } 6949 6950 /** 6951 * lpfc_setup_bg - Setup Block guard structures and debug areas. 6952 * @phba: pointer to lpfc hba data structure. 6953 * @shost: the shost to be used to detect Block guard settings. 6954 * 6955 * This routine sets up the local Block guard protocol settings for @shost. 6956 * This routine also allocates memory for debugging bg buffers. 
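 * The _dump_buf_data and _dump_buf_dif areas are file-scope buffers shared
 * across HBAs; they are sized by retrying __get_free_pages() with a
 * decreasing allocation order until a request succeeds.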
6957 **/ 6958 static void 6959 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 6960 { 6961 uint32_t old_mask; 6962 uint32_t old_guard; 6963 6964 int pagecnt = 10; 6965 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 6966 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6967 "1478 Registering BlockGuard with the " 6968 "SCSI layer\n"); 6969 6970 old_mask = phba->cfg_prot_mask; 6971 old_guard = phba->cfg_prot_guard; 6972 6973 /* Only allow supported values */ 6974 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 6975 SHOST_DIX_TYPE0_PROTECTION | 6976 SHOST_DIX_TYPE1_PROTECTION); 6977 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 6978 SHOST_DIX_GUARD_CRC); 6979 6980 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 6981 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 6982 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 6983 6984 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 6985 if ((old_mask != phba->cfg_prot_mask) || 6986 (old_guard != phba->cfg_prot_guard)) 6987 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6988 "1475 Registering BlockGuard with the " 6989 "SCSI layer: mask %d guard %d\n", 6990 phba->cfg_prot_mask, 6991 phba->cfg_prot_guard); 6992 6993 scsi_host_set_prot(shost, phba->cfg_prot_mask); 6994 scsi_host_set_guard(shost, phba->cfg_prot_guard); 6995 } else 6996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6997 "1479 Not Registering BlockGuard with the SCSI " 6998 "layer, Bad protection parameters: %d %d\n", 6999 old_mask, old_guard); 7000 } 7001 7002 if (!_dump_buf_data) { 7003 while (pagecnt) { 7004 spin_lock_init(&_dump_buf_lock); 7005 _dump_buf_data = 7006 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 7007 if (_dump_buf_data) { 7008 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7009 "9043 BLKGRD: allocated %d pages for " 7010 "_dump_buf_data at 0x%p\n", 7011 (1 << pagecnt), _dump_buf_data); 7012 _dump_buf_data_order = pagecnt; 7013 memset(_dump_buf_data, 0, 7014 ((1 << PAGE_SHIFT) << pagecnt)); 7015 break; 7016 } else 7017 --pagecnt; 7018 } 7019 if (!_dump_buf_data_order) 7020 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7021 "9044 BLKGRD: ERROR unable to allocate " 7022 "memory for hexdump\n"); 7023 } else 7024 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7025 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 7026 "\n", _dump_buf_data); 7027 if (!_dump_buf_dif) { 7028 while (pagecnt) { 7029 _dump_buf_dif = 7030 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 7031 if (_dump_buf_dif) { 7032 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7033 "9046 BLKGRD: allocated %d pages for " 7034 "_dump_buf_dif at 0x%p\n", 7035 (1 << pagecnt), _dump_buf_dif); 7036 _dump_buf_dif_order = pagecnt; 7037 memset(_dump_buf_dif, 0, 7038 ((1 << PAGE_SHIFT) << pagecnt)); 7039 break; 7040 } else 7041 --pagecnt; 7042 } 7043 if (!_dump_buf_dif_order) 7044 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7045 "9047 BLKGRD: ERROR unable to allocate " 7046 "memory for hexdump\n"); 7047 } else 7048 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7049 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 7050 _dump_buf_dif); 7051 } 7052 7053 /** 7054 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7055 * @phba: pointer to lpfc hba data structure. 7056 * 7057 * This routine is invoked to perform all the necessary post initialization 7058 * setup for the device. 
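 * This includes trimming shost->can_queue to the final cfg_hba_queue_depth,
 * setting up BlockGuard when the SLI3 BG option is enabled, and posting the
 * adapter arrival event to the FC transport.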
7059 **/ 7060 static void 7061 lpfc_post_init_setup(struct lpfc_hba *phba) 7062 { 7063 struct Scsi_Host *shost; 7064 struct lpfc_adapter_event_header adapter_event; 7065 7066 /* Get the default values for Model Name and Description */ 7067 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7068 7069 /* 7070 * hba setup may have changed the hba_queue_depth so we need to 7071 * adjust the value of can_queue. 7072 */ 7073 shost = pci_get_drvdata(phba->pcidev); 7074 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7075 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 7076 lpfc_setup_bg(phba, shost); 7077 7078 lpfc_host_attrib_init(shost); 7079 7080 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7081 spin_lock_irq(shost->host_lock); 7082 lpfc_poll_start_timer(phba); 7083 spin_unlock_irq(shost->host_lock); 7084 } 7085 7086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7087 "0428 Perform SCSI scan\n"); 7088 /* Send board arrival event to upper layer */ 7089 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7090 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7091 fc_host_post_vendor_event(shost, fc_get_event_number(), 7092 sizeof(adapter_event), 7093 (char *) &adapter_event, 7094 LPFC_NL_VENDOR_ID); 7095 return; 7096 } 7097 7098 /** 7099 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7100 * @phba: pointer to lpfc hba data structure. 7101 * 7102 * This routine is invoked to set up the PCI device memory space for device 7103 * with SLI-3 interface spec. 7104 * 7105 * Return codes 7106 * 0 - successful 7107 * other values - error 7108 **/ 7109 static int 7110 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7111 { 7112 struct pci_dev *pdev; 7113 unsigned long bar0map_len, bar2map_len; 7114 int i, hbq_count; 7115 void *ptr; 7116 int error = -ENODEV; 7117 7118 /* Obtain PCI device reference */ 7119 if (!phba->pcidev) 7120 return error; 7121 else 7122 pdev = phba->pcidev; 7123 7124 /* Set the device DMA mask size */ 7125 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 7126 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 7127 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 7128 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 7129 return error; 7130 } 7131 } 7132 7133 /* Get the bus address of Bar0 and Bar2 and the number of bytes 7134 * required by each mapping. 7135 */ 7136 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7137 bar0map_len = pci_resource_len(pdev, 0); 7138 7139 phba->pci_bar2_map = pci_resource_start(pdev, 2); 7140 bar2map_len = pci_resource_len(pdev, 2); 7141 7142 /* Map HBA SLIM to a kernel virtual address. */ 7143 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 7144 if (!phba->slim_memmap_p) { 7145 dev_printk(KERN_ERR, &pdev->dev, 7146 "ioremap failed for SLIM memory.\n"); 7147 goto out; 7148 } 7149 7150 /* Map HBA Control Registers to a kernel virtual address. 
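 * BAR2 provides the HA/CA/HS/HC register block referenced below; if this
 * mapping fails, the SLIM mapping set up above is unwound on the error path.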
*/ 7151 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 7152 if (!phba->ctrl_regs_memmap_p) { 7153 dev_printk(KERN_ERR, &pdev->dev, 7154 "ioremap failed for HBA control registers.\n"); 7155 goto out_iounmap_slim; 7156 } 7157 7158 /* Allocate memory for SLI-2 structures */ 7159 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7160 &phba->slim2p.phys, GFP_KERNEL); 7161 if (!phba->slim2p.virt) 7162 goto out_iounmap; 7163 7164 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7165 phba->mbox_ext = (phba->slim2p.virt + 7166 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7167 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7168 phba->IOCBs = (phba->slim2p.virt + 7169 offsetof(struct lpfc_sli2_slim, IOCBs)); 7170 7171 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7172 lpfc_sli_hbq_size(), 7173 &phba->hbqslimp.phys, 7174 GFP_KERNEL); 7175 if (!phba->hbqslimp.virt) 7176 goto out_free_slim; 7177 7178 hbq_count = lpfc_sli_hbq_count(); 7179 ptr = phba->hbqslimp.virt; 7180 for (i = 0; i < hbq_count; ++i) { 7181 phba->hbqs[i].hbq_virt = ptr; 7182 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7183 ptr += (lpfc_hbq_defs[i]->entry_count * 7184 sizeof(struct lpfc_hbq_entry)); 7185 } 7186 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7187 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7188 7189 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7190 7191 phba->MBslimaddr = phba->slim_memmap_p; 7192 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7193 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7194 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7195 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7196 7197 return 0; 7198 7199 out_free_slim: 7200 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7201 phba->slim2p.virt, phba->slim2p.phys); 7202 out_iounmap: 7203 iounmap(phba->ctrl_regs_memmap_p); 7204 out_iounmap_slim: 7205 iounmap(phba->slim_memmap_p); 7206 out: 7207 return error; 7208 } 7209 7210 /** 7211 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7212 * @phba: pointer to lpfc hba data structure. 7213 * 7214 * This routine is invoked to unset the PCI device memory space for device 7215 * with SLI-3 interface spec. 7216 **/ 7217 static void 7218 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 7219 { 7220 struct pci_dev *pdev; 7221 7222 /* Obtain PCI device reference */ 7223 if (!phba->pcidev) 7224 return; 7225 else 7226 pdev = phba->pcidev; 7227 7228 /* Free coherent DMA memory allocated */ 7229 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7230 phba->hbqslimp.virt, phba->hbqslimp.phys); 7231 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7232 phba->slim2p.virt, phba->slim2p.phys); 7233 7234 /* I/O memory unmap */ 7235 iounmap(phba->ctrl_regs_memmap_p); 7236 iounmap(phba->slim_memmap_p); 7237 7238 return; 7239 } 7240 7241 /** 7242 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 7243 * @phba: pointer to lpfc hba data structure. 7244 * 7245 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 7246 * done and check status. 7247 * 7248 * Return 0 if successful, otherwise -ENODEV. 
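 * The port semaphore register is polled up to 3000 times with a 10 ms sleep
 * between reads (the roughly 30 second budget noted in the code below);
 * if_type specific error registers are then examined so that a port that
 * failed POST is not brought up.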
7249 **/
7250 int
7251 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
7252 {
7253 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7254 struct lpfc_register reg_data;
7255 int i, port_error = 0;
7256 uint32_t if_type;
7257
7258 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
7259 memset(&reg_data, 0, sizeof(reg_data));
7260 if (!phba->sli4_hba.PSMPHRregaddr)
7261 return -ENODEV;
7262
7263 /* Wait up to 30 seconds for the SLI Port POST done and ready */
7264 for (i = 0; i < 3000; i++) {
7265 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7266 &portsmphr_reg.word0) ||
7267 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
7268 /* Port has a fatal POST error, break out */
7269 port_error = -ENODEV;
7270 break;
7271 }
7272 if (LPFC_POST_STAGE_PORT_READY ==
7273 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
7274 break;
7275 msleep(10);
7276 }
7277
7278 /*
7279 * If there was a port error during POST, then don't proceed with
7280 * other register reads as the data may not be valid. Just exit.
7281 */
7282 if (port_error) {
7283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7284 "1408 Port Failed POST - portsmphr=0x%x, "
7285 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7286 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7287 portsmphr_reg.word0,
7288 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7289 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7290 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7291 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7292 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7293 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7294 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7295 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7296 } else {
7297 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7298 "2534 Device Info: SLIFamily=0x%x, "
7299 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7300 "SLIHint_2=0x%x, FT=0x%x\n",
7301 bf_get(lpfc_sli_intf_sli_family,
7302 &phba->sli4_hba.sli_intf),
7303 bf_get(lpfc_sli_intf_slirev,
7304 &phba->sli4_hba.sli_intf),
7305 bf_get(lpfc_sli_intf_if_type,
7306 &phba->sli4_hba.sli_intf),
7307 bf_get(lpfc_sli_intf_sli_hint1,
7308 &phba->sli4_hba.sli_intf),
7309 bf_get(lpfc_sli_intf_sli_hint2,
7310 &phba->sli4_hba.sli_intf),
7311 bf_get(lpfc_sli_intf_func_type,
7312 &phba->sli4_hba.sli_intf));
7313 /*
7314 * Check for other Port errors during the initialization
7315 * process. Fail the load if the port did not come up
7316 * correctly.
7317 */
7318 if_type = bf_get(lpfc_sli_intf_if_type,
7319 &phba->sli4_hba.sli_intf);
7320 switch (if_type) {
7321 case LPFC_SLI_INTF_IF_TYPE_0:
7322 phba->sli4_hba.ue_mask_lo =
7323 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7324 phba->sli4_hba.ue_mask_hi =
7325 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7326 uerrlo_reg.word0 =
7327 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7328 uerrhi_reg.word0 =
7329 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7330 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7331 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7332 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7333 "1422 Unrecoverable Error "
7334 "Detected during POST "
7335 "uerr_lo_reg=0x%x, "
7336 "uerr_hi_reg=0x%x, "
7337 "ue_mask_lo_reg=0x%x, "
7338 "ue_mask_hi_reg=0x%x\n",
7339 uerrlo_reg.word0,
7340 uerrhi_reg.word0,
7341 phba->sli4_hba.ue_mask_lo,
7342 phba->sli4_hba.ue_mask_hi);
7343 port_error = -ENODEV;
7344 }
7345 break;
7346 case LPFC_SLI_INTF_IF_TYPE_2:
7347 /* Final checks. The port status should be clean.
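 * "Clean" here means the status register reports no error, unless the
 * restart-needed (RN) bit is also set; on a real error the ERR1/ERR2
 * registers are captured into work_status[] for the log message below.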
*/
7348 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7349 &reg_data.word0) ||
7350 (bf_get(lpfc_sliport_status_err, &reg_data) &&
7351 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
7352 phba->work_status[0] =
7353 readl(phba->sli4_hba.u.if_type2.
7354 ERR1regaddr);
7355 phba->work_status[1] =
7356 readl(phba->sli4_hba.u.if_type2.
7357 ERR2regaddr);
7358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7359 "2888 Unrecoverable port error "
7360 "following POST: port status reg "
7361 "0x%x, port_smphr reg 0x%x, "
7362 "error 1=0x%x, error 2=0x%x\n",
7363 reg_data.word0,
7364 portsmphr_reg.word0,
7365 phba->work_status[0],
7366 phba->work_status[1]);
7367 port_error = -ENODEV;
7368 }
7369 break;
7370 case LPFC_SLI_INTF_IF_TYPE_1:
7371 default:
7372 break;
7373 }
7374 }
7375 return port_error;
7376 }
7377
7378 /**
7379 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
7380 * @phba: pointer to lpfc hba data structure.
7381 * @if_type: The SLI4 interface type getting configured.
7382 *
7383 * This routine is invoked to set up SLI4 BAR0 PCI config space register
7384 * memory map.
7385 **/
7386 static void
7387 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7388 {
7389 switch (if_type) {
7390 case LPFC_SLI_INTF_IF_TYPE_0:
7391 phba->sli4_hba.u.if_type0.UERRLOregaddr =
7392 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
7393 phba->sli4_hba.u.if_type0.UERRHIregaddr =
7394 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
7395 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
7396 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
7397 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
7398 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
7399 phba->sli4_hba.SLIINTFregaddr =
7400 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7401 break;
7402 case LPFC_SLI_INTF_IF_TYPE_2:
7403 phba->sli4_hba.u.if_type2.EQDregaddr =
7404 phba->sli4_hba.conf_regs_memmap_p +
7405 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
7406 phba->sli4_hba.u.if_type2.ERR1regaddr =
7407 phba->sli4_hba.conf_regs_memmap_p +
7408 LPFC_CTL_PORT_ER1_OFFSET;
7409 phba->sli4_hba.u.if_type2.ERR2regaddr =
7410 phba->sli4_hba.conf_regs_memmap_p +
7411 LPFC_CTL_PORT_ER2_OFFSET;
7412 phba->sli4_hba.u.if_type2.CTRLregaddr =
7413 phba->sli4_hba.conf_regs_memmap_p +
7414 LPFC_CTL_PORT_CTL_OFFSET;
7415 phba->sli4_hba.u.if_type2.STATUSregaddr =
7416 phba->sli4_hba.conf_regs_memmap_p +
7417 LPFC_CTL_PORT_STA_OFFSET;
7418 phba->sli4_hba.SLIINTFregaddr =
7419 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7420 phba->sli4_hba.PSMPHRregaddr =
7421 phba->sli4_hba.conf_regs_memmap_p +
7422 LPFC_CTL_PORT_SEM_OFFSET;
7423 phba->sli4_hba.RQDBregaddr =
7424 phba->sli4_hba.conf_regs_memmap_p +
7425 LPFC_ULP0_RQ_DOORBELL;
7426 phba->sli4_hba.WQDBregaddr =
7427 phba->sli4_hba.conf_regs_memmap_p +
7428 LPFC_ULP0_WQ_DOORBELL;
7429 phba->sli4_hba.EQCQDBregaddr =
7430 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
7431 phba->sli4_hba.MQDBregaddr =
7432 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
7433 phba->sli4_hba.BMBXregaddr =
7434 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
7435 break;
7436 case LPFC_SLI_INTF_IF_TYPE_1:
7437 default:
7438 dev_printk(KERN_ERR, &phba->pcidev->dev,
7439 "FATAL - unsupported SLI4 interface type - %d\n",
7440 if_type);
7441 break;
7442 }
7443 }
7444
7445 /**
7446 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
7447 * @phba: pointer to lpfc hba data structure.
7448 *
7449 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
7450 * memory map.
7451 **/
7452 static void
7453 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
7454 {
7455 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7456 LPFC_SLIPORT_IF0_SMPHR;
7457 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7458 LPFC_HST_ISR0;
7459 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7460 LPFC_HST_IMR0;
7461 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7462 LPFC_HST_ISCR0;
7463 }
7464
7465 /**
7466 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
7467 * @phba: pointer to lpfc hba data structure.
7468 * @vf: virtual function number
7469 *
7470 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
7471 * based on the given virtual function number, @vf.
7472 *
7473 * Return 0 if successful, otherwise -ENODEV.
7474 **/
7475 static int
7476 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
7477 {
7478 if (vf > LPFC_VIR_FUNC_MAX)
7479 return -ENODEV;
7480
7481 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7482 vf * LPFC_VFR_PAGE_SIZE +
7483 LPFC_ULP0_RQ_DOORBELL);
7484 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7485 vf * LPFC_VFR_PAGE_SIZE +
7486 LPFC_ULP0_WQ_DOORBELL);
7487 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7488 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
7489 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7490 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
7491 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7492 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
7493 return 0;
7494 }
7495
7496 /**
7497 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
7498 * @phba: pointer to lpfc hba data structure.
7499 *
7500 * This routine is invoked to create the bootstrap mailbox
7501 * region consistent with the SLI-4 interface spec. This
7502 * routine allocates all memory necessary to communicate
7503 * mailbox commands to the port and sets up all alignment
7504 * needs. No locks are expected to be held when calling
7505 * this routine.
7506 *
7507 * Return codes
7508 * 0 - successful
7509 * -ENOMEM - could not allocate memory.
7510 **/
7511 static int
7512 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
7513 {
7514 uint32_t bmbx_size;
7515 struct lpfc_dmabuf *dmabuf;
7516 struct dma_address *dma_address;
7517 uint32_t pa_addr;
7518 uint64_t phys_addr;
7519
7520 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7521 if (!dmabuf)
7522 return -ENOMEM;
7523
7524 /*
7525 * The bootstrap mailbox region is comprised of 2 parts
7526 * plus an alignment restriction of 16 bytes.
7527 */
7528 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
7529 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
7530 &dmabuf->phys, GFP_KERNEL);
7531 if (!dmabuf->virt) {
7532 kfree(dmabuf);
7533 return -ENOMEM;
7534 }
7535
7536 /*
7537 * Initialize the bootstrap mailbox pointers now so that the register
7538 * operations are simple later. The mailbox dma address is required
7539 * to be 16-byte aligned. Also align the virtual memory as each
7540 * mailbox is copied into the bmbx mailbox region before issuing the
7541 * command to the port.
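 * For example (illustrative numbers only): a coherent buffer handed back
 * at physical address 0x1000008 gives aphys = 0x1000010 after ALIGN(),
 * and the LPFC_ALIGN_16_BYTE - 1 bytes of slack added to bmbx_size
 * guarantee the aligned region still holds a full struct lpfc_bmbx_create.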
7542 */ 7543 phba->sli4_hba.bmbx.dmabuf = dmabuf; 7544 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 7545 7546 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 7547 LPFC_ALIGN_16_BYTE); 7548 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 7549 LPFC_ALIGN_16_BYTE); 7550 7551 /* 7552 * Set the high and low physical addresses now. The SLI4 alignment 7553 * requirement is 16 bytes and the mailbox is posted to the port 7554 * as two 30-bit addresses. The other data is a bit marking whether 7555 * the 30-bit address is the high or low address. 7556 * Upcast bmbx aphys to 64bits so shift instruction compiles 7557 * clean on 32 bit machines. 7558 */ 7559 dma_address = &phba->sli4_hba.bmbx.dma_address; 7560 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 7561 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 7562 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 7563 LPFC_BMBX_BIT1_ADDR_HI); 7564 7565 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 7566 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 7567 LPFC_BMBX_BIT1_ADDR_LO); 7568 return 0; 7569 } 7570 7571 /** 7572 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 7573 * @phba: pointer to lpfc hba data structure. 7574 * 7575 * This routine is invoked to teardown the bootstrap mailbox 7576 * region and release all host resources. This routine requires 7577 * the caller to ensure all mailbox commands recovered, no 7578 * additional mailbox comands are sent, and interrupts are disabled 7579 * before calling this routine. 7580 * 7581 **/ 7582 static void 7583 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 7584 { 7585 dma_free_coherent(&phba->pcidev->dev, 7586 phba->sli4_hba.bmbx.bmbx_size, 7587 phba->sli4_hba.bmbx.dmabuf->virt, 7588 phba->sli4_hba.bmbx.dmabuf->phys); 7589 7590 kfree(phba->sli4_hba.bmbx.dmabuf); 7591 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 7592 } 7593 7594 /** 7595 * lpfc_sli4_read_config - Get the config parameters. 7596 * @phba: pointer to lpfc hba data structure. 7597 * 7598 * This routine is invoked to read the configuration parameters from the HBA. 7599 * The configuration parameters are used to set the base and maximum values 7600 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 7601 * allocation for the port. 7602 * 7603 * Return codes 7604 * 0 - successful 7605 * -ENOMEM - No available memory 7606 * -EIO - The mailbox failed to complete successfully. 
7607 **/ 7608 int 7609 lpfc_sli4_read_config(struct lpfc_hba *phba) 7610 { 7611 LPFC_MBOXQ_t *pmb; 7612 struct lpfc_mbx_read_config *rd_config; 7613 union lpfc_sli4_cfg_shdr *shdr; 7614 uint32_t shdr_status, shdr_add_status; 7615 struct lpfc_mbx_get_func_cfg *get_func_cfg; 7616 struct lpfc_rsrc_desc_fcfcoe *desc; 7617 char *pdesc_0; 7618 uint16_t forced_link_speed; 7619 uint32_t if_type; 7620 int length, i, rc = 0, rc2; 7621 7622 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7623 if (!pmb) { 7624 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7625 "2011 Unable to allocate memory for issuing " 7626 "SLI_CONFIG_SPECIAL mailbox command\n"); 7627 return -ENOMEM; 7628 } 7629 7630 lpfc_read_config(phba, pmb); 7631 7632 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7633 if (rc != MBX_SUCCESS) { 7634 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7635 "2012 Mailbox failed , mbxCmd x%x " 7636 "READ_CONFIG, mbxStatus x%x\n", 7637 bf_get(lpfc_mqe_command, &pmb->u.mqe), 7638 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 7639 rc = -EIO; 7640 } else { 7641 rd_config = &pmb->u.mqe.un.rd_config; 7642 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 7643 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 7644 phba->sli4_hba.lnk_info.lnk_tp = 7645 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 7646 phba->sli4_hba.lnk_info.lnk_no = 7647 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 7648 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7649 "3081 lnk_type:%d, lnk_numb:%d\n", 7650 phba->sli4_hba.lnk_info.lnk_tp, 7651 phba->sli4_hba.lnk_info.lnk_no); 7652 } else 7653 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 7654 "3082 Mailbox (x%x) returned ldv:x0\n", 7655 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 7656 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 7657 phba->bbcredit_support = 1; 7658 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 7659 } 7660 7661 phba->sli4_hba.extents_in_use = 7662 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 7663 phba->sli4_hba.max_cfg_param.max_xri = 7664 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 7665 phba->sli4_hba.max_cfg_param.xri_base = 7666 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 7667 phba->sli4_hba.max_cfg_param.max_vpi = 7668 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 7669 phba->sli4_hba.max_cfg_param.vpi_base = 7670 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 7671 phba->sli4_hba.max_cfg_param.max_rpi = 7672 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 7673 phba->sli4_hba.max_cfg_param.rpi_base = 7674 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 7675 phba->sli4_hba.max_cfg_param.max_vfi = 7676 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 7677 phba->sli4_hba.max_cfg_param.vfi_base = 7678 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 7679 phba->sli4_hba.max_cfg_param.max_fcfi = 7680 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 7681 phba->sli4_hba.max_cfg_param.max_eq = 7682 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 7683 phba->sli4_hba.max_cfg_param.max_rq = 7684 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 7685 phba->sli4_hba.max_cfg_param.max_wq = 7686 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 7687 phba->sli4_hba.max_cfg_param.max_cq = 7688 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 7689 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 7690 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 7691 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 7692 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 7693 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 
7694 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 7695 phba->max_vports = phba->max_vpi; 7696 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7697 "2003 cfg params Extents? %d " 7698 "XRI(B:%d M:%d), " 7699 "VPI(B:%d M:%d) " 7700 "VFI(B:%d M:%d) " 7701 "RPI(B:%d M:%d) " 7702 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", 7703 phba->sli4_hba.extents_in_use, 7704 phba->sli4_hba.max_cfg_param.xri_base, 7705 phba->sli4_hba.max_cfg_param.max_xri, 7706 phba->sli4_hba.max_cfg_param.vpi_base, 7707 phba->sli4_hba.max_cfg_param.max_vpi, 7708 phba->sli4_hba.max_cfg_param.vfi_base, 7709 phba->sli4_hba.max_cfg_param.max_vfi, 7710 phba->sli4_hba.max_cfg_param.rpi_base, 7711 phba->sli4_hba.max_cfg_param.max_rpi, 7712 phba->sli4_hba.max_cfg_param.max_fcfi, 7713 phba->sli4_hba.max_cfg_param.max_eq, 7714 phba->sli4_hba.max_cfg_param.max_cq, 7715 phba->sli4_hba.max_cfg_param.max_wq, 7716 phba->sli4_hba.max_cfg_param.max_rq); 7717 7718 } 7719 7720 if (rc) 7721 goto read_cfg_out; 7722 7723 /* Update link speed if forced link speed is supported */ 7724 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7725 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 7726 forced_link_speed = 7727 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 7728 if (forced_link_speed) { 7729 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 7730 7731 switch (forced_link_speed) { 7732 case LINK_SPEED_1G: 7733 phba->cfg_link_speed = 7734 LPFC_USER_LINK_SPEED_1G; 7735 break; 7736 case LINK_SPEED_2G: 7737 phba->cfg_link_speed = 7738 LPFC_USER_LINK_SPEED_2G; 7739 break; 7740 case LINK_SPEED_4G: 7741 phba->cfg_link_speed = 7742 LPFC_USER_LINK_SPEED_4G; 7743 break; 7744 case LINK_SPEED_8G: 7745 phba->cfg_link_speed = 7746 LPFC_USER_LINK_SPEED_8G; 7747 break; 7748 case LINK_SPEED_10G: 7749 phba->cfg_link_speed = 7750 LPFC_USER_LINK_SPEED_10G; 7751 break; 7752 case LINK_SPEED_16G: 7753 phba->cfg_link_speed = 7754 LPFC_USER_LINK_SPEED_16G; 7755 break; 7756 case LINK_SPEED_32G: 7757 phba->cfg_link_speed = 7758 LPFC_USER_LINK_SPEED_32G; 7759 break; 7760 case 0xffff: 7761 phba->cfg_link_speed = 7762 LPFC_USER_LINK_SPEED_AUTO; 7763 break; 7764 default: 7765 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7766 "0047 Unrecognized link " 7767 "speed : %d\n", 7768 forced_link_speed); 7769 phba->cfg_link_speed = 7770 LPFC_USER_LINK_SPEED_AUTO; 7771 } 7772 } 7773 } 7774 7775 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 7776 length = phba->sli4_hba.max_cfg_param.max_xri - 7777 lpfc_sli4_get_els_iocb_cnt(phba); 7778 if (phba->cfg_hba_queue_depth > length) { 7779 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7780 "3361 HBA queue depth changed from %d to %d\n", 7781 phba->cfg_hba_queue_depth, length); 7782 phba->cfg_hba_queue_depth = length; 7783 } 7784 7785 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 7786 LPFC_SLI_INTF_IF_TYPE_2) 7787 goto read_cfg_out; 7788 7789 /* get the pf# and vf# for SLI4 if_type 2 port */ 7790 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 7791 sizeof(struct lpfc_sli4_cfg_mhdr)); 7792 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 7793 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 7794 length, LPFC_SLI4_MBX_EMBED); 7795 7796 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7797 shdr = (union lpfc_sli4_cfg_shdr *) 7798 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 7799 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7800 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7801 if (rc2 || shdr_status || shdr_add_status) { 7802 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7803 "3026 Mailbox failed , mbxCmd x%x " 
7804 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 7805 bf_get(lpfc_mqe_command, &pmb->u.mqe), 7806 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 7807 goto read_cfg_out; 7808 } 7809 7810 /* search for fc_fcoe resrouce descriptor */ 7811 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 7812 7813 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 7814 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 7815 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 7816 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 7817 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 7818 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 7819 goto read_cfg_out; 7820 7821 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 7822 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 7823 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 7824 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 7825 phba->sli4_hba.iov.pf_number = 7826 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 7827 phba->sli4_hba.iov.vf_number = 7828 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 7829 break; 7830 } 7831 } 7832 7833 if (i < LPFC_RSRC_DESC_MAX_NUM) 7834 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7835 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 7836 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 7837 phba->sli4_hba.iov.vf_number); 7838 else 7839 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7840 "3028 GET_FUNCTION_CONFIG: failed to find " 7841 "Resrouce Descriptor:x%x\n", 7842 LPFC_RSRC_DESC_TYPE_FCFCOE); 7843 7844 read_cfg_out: 7845 mempool_free(pmb, phba->mbox_mem_pool); 7846 return rc; 7847 } 7848 7849 /** 7850 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 7851 * @phba: pointer to lpfc hba data structure. 7852 * 7853 * This routine is invoked to setup the port-side endian order when 7854 * the port if_type is 0. This routine has no function for other 7855 * if_types. 7856 * 7857 * Return codes 7858 * 0 - successful 7859 * -ENOMEM - No available memory 7860 * -EIO - The mailbox failed to complete successfully. 7861 **/ 7862 static int 7863 lpfc_setup_endian_order(struct lpfc_hba *phba) 7864 { 7865 LPFC_MBOXQ_t *mboxq; 7866 uint32_t if_type, rc = 0; 7867 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 7868 HOST_ENDIAN_HIGH_WORD1}; 7869 7870 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7871 switch (if_type) { 7872 case LPFC_SLI_INTF_IF_TYPE_0: 7873 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 7874 GFP_KERNEL); 7875 if (!mboxq) { 7876 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7877 "0492 Unable to allocate memory for " 7878 "issuing SLI_CONFIG_SPECIAL mailbox " 7879 "command\n"); 7880 return -ENOMEM; 7881 } 7882 7883 /* 7884 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 7885 * two words to contain special data values and no other data. 7886 */ 7887 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 7888 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 7889 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7890 if (rc != MBX_SUCCESS) { 7891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7892 "0493 SLI_CONFIG_SPECIAL mailbox " 7893 "failed with status x%x\n", 7894 rc); 7895 rc = -EIO; 7896 } 7897 mempool_free(mboxq, phba->mbox_mem_pool); 7898 break; 7899 case LPFC_SLI_INTF_IF_TYPE_2: 7900 case LPFC_SLI_INTF_IF_TYPE_1: 7901 default: 7902 break; 7903 } 7904 return rc; 7905 } 7906 7907 /** 7908 * lpfc_sli4_queue_verify - Verify and update EQ counts 7909 * @phba: pointer to lpfc hba data structure. 7910 * 7911 * This routine is invoked to check the user settable queue counts for EQs. 
7912 * After this routine is called the counts will be set to valid values that 7913 * adhere to the constraints of the system's interrupt vectors and the port's 7914 * queue resources. 7915 * 7916 * Return codes 7917 * 0 - successful 7918 * -ENOMEM - No available memory 7919 **/ 7920 static int 7921 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 7922 { 7923 int io_channel; 7924 int fof_vectors = phba->cfg_fof ? 1 : 0; 7925 7926 /* 7927 * Sanity check for configured queue parameters against the run-time 7928 * device parameters 7929 */ 7930 7931 /* Sanity check on HBA EQ parameters */ 7932 io_channel = phba->io_channel_irqs; 7933 7934 if (phba->sli4_hba.num_online_cpu < io_channel) { 7935 lpfc_printf_log(phba, 7936 KERN_ERR, LOG_INIT, 7937 "3188 Reducing IO channels to match number of " 7938 "online CPUs: from %d to %d\n", 7939 io_channel, phba->sli4_hba.num_online_cpu); 7940 io_channel = phba->sli4_hba.num_online_cpu; 7941 } 7942 7943 if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) { 7944 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7945 "2575 Reducing IO channels to match number of " 7946 "available EQs: from %d to %d\n", 7947 io_channel, 7948 phba->sli4_hba.max_cfg_param.max_eq); 7949 io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors; 7950 } 7951 7952 /* The actual number of FCP / NVME event queues adopted */ 7953 if (io_channel != phba->io_channel_irqs) 7954 phba->io_channel_irqs = io_channel; 7955 if (phba->cfg_fcp_io_channel > io_channel) 7956 phba->cfg_fcp_io_channel = io_channel; 7957 if (phba->cfg_nvme_io_channel > io_channel) 7958 phba->cfg_nvme_io_channel = io_channel; 7959 if (phba->nvmet_support) { 7960 if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq) 7961 phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; 7962 } 7963 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 7964 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 7965 7966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7967 "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n", 7968 phba->io_channel_irqs, phba->cfg_fcp_io_channel, 7969 phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq); 7970 7971 /* Get EQ depth from module parameter, fake the default for now */ 7972 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 7973 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 7974 7975 /* Get CQ depth from module parameter, fake the default for now */ 7976 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 7977 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 7978 return 0; 7979 } 7980 7981 static int 7982 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) 7983 { 7984 struct lpfc_queue *qdesc; 7985 7986 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 7987 phba->sli4_hba.cq_esize, 7988 LPFC_CQE_EXP_COUNT); 7989 if (!qdesc) { 7990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7991 "0508 Failed allocate fast-path NVME CQ (%d)\n", 7992 wqidx); 7993 return 1; 7994 } 7995 phba->sli4_hba.nvme_cq[wqidx] = qdesc; 7996 7997 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 7998 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT); 7999 if (!qdesc) { 8000 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8001 "0509 Failed allocate fast-path NVME WQ (%d)\n", 8002 wqidx); 8003 return 1; 8004 } 8005 phba->sli4_hba.nvme_wq[wqidx] = qdesc; 8006 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8007 return 0; 8008 } 8009 8010 static int 8011 lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) 8012 { 8013 struct lpfc_queue *qdesc; 8014 8015 /* Create Fast Path FCP CQs */ 8016 if (phba->fcp_embed_io) 8017 /* Increase the CQ size when 
WQEs contain an embedded cdb */
8018 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8019 phba->sli4_hba.cq_esize,
8020 LPFC_CQE_EXP_COUNT);
8021
8022 else
8023 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8024 phba->sli4_hba.cq_esize,
8025 phba->sli4_hba.cq_ecount);
8026 if (!qdesc) {
8027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8028 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
8029 return 1;
8030 }
8031 phba->sli4_hba.fcp_cq[wqidx] = qdesc;
8032
8033 /* Create Fast Path FCP WQs */
8034 if (phba->fcp_embed_io)
8035 /* Increase the WQ size when WQEs contain an embedded cdb */
8036 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8037 LPFC_WQE128_SIZE,
8038 LPFC_WQE_EXP_COUNT);
8039 else
8040 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8041 phba->sli4_hba.wq_esize,
8042 phba->sli4_hba.wq_ecount);
8043 if (!qdesc) {
8044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8045 "0503 Failed allocate fast-path FCP WQ (%d)\n",
8046 wqidx);
8047 return 1;
8048 }
8049 phba->sli4_hba.fcp_wq[wqidx] = qdesc;
8050 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8051 return 0;
8052 }
8053
8054 /**
8055 * lpfc_sli4_queue_create - Create all the SLI4 queues
8056 * @phba: pointer to lpfc hba data structure.
8057 *
8058 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
8059 * operation. For each SLI4 queue type, the parameters such as queue entry
8060 * count (queue depth) shall be taken from the module parameter. For now,
8061 * we just use some constant number as placeholder.
8062 *
8063 * Return codes
8064 * 0 - successful
8065 * -ENOMEM - No available memory
8066 * -EIO - The mailbox failed to complete successfully.
8067 **/
8068 int
8069 lpfc_sli4_queue_create(struct lpfc_hba *phba)
8070 {
8071 struct lpfc_queue *qdesc;
8072 int idx, io_channel;
8073
8074 /*
8075 * Create HBA Record arrays.
8076 * Both NVME and FCP will share that same vectors / EQs 8077 */ 8078 io_channel = phba->io_channel_irqs; 8079 if (!io_channel) 8080 return -ERANGE; 8081 8082 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 8083 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 8084 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 8085 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 8086 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 8087 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 8088 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8089 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8090 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8091 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8092 8093 phba->sli4_hba.hba_eq = kcalloc(io_channel, 8094 sizeof(struct lpfc_queue *), 8095 GFP_KERNEL); 8096 if (!phba->sli4_hba.hba_eq) { 8097 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8098 "2576 Failed allocate memory for " 8099 "fast-path EQ record array\n"); 8100 goto out_error; 8101 } 8102 8103 if (phba->cfg_fcp_io_channel) { 8104 phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel, 8105 sizeof(struct lpfc_queue *), 8106 GFP_KERNEL); 8107 if (!phba->sli4_hba.fcp_cq) { 8108 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8109 "2577 Failed allocate memory for " 8110 "fast-path CQ record array\n"); 8111 goto out_error; 8112 } 8113 phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel, 8114 sizeof(struct lpfc_queue *), 8115 GFP_KERNEL); 8116 if (!phba->sli4_hba.fcp_wq) { 8117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8118 "2578 Failed allocate memory for " 8119 "fast-path FCP WQ record array\n"); 8120 goto out_error; 8121 } 8122 /* 8123 * Since the first EQ can have multiple CQs associated with it, 8124 * this array is used to quickly see if we have a FCP fast-path 8125 * CQ match. 8126 */ 8127 phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel, 8128 sizeof(uint16_t), 8129 GFP_KERNEL); 8130 if (!phba->sli4_hba.fcp_cq_map) { 8131 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8132 "2545 Failed allocate memory for " 8133 "fast-path CQ map\n"); 8134 goto out_error; 8135 } 8136 } 8137 8138 if (phba->cfg_nvme_io_channel) { 8139 phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel, 8140 sizeof(struct lpfc_queue *), 8141 GFP_KERNEL); 8142 if (!phba->sli4_hba.nvme_cq) { 8143 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8144 "6077 Failed allocate memory for " 8145 "fast-path CQ record array\n"); 8146 goto out_error; 8147 } 8148 8149 phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel, 8150 sizeof(struct lpfc_queue *), 8151 GFP_KERNEL); 8152 if (!phba->sli4_hba.nvme_wq) { 8153 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8154 "2581 Failed allocate memory for " 8155 "fast-path NVME WQ record array\n"); 8156 goto out_error; 8157 } 8158 8159 /* 8160 * Since the first EQ can have multiple CQs associated with it, 8161 * this array is used to quickly see if we have a NVME fast-path 8162 * CQ match. 
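 * Each entry is filled with the CQ's queue_id when lpfc_create_wq_cq()
 * sets the queue up, so later lookups can match a completed CQ id to its
 * io channel index.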
8163 */ 8164 phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel, 8165 sizeof(uint16_t), 8166 GFP_KERNEL); 8167 if (!phba->sli4_hba.nvme_cq_map) { 8168 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8169 "6078 Failed allocate memory for " 8170 "fast-path CQ map\n"); 8171 goto out_error; 8172 } 8173 8174 if (phba->nvmet_support) { 8175 phba->sli4_hba.nvmet_cqset = kcalloc( 8176 phba->cfg_nvmet_mrq, 8177 sizeof(struct lpfc_queue *), 8178 GFP_KERNEL); 8179 if (!phba->sli4_hba.nvmet_cqset) { 8180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8181 "3121 Fail allocate memory for " 8182 "fast-path CQ set array\n"); 8183 goto out_error; 8184 } 8185 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 8186 phba->cfg_nvmet_mrq, 8187 sizeof(struct lpfc_queue *), 8188 GFP_KERNEL); 8189 if (!phba->sli4_hba.nvmet_mrq_hdr) { 8190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8191 "3122 Fail allocate memory for " 8192 "fast-path RQ set hdr array\n"); 8193 goto out_error; 8194 } 8195 phba->sli4_hba.nvmet_mrq_data = kcalloc( 8196 phba->cfg_nvmet_mrq, 8197 sizeof(struct lpfc_queue *), 8198 GFP_KERNEL); 8199 if (!phba->sli4_hba.nvmet_mrq_data) { 8200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8201 "3124 Fail allocate memory for " 8202 "fast-path RQ set data array\n"); 8203 goto out_error; 8204 } 8205 } 8206 } 8207 8208 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8209 8210 /* Create HBA Event Queues (EQs) */ 8211 for (idx = 0; idx < io_channel; idx++) { 8212 /* Create EQs */ 8213 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8214 phba->sli4_hba.eq_esize, 8215 phba->sli4_hba.eq_ecount); 8216 if (!qdesc) { 8217 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8218 "0497 Failed allocate EQ (%d)\n", idx); 8219 goto out_error; 8220 } 8221 phba->sli4_hba.hba_eq[idx] = qdesc; 8222 } 8223 8224 /* FCP and NVME io channels are not required to be balanced */ 8225 8226 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) 8227 if (lpfc_alloc_fcp_wq_cq(phba, idx)) 8228 goto out_error; 8229 8230 for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) 8231 if (lpfc_alloc_nvme_wq_cq(phba, idx)) 8232 goto out_error; 8233 8234 if (phba->nvmet_support) { 8235 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8236 qdesc = lpfc_sli4_queue_alloc(phba, 8237 LPFC_DEFAULT_PAGE_SIZE, 8238 phba->sli4_hba.cq_esize, 8239 phba->sli4_hba.cq_ecount); 8240 if (!qdesc) { 8241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8242 "3142 Failed allocate NVME " 8243 "CQ Set (%d)\n", idx); 8244 goto out_error; 8245 } 8246 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 8247 } 8248 } 8249 8250 /* 8251 * Create Slow Path Completion Queues (CQs) 8252 */ 8253 8254 /* Create slow-path Mailbox Command Complete Queue */ 8255 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8256 phba->sli4_hba.cq_esize, 8257 phba->sli4_hba.cq_ecount); 8258 if (!qdesc) { 8259 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8260 "0500 Failed allocate slow-path mailbox CQ\n"); 8261 goto out_error; 8262 } 8263 phba->sli4_hba.mbx_cq = qdesc; 8264 8265 /* Create slow-path ELS Complete Queue */ 8266 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8267 phba->sli4_hba.cq_esize, 8268 phba->sli4_hba.cq_ecount); 8269 if (!qdesc) { 8270 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8271 "0501 Failed allocate slow-path ELS CQ\n"); 8272 goto out_error; 8273 } 8274 phba->sli4_hba.els_cq = qdesc; 8275 8276 8277 /* 8278 * Create Slow Path Work Queues (WQs) 8279 */ 8280 8281 /* Create Mailbox Command Queue */ 8282 8283 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8284 
phba->sli4_hba.mq_esize, 8285 phba->sli4_hba.mq_ecount); 8286 if (!qdesc) { 8287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8288 "0505 Failed allocate slow-path MQ\n"); 8289 goto out_error; 8290 } 8291 phba->sli4_hba.mbx_wq = qdesc; 8292 8293 /* 8294 * Create ELS Work Queues 8295 */ 8296 8297 /* Create slow-path ELS Work Queue */ 8298 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8299 phba->sli4_hba.wq_esize, 8300 phba->sli4_hba.wq_ecount); 8301 if (!qdesc) { 8302 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8303 "0504 Failed allocate slow-path ELS WQ\n"); 8304 goto out_error; 8305 } 8306 phba->sli4_hba.els_wq = qdesc; 8307 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8308 8309 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8310 /* Create NVME LS Complete Queue */ 8311 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8312 phba->sli4_hba.cq_esize, 8313 phba->sli4_hba.cq_ecount); 8314 if (!qdesc) { 8315 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8316 "6079 Failed allocate NVME LS CQ\n"); 8317 goto out_error; 8318 } 8319 phba->sli4_hba.nvmels_cq = qdesc; 8320 8321 /* Create NVME LS Work Queue */ 8322 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8323 phba->sli4_hba.wq_esize, 8324 phba->sli4_hba.wq_ecount); 8325 if (!qdesc) { 8326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8327 "6080 Failed allocate NVME LS WQ\n"); 8328 goto out_error; 8329 } 8330 phba->sli4_hba.nvmels_wq = qdesc; 8331 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8332 } 8333 8334 /* 8335 * Create Receive Queue (RQ) 8336 */ 8337 8338 /* Create Receive Queue for header */ 8339 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8340 phba->sli4_hba.rq_esize, 8341 phba->sli4_hba.rq_ecount); 8342 if (!qdesc) { 8343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8344 "0506 Failed allocate receive HRQ\n"); 8345 goto out_error; 8346 } 8347 phba->sli4_hba.hdr_rq = qdesc; 8348 8349 /* Create Receive Queue for data */ 8350 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8351 phba->sli4_hba.rq_esize, 8352 phba->sli4_hba.rq_ecount); 8353 if (!qdesc) { 8354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8355 "0507 Failed allocate receive DRQ\n"); 8356 goto out_error; 8357 } 8358 phba->sli4_hba.dat_rq = qdesc; 8359 8360 if (phba->nvmet_support) { 8361 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8362 /* Create NVMET Receive Queue for header */ 8363 qdesc = lpfc_sli4_queue_alloc(phba, 8364 LPFC_DEFAULT_PAGE_SIZE, 8365 phba->sli4_hba.rq_esize, 8366 LPFC_NVMET_RQE_DEF_COUNT); 8367 if (!qdesc) { 8368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8369 "3146 Failed allocate " 8370 "receive HRQ\n"); 8371 goto out_error; 8372 } 8373 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 8374 8375 /* Only needed for header of RQ pair */ 8376 qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb), 8377 GFP_KERNEL); 8378 if (qdesc->rqbp == NULL) { 8379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8380 "6131 Failed allocate " 8381 "Header RQBP\n"); 8382 goto out_error; 8383 } 8384 8385 /* Put list in known state in case driver load fails. 
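 * lpfc_free_rq_buffer() walks rqb_buffer_list at teardown, so the list
 * head must be valid (even if empty) before any error path can reach it.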
*/ 8386 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 8387 8388 /* Create NVMET Receive Queue for data */ 8389 qdesc = lpfc_sli4_queue_alloc(phba, 8390 LPFC_DEFAULT_PAGE_SIZE, 8391 phba->sli4_hba.rq_esize, 8392 LPFC_NVMET_RQE_DEF_COUNT); 8393 if (!qdesc) { 8394 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8395 "3156 Failed allocate " 8396 "receive DRQ\n"); 8397 goto out_error; 8398 } 8399 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 8400 } 8401 } 8402 8403 /* Create the Queues needed for Flash Optimized Fabric operations */ 8404 if (phba->cfg_fof) 8405 lpfc_fof_queue_create(phba); 8406 return 0; 8407 8408 out_error: 8409 lpfc_sli4_queue_destroy(phba); 8410 return -ENOMEM; 8411 } 8412 8413 static inline void 8414 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 8415 { 8416 if (*qp != NULL) { 8417 lpfc_sli4_queue_free(*qp); 8418 *qp = NULL; 8419 } 8420 } 8421 8422 static inline void 8423 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 8424 { 8425 int idx; 8426 8427 if (*qs == NULL) 8428 return; 8429 8430 for (idx = 0; idx < max; idx++) 8431 __lpfc_sli4_release_queue(&(*qs)[idx]); 8432 8433 kfree(*qs); 8434 *qs = NULL; 8435 } 8436 8437 static inline void 8438 lpfc_sli4_release_queue_map(uint16_t **qmap) 8439 { 8440 if (*qmap != NULL) { 8441 kfree(*qmap); 8442 *qmap = NULL; 8443 } 8444 } 8445 8446 /** 8447 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 8448 * @phba: pointer to lpfc hba data structure. 8449 * 8450 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 8451 * operation. 8452 * 8453 * Return codes 8454 * 0 - successful 8455 * -ENOMEM - No available memory 8456 * -EIO - The mailbox failed to complete successfully. 8457 **/ 8458 void 8459 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 8460 { 8461 if (phba->cfg_fof) 8462 lpfc_fof_queue_destroy(phba); 8463 8464 /* Release HBA eqs */ 8465 lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs); 8466 8467 /* Release FCP cqs */ 8468 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq, 8469 phba->cfg_fcp_io_channel); 8470 8471 /* Release FCP wqs */ 8472 lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq, 8473 phba->cfg_fcp_io_channel); 8474 8475 /* Release FCP CQ mapping array */ 8476 lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map); 8477 8478 /* Release NVME cqs */ 8479 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq, 8480 phba->cfg_nvme_io_channel); 8481 8482 /* Release NVME wqs */ 8483 lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq, 8484 phba->cfg_nvme_io_channel); 8485 8486 /* Release NVME CQ mapping array */ 8487 lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map); 8488 8489 if (phba->nvmet_support) { 8490 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 8491 phba->cfg_nvmet_mrq); 8492 8493 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 8494 phba->cfg_nvmet_mrq); 8495 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 8496 phba->cfg_nvmet_mrq); 8497 } 8498 8499 /* Release mailbox command work queue */ 8500 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 8501 8502 /* Release ELS work queue */ 8503 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 8504 8505 /* Release ELS work queue */ 8506 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 8507 8508 /* Release unsolicited receive queue */ 8509 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 8510 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 8511 8512 /* Release ELS complete queue */ 8513 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 8514 8515 /* Release NVME LS complete queue */ 8516 
__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 8517 8518 /* Release mailbox command complete queue */ 8519 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 8520 8521 /* Everything on this list has been freed */ 8522 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8523 } 8524 8525 int 8526 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 8527 { 8528 struct lpfc_rqb *rqbp; 8529 struct lpfc_dmabuf *h_buf; 8530 struct rqb_dmabuf *rqb_buffer; 8531 8532 rqbp = rq->rqbp; 8533 while (!list_empty(&rqbp->rqb_buffer_list)) { 8534 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 8535 struct lpfc_dmabuf, list); 8536 8537 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 8538 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 8539 rqbp->buffer_count--; 8540 } 8541 return 1; 8542 } 8543 8544 static int 8545 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 8546 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 8547 int qidx, uint32_t qtype) 8548 { 8549 struct lpfc_sli_ring *pring; 8550 int rc; 8551 8552 if (!eq || !cq || !wq) { 8553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8554 "6085 Fast-path %s (%d) not allocated\n", 8555 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 8556 return -ENOMEM; 8557 } 8558 8559 /* create the Cq first */ 8560 rc = lpfc_cq_create(phba, cq, eq, 8561 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype); 8562 if (rc) { 8563 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8564 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 8565 qidx, (uint32_t)rc); 8566 return rc; 8567 } 8568 cq->chann = qidx; 8569 8570 if (qtype != LPFC_MBOX) { 8571 /* Setup nvme_cq_map for fast lookup */ 8572 if (cq_map) 8573 *cq_map = cq->queue_id; 8574 8575 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8576 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 8577 qidx, cq->queue_id, qidx, eq->queue_id); 8578 8579 /* create the wq */ 8580 rc = lpfc_wq_create(phba, wq, cq, qtype); 8581 if (rc) { 8582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8583 "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n", 8584 qidx, (uint32_t)rc); 8585 /* no need to tear down cq - caller will do so */ 8586 return rc; 8587 } 8588 wq->chann = qidx; 8589 8590 /* Bind this CQ/WQ to the NVME ring */ 8591 pring = wq->pring; 8592 pring->sli.sli4.wqp = (void *)wq; 8593 cq->pring = pring; 8594 8595 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8596 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 8597 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 8598 } else { 8599 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 8600 if (rc) { 8601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8602 "0539 Failed setup of slow-path MQ: " 8603 "rc = 0x%x\n", rc); 8604 /* no need to tear down cq - caller will do so */ 8605 return rc; 8606 } 8607 8608 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8609 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 8610 phba->sli4_hba.mbx_wq->queue_id, 8611 phba->sli4_hba.mbx_cq->queue_id); 8612 } 8613 8614 return 0; 8615 } 8616 8617 /** 8618 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 8619 * @phba: pointer to lpfc hba data structure. 8620 * 8621 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 8622 * operation. 8623 * 8624 * Return codes 8625 * 0 - successful 8626 * -ENOMEM - No available memory 8627 * -EIO - The mailbox failed to complete successfully. 
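 * Setup order: QUERY_FW_CFG (ULP/function mode), the HBA EQs, the NVME and
 * FCP WQ/CQ pairs spread round-robin across those EQs, the slow-path
 * MBX/ELS/NVME LS queues, the NVMET CQ sets and MRQs when nvmet_support is
 * set, the unsolicited hdr/dat RQ pair, the optional FOF queues, and
 * finally the EQ delay programming.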
8628 **/ 8629 int 8630 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 8631 { 8632 uint32_t shdr_status, shdr_add_status; 8633 union lpfc_sli4_cfg_shdr *shdr; 8634 LPFC_MBOXQ_t *mboxq; 8635 int qidx; 8636 uint32_t length, io_channel; 8637 int rc = -ENOMEM; 8638 8639 /* Check for dual-ULP support */ 8640 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8641 if (!mboxq) { 8642 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8643 "3249 Unable to allocate memory for " 8644 "QUERY_FW_CFG mailbox command\n"); 8645 return -ENOMEM; 8646 } 8647 length = (sizeof(struct lpfc_mbx_query_fw_config) - 8648 sizeof(struct lpfc_sli4_cfg_mhdr)); 8649 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 8650 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 8651 length, LPFC_SLI4_MBX_EMBED); 8652 8653 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8654 8655 shdr = (union lpfc_sli4_cfg_shdr *) 8656 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 8657 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8658 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8659 if (shdr_status || shdr_add_status || rc) { 8660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8661 "3250 QUERY_FW_CFG mailbox failed with status " 8662 "x%x add_status x%x, mbx status x%x\n", 8663 shdr_status, shdr_add_status, rc); 8664 if (rc != MBX_TIMEOUT) 8665 mempool_free(mboxq, phba->mbox_mem_pool); 8666 rc = -ENXIO; 8667 goto out_error; 8668 } 8669 8670 phba->sli4_hba.fw_func_mode = 8671 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 8672 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 8673 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 8674 phba->sli4_hba.physical_port = 8675 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 8676 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8677 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 8678 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 8679 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 8680 8681 if (rc != MBX_TIMEOUT) 8682 mempool_free(mboxq, phba->mbox_mem_pool); 8683 8684 /* 8685 * Set up HBA Event Queues (EQs) 8686 */ 8687 io_channel = phba->io_channel_irqs; 8688 8689 /* Set up HBA event queue */ 8690 if (io_channel && !phba->sli4_hba.hba_eq) { 8691 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8692 "3147 Fast-path EQs not allocated\n"); 8693 rc = -ENOMEM; 8694 goto out_error; 8695 } 8696 for (qidx = 0; qidx < io_channel; qidx++) { 8697 if (!phba->sli4_hba.hba_eq[qidx]) { 8698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8699 "0522 Fast-path EQ (%d) not " 8700 "allocated\n", qidx); 8701 rc = -ENOMEM; 8702 goto out_destroy; 8703 } 8704 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx], 8705 phba->cfg_fcp_imax); 8706 if (rc) { 8707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8708 "0523 Failed setup of fast-path EQ " 8709 "(%d), rc = 0x%x\n", qidx, 8710 (uint32_t)rc); 8711 goto out_destroy; 8712 } 8713 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8714 "2584 HBA EQ setup: queue[%d]-id=%d\n", 8715 qidx, phba->sli4_hba.hba_eq[qidx]->queue_id); 8716 } 8717 8718 if (phba->cfg_nvme_io_channel) { 8719 if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) { 8720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8721 "6084 Fast-path NVME %s array not allocated\n", 8722 (phba->sli4_hba.nvme_cq) ? 
"CQ" : "WQ"); 8723 rc = -ENOMEM; 8724 goto out_destroy; 8725 } 8726 8727 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { 8728 rc = lpfc_create_wq_cq(phba, 8729 phba->sli4_hba.hba_eq[ 8730 qidx % io_channel], 8731 phba->sli4_hba.nvme_cq[qidx], 8732 phba->sli4_hba.nvme_wq[qidx], 8733 &phba->sli4_hba.nvme_cq_map[qidx], 8734 qidx, LPFC_NVME); 8735 if (rc) { 8736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8737 "6123 Failed to setup fastpath " 8738 "NVME WQ/CQ (%d), rc = 0x%x\n", 8739 qidx, (uint32_t)rc); 8740 goto out_destroy; 8741 } 8742 } 8743 } 8744 8745 if (phba->cfg_fcp_io_channel) { 8746 /* Set up fast-path FCP Response Complete Queue */ 8747 if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) { 8748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8749 "3148 Fast-path FCP %s array not allocated\n", 8750 phba->sli4_hba.fcp_cq ? "WQ" : "CQ"); 8751 rc = -ENOMEM; 8752 goto out_destroy; 8753 } 8754 8755 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { 8756 rc = lpfc_create_wq_cq(phba, 8757 phba->sli4_hba.hba_eq[ 8758 qidx % io_channel], 8759 phba->sli4_hba.fcp_cq[qidx], 8760 phba->sli4_hba.fcp_wq[qidx], 8761 &phba->sli4_hba.fcp_cq_map[qidx], 8762 qidx, LPFC_FCP); 8763 if (rc) { 8764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8765 "0535 Failed to setup fastpath " 8766 "FCP WQ/CQ (%d), rc = 0x%x\n", 8767 qidx, (uint32_t)rc); 8768 goto out_destroy; 8769 } 8770 } 8771 } 8772 8773 /* 8774 * Set up Slow Path Complete Queues (CQs) 8775 */ 8776 8777 /* Set up slow-path MBOX CQ/MQ */ 8778 8779 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 8780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8781 "0528 %s not allocated\n", 8782 phba->sli4_hba.mbx_cq ? 8783 "Mailbox WQ" : "Mailbox CQ"); 8784 rc = -ENOMEM; 8785 goto out_destroy; 8786 } 8787 8788 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], 8789 phba->sli4_hba.mbx_cq, 8790 phba->sli4_hba.mbx_wq, 8791 NULL, 0, LPFC_MBOX); 8792 if (rc) { 8793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8794 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 8795 (uint32_t)rc); 8796 goto out_destroy; 8797 } 8798 if (phba->nvmet_support) { 8799 if (!phba->sli4_hba.nvmet_cqset) { 8800 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8801 "3165 Fast-path NVME CQ Set " 8802 "array not allocated\n"); 8803 rc = -ENOMEM; 8804 goto out_destroy; 8805 } 8806 if (phba->cfg_nvmet_mrq > 1) { 8807 rc = lpfc_cq_create_set(phba, 8808 phba->sli4_hba.nvmet_cqset, 8809 phba->sli4_hba.hba_eq, 8810 LPFC_WCQ, LPFC_NVMET); 8811 if (rc) { 8812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8813 "3164 Failed setup of NVME CQ " 8814 "Set, rc = 0x%x\n", 8815 (uint32_t)rc); 8816 goto out_destroy; 8817 } 8818 } else { 8819 /* Set up NVMET Receive Complete Queue */ 8820 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 8821 phba->sli4_hba.hba_eq[0], 8822 LPFC_WCQ, LPFC_NVMET); 8823 if (rc) { 8824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8825 "6089 Failed setup NVMET CQ: " 8826 "rc = 0x%x\n", (uint32_t)rc); 8827 goto out_destroy; 8828 } 8829 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 8830 8831 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8832 "6090 NVMET CQ setup: cq-id=%d, " 8833 "parent eq-id=%d\n", 8834 phba->sli4_hba.nvmet_cqset[0]->queue_id, 8835 phba->sli4_hba.hba_eq[0]->queue_id); 8836 } 8837 } 8838 8839 /* Set up slow-path ELS WQ/CQ */ 8840 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 8841 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8842 "0530 ELS %s not allocated\n", 8843 phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); 8844 rc = -ENOMEM; 8845 goto out_destroy; 8846 } 8847 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], 8848 phba->sli4_hba.els_cq, 8849 phba->sli4_hba.els_wq, 8850 NULL, 0, LPFC_ELS); 8851 if (rc) { 8852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8853 "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 8854 (uint32_t)rc); 8855 goto out_destroy; 8856 } 8857 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8858 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 8859 phba->sli4_hba.els_wq->queue_id, 8860 phba->sli4_hba.els_cq->queue_id); 8861 8862 if (phba->cfg_nvme_io_channel) { 8863 /* Set up NVME LS Complete Queue */ 8864 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 8865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8866 "6091 LS %s not allocated\n", 8867 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 8868 rc = -ENOMEM; 8869 goto out_destroy; 8870 } 8871 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], 8872 phba->sli4_hba.nvmels_cq, 8873 phba->sli4_hba.nvmels_wq, 8874 NULL, 0, LPFC_NVME_LS); 8875 if (rc) { 8876 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8877 "0529 Failed setup of NVVME LS WQ/CQ: " 8878 "rc = 0x%x\n", (uint32_t)rc); 8879 goto out_destroy; 8880 } 8881 8882 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8883 "6096 ELS WQ setup: wq-id=%d, " 8884 "parent cq-id=%d\n", 8885 phba->sli4_hba.nvmels_wq->queue_id, 8886 phba->sli4_hba.nvmels_cq->queue_id); 8887 } 8888 8889 /* 8890 * Create NVMET Receive Queue (RQ) 8891 */ 8892 if (phba->nvmet_support) { 8893 if ((!phba->sli4_hba.nvmet_cqset) || 8894 (!phba->sli4_hba.nvmet_mrq_hdr) || 8895 (!phba->sli4_hba.nvmet_mrq_data)) { 8896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8897 "6130 MRQ CQ Queues not " 8898 "allocated\n"); 8899 rc = -ENOMEM; 8900 goto out_destroy; 8901 } 8902 if (phba->cfg_nvmet_mrq > 1) { 8903 rc = lpfc_mrq_create(phba, 8904 phba->sli4_hba.nvmet_mrq_hdr, 8905 phba->sli4_hba.nvmet_mrq_data, 8906 phba->sli4_hba.nvmet_cqset, 8907 LPFC_NVMET); 8908 if (rc) { 8909 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8910 "6098 Failed setup of NVMET " 8911 "MRQ: rc = 0x%x\n", 8912 (uint32_t)rc); 8913 goto out_destroy; 8914 } 8915 8916 } else { 8917 rc = lpfc_rq_create(phba, 8918 phba->sli4_hba.nvmet_mrq_hdr[0], 8919 phba->sli4_hba.nvmet_mrq_data[0], 8920 phba->sli4_hba.nvmet_cqset[0], 8921 LPFC_NVMET); 8922 if (rc) { 8923 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8924 "6057 Failed setup of NVMET " 8925 "Receive Queue: rc = 0x%x\n", 8926 (uint32_t)rc); 8927 goto out_destroy; 8928 } 8929 8930 lpfc_printf_log( 8931 phba, KERN_INFO, LOG_INIT, 8932 "6099 NVMET RQ setup: hdr-rq-id=%d, " 8933 "dat-rq-id=%d parent cq-id=%d\n", 8934 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 8935 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 8936 phba->sli4_hba.nvmet_cqset[0]->queue_id); 8937 8938 } 8939 } 8940 8941 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 8942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8943 "0540 Receive Queue not allocated\n"); 8944 rc = -ENOMEM; 8945 goto out_destroy; 8946 } 8947 8948 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 8949 phba->sli4_hba.els_cq, LPFC_USOL); 8950 if (rc) { 8951 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8952 "0541 Failed setup of Receive Queue: " 8953 "rc = 0x%x\n", (uint32_t)rc); 8954 goto out_destroy; 8955 } 8956 8957 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8958 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 8959 "parent cq-id=%d\n", 8960 phba->sli4_hba.hdr_rq->queue_id, 8961 phba->sli4_hba.dat_rq->queue_id, 8962 phba->sli4_hba.els_cq->queue_id); 8963 8964 
if (phba->cfg_fof) { 8965 rc = lpfc_fof_queue_setup(phba); 8966 if (rc) { 8967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8968 "0549 Failed setup of FOF Queues: " 8969 "rc = 0x%x\n", rc); 8970 goto out_destroy; 8971 } 8972 } 8973 8974 for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 8975 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 8976 phba->cfg_fcp_imax); 8977 8978 return 0; 8979 8980 out_destroy: 8981 lpfc_sli4_queue_unset(phba); 8982 out_error: 8983 return rc; 8984 } 8985 8986 /** 8987 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 8988 * @phba: pointer to lpfc hba data structure. 8989 * 8990 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 8991 * operation. 8992 * 8993 * Return codes 8994 * 0 - successful 8995 * -ENOMEM - No available memory 8996 * -EIO - The mailbox failed to complete successfully. 8997 **/ 8998 void 8999 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 9000 { 9001 int qidx; 9002 9003 /* Unset the queues created for Flash Optimized Fabric operations */ 9004 if (phba->cfg_fof) 9005 lpfc_fof_queue_destroy(phba); 9006 9007 /* Unset mailbox command work queue */ 9008 if (phba->sli4_hba.mbx_wq) 9009 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 9010 9011 /* Unset NVME LS work queue */ 9012 if (phba->sli4_hba.nvmels_wq) 9013 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 9014 9015 /* Unset ELS work queue */ 9016 if (phba->sli4_hba.els_wq) 9017 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 9018 9019 /* Unset unsolicited receive queue */ 9020 if (phba->sli4_hba.hdr_rq) 9021 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 9022 phba->sli4_hba.dat_rq); 9023 9024 /* Unset FCP work queue */ 9025 if (phba->sli4_hba.fcp_wq) 9026 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) 9027 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]); 9028 9029 /* Unset NVME work queue */ 9030 if (phba->sli4_hba.nvme_wq) { 9031 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 9032 lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]); 9033 } 9034 9035 /* Unset mailbox command complete queue */ 9036 if (phba->sli4_hba.mbx_cq) 9037 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 9038 9039 /* Unset ELS complete queue */ 9040 if (phba->sli4_hba.els_cq) 9041 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 9042 9043 /* Unset NVME LS complete queue */ 9044 if (phba->sli4_hba.nvmels_cq) 9045 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 9046 9047 /* Unset NVME response complete queue */ 9048 if (phba->sli4_hba.nvme_cq) 9049 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 9050 lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]); 9051 9052 if (phba->nvmet_support) { 9053 /* Unset NVMET MRQ queue */ 9054 if (phba->sli4_hba.nvmet_mrq_hdr) { 9055 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9056 lpfc_rq_destroy( 9057 phba, 9058 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9059 phba->sli4_hba.nvmet_mrq_data[qidx]); 9060 } 9061 9062 /* Unset NVMET CQ Set complete queue */ 9063 if (phba->sli4_hba.nvmet_cqset) { 9064 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9065 lpfc_cq_destroy( 9066 phba, phba->sli4_hba.nvmet_cqset[qidx]); 9067 } 9068 } 9069 9070 /* Unset FCP response complete queue */ 9071 if (phba->sli4_hba.fcp_cq) 9072 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) 9073 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]); 9074 9075 /* Unset fast-path event queue */ 9076 if (phba->sli4_hba.hba_eq) 9077 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) 9078 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]); 9079 
} 9080 9081 /** 9082 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 9083 * @phba: pointer to lpfc hba data structure. 9084 * 9085 * This routine is invoked to allocate and set up a pool of completion queue 9086 * events. The body of the completion queue event is a completion queue entry 9087 * CQE. For now, this pool is used for the interrupt service routine to queue 9088 * the following HBA completion queue events for the worker thread to process: 9089 * - Mailbox asynchronous events 9090 * - Receive queue completion unsolicited events 9091 * Later, this can be used for all the slow-path events. 9092 * 9093 * Return codes 9094 * 0 - successful 9095 * -ENOMEM - No available memory 9096 **/ 9097 static int 9098 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 9099 { 9100 struct lpfc_cq_event *cq_event; 9101 int i; 9102 9103 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 9104 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 9105 if (!cq_event) 9106 goto out_pool_create_fail; 9107 list_add_tail(&cq_event->list, 9108 &phba->sli4_hba.sp_cqe_event_pool); 9109 } 9110 return 0; 9111 9112 out_pool_create_fail: 9113 lpfc_sli4_cq_event_pool_destroy(phba); 9114 return -ENOMEM; 9115 } 9116 9117 /** 9118 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 9119 * @phba: pointer to lpfc hba data structure. 9120 * 9121 * This routine is invoked to free the pool of completion queue events at 9122 * driver unload time. Note that, it is the responsibility of the driver 9123 * cleanup routine to free all the outstanding completion-queue events 9124 * allocated from this pool back into the pool before invoking this routine 9125 * to destroy the pool. 9126 **/ 9127 static void 9128 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 9129 { 9130 struct lpfc_cq_event *cq_event, *next_cq_event; 9131 9132 list_for_each_entry_safe(cq_event, next_cq_event, 9133 &phba->sli4_hba.sp_cqe_event_pool, list) { 9134 list_del(&cq_event->list); 9135 kfree(cq_event); 9136 } 9137 } 9138 9139 /** 9140 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9141 * @phba: pointer to lpfc hba data structure. 9142 * 9143 * This routine is the lock free version of the API invoked to allocate a 9144 * completion-queue event from the free pool. 9145 * 9146 * Return: Pointer to the newly allocated completion-queue event if successful 9147 * NULL otherwise. 9148 **/ 9149 struct lpfc_cq_event * 9150 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9151 { 9152 struct lpfc_cq_event *cq_event = NULL; 9153 9154 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 9155 struct lpfc_cq_event, list); 9156 return cq_event; 9157 } 9158 9159 /** 9160 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9161 * @phba: pointer to lpfc hba data structure. 9162 * 9163 * This routine is the lock version of the API invoked to allocate a 9164 * completion-queue event from the free pool. 9165 * 9166 * Return: Pointer to the newly allocated completion-queue event if successful 9167 * NULL otherwise. 
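 *
 * A minimal usage sketch (illustrative only, not a call site in this file):
 * allocate an event in interrupt context and hand it back with
 * lpfc_sli4_cq_event_release() once the worker thread has processed it.
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (!cq_event)
 *		return;
 *	... queue cq_event for the worker thread ...
 *	lpfc_sli4_cq_event_release(phba, cq_event);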
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine releases all the pending completion-queue events back into
 * the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
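 *
 * Note: on if_type 0 ports the reset is requested with the
 * SLI_FUNCTION_RESET mailbox command; on if_type 2 ports it is driven
 * through the SLIPORT control register, with the routine polling SLIPORT
 * status for RDY (up to roughly 30 seconds) before and after the reset.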
9258 **/ 9259 int 9260 lpfc_pci_function_reset(struct lpfc_hba *phba) 9261 { 9262 LPFC_MBOXQ_t *mboxq; 9263 uint32_t rc = 0, if_type; 9264 uint32_t shdr_status, shdr_add_status; 9265 uint32_t rdy_chk; 9266 uint32_t port_reset = 0; 9267 union lpfc_sli4_cfg_shdr *shdr; 9268 struct lpfc_register reg_data; 9269 uint16_t devid; 9270 9271 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9272 switch (if_type) { 9273 case LPFC_SLI_INTF_IF_TYPE_0: 9274 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 9275 GFP_KERNEL); 9276 if (!mboxq) { 9277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9278 "0494 Unable to allocate memory for " 9279 "issuing SLI_FUNCTION_RESET mailbox " 9280 "command\n"); 9281 return -ENOMEM; 9282 } 9283 9284 /* Setup PCI function reset mailbox-ioctl command */ 9285 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9286 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 9287 LPFC_SLI4_MBX_EMBED); 9288 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9289 shdr = (union lpfc_sli4_cfg_shdr *) 9290 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9291 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9292 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 9293 &shdr->response); 9294 if (rc != MBX_TIMEOUT) 9295 mempool_free(mboxq, phba->mbox_mem_pool); 9296 if (shdr_status || shdr_add_status || rc) { 9297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9298 "0495 SLI_FUNCTION_RESET mailbox " 9299 "failed with status x%x add_status x%x," 9300 " mbx status x%x\n", 9301 shdr_status, shdr_add_status, rc); 9302 rc = -ENXIO; 9303 } 9304 break; 9305 case LPFC_SLI_INTF_IF_TYPE_2: 9306 wait: 9307 /* 9308 * Poll the Port Status Register and wait for RDY for 9309 * up to 30 seconds. If the port doesn't respond, treat 9310 * it as an error. 9311 */ 9312 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 9313 if (lpfc_readl(phba->sli4_hba.u.if_type2. 9314 STATUSregaddr, ®_data.word0)) { 9315 rc = -ENODEV; 9316 goto out; 9317 } 9318 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 9319 break; 9320 msleep(20); 9321 } 9322 9323 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 9324 phba->work_status[0] = readl( 9325 phba->sli4_hba.u.if_type2.ERR1regaddr); 9326 phba->work_status[1] = readl( 9327 phba->sli4_hba.u.if_type2.ERR2regaddr); 9328 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9329 "2890 Port not ready, port status reg " 9330 "0x%x error 1=0x%x, error 2=0x%x\n", 9331 reg_data.word0, 9332 phba->work_status[0], 9333 phba->work_status[1]); 9334 rc = -ENODEV; 9335 goto out; 9336 } 9337 9338 if (!port_reset) { 9339 /* 9340 * Reset the port now 9341 */ 9342 reg_data.word0 = 0; 9343 bf_set(lpfc_sliport_ctrl_end, ®_data, 9344 LPFC_SLIPORT_LITTLE_ENDIAN); 9345 bf_set(lpfc_sliport_ctrl_ip, ®_data, 9346 LPFC_SLIPORT_INIT_PORT); 9347 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 9348 CTRLregaddr); 9349 /* flush */ 9350 pci_read_config_word(phba->pcidev, 9351 PCI_DEVICE_ID, &devid); 9352 9353 port_reset = 1; 9354 msleep(20); 9355 goto wait; 9356 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 9357 rc = -ENODEV; 9358 goto out; 9359 } 9360 break; 9361 9362 case LPFC_SLI_INTF_IF_TYPE_1: 9363 default: 9364 break; 9365 } 9366 9367 out: 9368 /* Catch the not-ready port failure after a port reset. */ 9369 if (rc) { 9370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9371 "3317 HBA not functional: IP Reset Failed " 9372 "try: echo fw_reset > board_mode\n"); 9373 rc = -ENODEV; 9374 } 9375 9376 return rc; 9377 } 9378 9379 /** 9380 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 
9381 * @phba: pointer to lpfc hba data structure. 9382 * 9383 * This routine is invoked to set up the PCI device memory space for device 9384 * with SLI-4 interface spec. 9385 * 9386 * Return codes 9387 * 0 - successful 9388 * other values - error 9389 **/ 9390 static int 9391 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 9392 { 9393 struct pci_dev *pdev; 9394 unsigned long bar0map_len, bar1map_len, bar2map_len; 9395 int error = -ENODEV; 9396 uint32_t if_type; 9397 9398 /* Obtain PCI device reference */ 9399 if (!phba->pcidev) 9400 return error; 9401 else 9402 pdev = phba->pcidev; 9403 9404 /* Set the device DMA mask size */ 9405 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 9406 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 9407 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 9408 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 9409 return error; 9410 } 9411 } 9412 9413 /* 9414 * The BARs and register set definitions and offset locations are 9415 * dependent on the if_type. 9416 */ 9417 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 9418 &phba->sli4_hba.sli_intf.word0)) { 9419 return error; 9420 } 9421 9422 /* There is no SLI3 failback for SLI4 devices. */ 9423 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 9424 LPFC_SLI_INTF_VALID) { 9425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9426 "2894 SLI_INTF reg contents invalid " 9427 "sli_intf reg 0x%x\n", 9428 phba->sli4_hba.sli_intf.word0); 9429 return error; 9430 } 9431 9432 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9433 /* 9434 * Get the bus address of SLI4 device Bar regions and the 9435 * number of bytes required by each mapping. The mapping of the 9436 * particular PCI BARs regions is dependent on the type of 9437 * SLI4 device. 9438 */ 9439 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 9440 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 9441 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 9442 9443 /* 9444 * Map SLI4 PCI Config Space Register base to a kernel virtual 9445 * addr 9446 */ 9447 phba->sli4_hba.conf_regs_memmap_p = 9448 ioremap(phba->pci_bar0_map, bar0map_len); 9449 if (!phba->sli4_hba.conf_regs_memmap_p) { 9450 dev_printk(KERN_ERR, &pdev->dev, 9451 "ioremap failed for SLI4 PCI config " 9452 "registers.\n"); 9453 goto out; 9454 } 9455 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 9456 /* Set up BAR0 PCI config space register memory map */ 9457 lpfc_sli4_bar0_register_memmap(phba, if_type); 9458 } else { 9459 phba->pci_bar0_map = pci_resource_start(pdev, 1); 9460 bar0map_len = pci_resource_len(pdev, 1); 9461 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 9462 dev_printk(KERN_ERR, &pdev->dev, 9463 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 9464 goto out; 9465 } 9466 phba->sli4_hba.conf_regs_memmap_p = 9467 ioremap(phba->pci_bar0_map, bar0map_len); 9468 if (!phba->sli4_hba.conf_regs_memmap_p) { 9469 dev_printk(KERN_ERR, &pdev->dev, 9470 "ioremap failed for SLI4 PCI config " 9471 "registers.\n"); 9472 goto out; 9473 } 9474 lpfc_sli4_bar0_register_memmap(phba, if_type); 9475 } 9476 9477 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 9478 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 9479 /* 9480 * Map SLI4 if type 0 HBA Control Register base to a 9481 * kernel virtual address and setup the registers. 
9482 */ 9483 phba->pci_bar1_map = pci_resource_start(pdev, 9484 PCI_64BIT_BAR2); 9485 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 9486 phba->sli4_hba.ctrl_regs_memmap_p = 9487 ioremap(phba->pci_bar1_map, 9488 bar1map_len); 9489 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 9490 dev_err(&pdev->dev, 9491 "ioremap failed for SLI4 HBA " 9492 "control registers.\n"); 9493 error = -ENOMEM; 9494 goto out_iounmap_conf; 9495 } 9496 phba->pci_bar2_memmap_p = 9497 phba->sli4_hba.ctrl_regs_memmap_p; 9498 lpfc_sli4_bar1_register_memmap(phba); 9499 } else { 9500 error = -ENOMEM; 9501 goto out_iounmap_conf; 9502 } 9503 } 9504 9505 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 9506 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 9507 /* 9508 * Map SLI4 if type 0 HBA Doorbell Register base to 9509 * a kernel virtual address and setup the registers. 9510 */ 9511 phba->pci_bar2_map = pci_resource_start(pdev, 9512 PCI_64BIT_BAR4); 9513 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 9514 phba->sli4_hba.drbl_regs_memmap_p = 9515 ioremap(phba->pci_bar2_map, 9516 bar2map_len); 9517 if (!phba->sli4_hba.drbl_regs_memmap_p) { 9518 dev_err(&pdev->dev, 9519 "ioremap failed for SLI4 HBA" 9520 " doorbell registers.\n"); 9521 error = -ENOMEM; 9522 goto out_iounmap_ctrl; 9523 } 9524 phba->pci_bar4_memmap_p = 9525 phba->sli4_hba.drbl_regs_memmap_p; 9526 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 9527 if (error) 9528 goto out_iounmap_all; 9529 } else { 9530 error = -ENOMEM; 9531 goto out_iounmap_all; 9532 } 9533 } 9534 9535 return 0; 9536 9537 out_iounmap_all: 9538 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 9539 out_iounmap_ctrl: 9540 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 9541 out_iounmap_conf: 9542 iounmap(phba->sli4_hba.conf_regs_memmap_p); 9543 out: 9544 return error; 9545 } 9546 9547 /** 9548 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 9549 * @phba: pointer to lpfc hba data structure. 9550 * 9551 * This routine is invoked to unset the PCI device memory space for device 9552 * with SLI-4 interface spec. 9553 **/ 9554 static void 9555 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 9556 { 9557 uint32_t if_type; 9558 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9559 9560 switch (if_type) { 9561 case LPFC_SLI_INTF_IF_TYPE_0: 9562 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 9563 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 9564 iounmap(phba->sli4_hba.conf_regs_memmap_p); 9565 break; 9566 case LPFC_SLI_INTF_IF_TYPE_2: 9567 iounmap(phba->sli4_hba.conf_regs_memmap_p); 9568 break; 9569 case LPFC_SLI_INTF_IF_TYPE_1: 9570 default: 9571 dev_printk(KERN_ERR, &phba->pcidev->dev, 9572 "FATAL - unsupported SLI4 interface type - %d\n", 9573 if_type); 9574 break; 9575 } 9576 } 9577 9578 /** 9579 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 9580 * @phba: pointer to lpfc hba data structure. 9581 * 9582 * This routine is invoked to enable the MSI-X interrupt vectors to device 9583 * with SLI-3 interface specs. 
9584 * 9585 * Return codes 9586 * 0 - successful 9587 * other values - error 9588 **/ 9589 static int 9590 lpfc_sli_enable_msix(struct lpfc_hba *phba) 9591 { 9592 int rc; 9593 LPFC_MBOXQ_t *pmb; 9594 9595 /* Set up MSI-X multi-message vectors */ 9596 rc = pci_alloc_irq_vectors(phba->pcidev, 9597 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 9598 if (rc < 0) { 9599 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9600 "0420 PCI enable MSI-X failed (%d)\n", rc); 9601 goto vec_fail_out; 9602 } 9603 9604 /* 9605 * Assign MSI-X vectors to interrupt handlers 9606 */ 9607 9608 /* vector-0 is associated to slow-path handler */ 9609 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 9610 &lpfc_sli_sp_intr_handler, 0, 9611 LPFC_SP_DRIVER_HANDLER_NAME, phba); 9612 if (rc) { 9613 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9614 "0421 MSI-X slow-path request_irq failed " 9615 "(%d)\n", rc); 9616 goto msi_fail_out; 9617 } 9618 9619 /* vector-1 is associated to fast-path handler */ 9620 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 9621 &lpfc_sli_fp_intr_handler, 0, 9622 LPFC_FP_DRIVER_HANDLER_NAME, phba); 9623 9624 if (rc) { 9625 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9626 "0429 MSI-X fast-path request_irq failed " 9627 "(%d)\n", rc); 9628 goto irq_fail_out; 9629 } 9630 9631 /* 9632 * Configure HBA MSI-X attention conditions to messages 9633 */ 9634 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9635 9636 if (!pmb) { 9637 rc = -ENOMEM; 9638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9639 "0474 Unable to allocate memory for issuing " 9640 "MBOX_CONFIG_MSI command\n"); 9641 goto mem_fail_out; 9642 } 9643 rc = lpfc_config_msi(phba, pmb); 9644 if (rc) 9645 goto mbx_fail_out; 9646 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 9647 if (rc != MBX_SUCCESS) { 9648 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 9649 "0351 Config MSI mailbox command failed, " 9650 "mbxCmd x%x, mbxStatus x%x\n", 9651 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 9652 goto mbx_fail_out; 9653 } 9654 9655 /* Free memory allocated for mailbox command */ 9656 mempool_free(pmb, phba->mbox_mem_pool); 9657 return rc; 9658 9659 mbx_fail_out: 9660 /* Free memory allocated for mailbox command */ 9661 mempool_free(pmb, phba->mbox_mem_pool); 9662 9663 mem_fail_out: 9664 /* free the irq already requested */ 9665 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 9666 9667 irq_fail_out: 9668 /* free the irq already requested */ 9669 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 9670 9671 msi_fail_out: 9672 /* Unconfigure MSI-X capability structure */ 9673 pci_free_irq_vectors(phba->pcidev); 9674 9675 vec_fail_out: 9676 return rc; 9677 } 9678 9679 /** 9680 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 9681 * @phba: pointer to lpfc hba data structure. 9682 * 9683 * This routine is invoked to enable the MSI interrupt mode to device with 9684 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 9685 * enable the MSI vector. The device driver is responsible for calling the 9686 * request_irq() to register MSI vector with a interrupt the handler, which 9687 * is done in this function. 
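 * If request_irq() fails, the routine disables the MSI capability again
 * with pci_disable_msi() so that the caller can fall back to INTx mode.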
9688 * 9689 * Return codes 9690 * 0 - successful 9691 * other values - error 9692 */ 9693 static int 9694 lpfc_sli_enable_msi(struct lpfc_hba *phba) 9695 { 9696 int rc; 9697 9698 rc = pci_enable_msi(phba->pcidev); 9699 if (!rc) 9700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9701 "0462 PCI enable MSI mode success.\n"); 9702 else { 9703 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9704 "0471 PCI enable MSI mode failed (%d)\n", rc); 9705 return rc; 9706 } 9707 9708 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 9709 0, LPFC_DRIVER_NAME, phba); 9710 if (rc) { 9711 pci_disable_msi(phba->pcidev); 9712 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9713 "0478 MSI request_irq failed (%d)\n", rc); 9714 } 9715 return rc; 9716 } 9717 9718 /** 9719 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 9720 * @phba: pointer to lpfc hba data structure. 9721 * 9722 * This routine is invoked to enable device interrupt and associate driver's 9723 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 9724 * spec. Depends on the interrupt mode configured to the driver, the driver 9725 * will try to fallback from the configured interrupt mode to an interrupt 9726 * mode which is supported by the platform, kernel, and device in the order 9727 * of: 9728 * MSI-X -> MSI -> IRQ. 9729 * 9730 * Return codes 9731 * 0 - successful 9732 * other values - error 9733 **/ 9734 static uint32_t 9735 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 9736 { 9737 uint32_t intr_mode = LPFC_INTR_ERROR; 9738 int retval; 9739 9740 if (cfg_mode == 2) { 9741 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 9742 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 9743 if (!retval) { 9744 /* Now, try to enable MSI-X interrupt mode */ 9745 retval = lpfc_sli_enable_msix(phba); 9746 if (!retval) { 9747 /* Indicate initialization to MSI-X mode */ 9748 phba->intr_type = MSIX; 9749 intr_mode = 2; 9750 } 9751 } 9752 } 9753 9754 /* Fallback to MSI if MSI-X initialization failed */ 9755 if (cfg_mode >= 1 && phba->intr_type == NONE) { 9756 retval = lpfc_sli_enable_msi(phba); 9757 if (!retval) { 9758 /* Indicate initialization to MSI mode */ 9759 phba->intr_type = MSI; 9760 intr_mode = 1; 9761 } 9762 } 9763 9764 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 9765 if (phba->intr_type == NONE) { 9766 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 9767 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 9768 if (!retval) { 9769 /* Indicate initialization to INTx mode */ 9770 phba->intr_type = INTx; 9771 intr_mode = 0; 9772 } 9773 } 9774 return intr_mode; 9775 } 9776 9777 /** 9778 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 9779 * @phba: pointer to lpfc hba data structure. 9780 * 9781 * This routine is invoked to disable device interrupt and disassociate the 9782 * driver's interrupt handler(s) from interrupt vector(s) to device with 9783 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 9784 * release the interrupt vector(s) for the message signaled interrupt. 
9785 **/ 9786 static void 9787 lpfc_sli_disable_intr(struct lpfc_hba *phba) 9788 { 9789 int nr_irqs, i; 9790 9791 if (phba->intr_type == MSIX) 9792 nr_irqs = LPFC_MSIX_VECTORS; 9793 else 9794 nr_irqs = 1; 9795 9796 for (i = 0; i < nr_irqs; i++) 9797 free_irq(pci_irq_vector(phba->pcidev, i), phba); 9798 pci_free_irq_vectors(phba->pcidev); 9799 9800 /* Reset interrupt management states */ 9801 phba->intr_type = NONE; 9802 phba->sli.slistat.sli_intr = 0; 9803 } 9804 9805 /** 9806 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 9807 * @phba: pointer to lpfc hba data structure. 9808 * @vectors: number of msix vectors allocated. 9809 * 9810 * The routine will figure out the CPU affinity assignment for every 9811 * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated 9812 * with a pointer to the CPU mask that defines ALL the CPUs this vector 9813 * can be associated with. If the vector can be unquely associated with 9814 * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu. 9815 * In addition, the CPU to IO channel mapping will be calculated 9816 * and the phba->sli4_hba.cpu_map array will reflect this. 9817 */ 9818 static void 9819 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 9820 { 9821 struct lpfc_vector_map_info *cpup; 9822 int index = 0; 9823 int vec = 0; 9824 int cpu; 9825 #ifdef CONFIG_X86 9826 struct cpuinfo_x86 *cpuinfo; 9827 #endif 9828 9829 /* Init cpu_map array */ 9830 memset(phba->sli4_hba.cpu_map, 0xff, 9831 (sizeof(struct lpfc_vector_map_info) * 9832 phba->sli4_hba.num_present_cpu)); 9833 9834 /* Update CPU map with physical id and core id of each CPU */ 9835 cpup = phba->sli4_hba.cpu_map; 9836 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 9837 #ifdef CONFIG_X86 9838 cpuinfo = &cpu_data(cpu); 9839 cpup->phys_id = cpuinfo->phys_proc_id; 9840 cpup->core_id = cpuinfo->cpu_core_id; 9841 #else 9842 /* No distinction between CPUs for other platforms */ 9843 cpup->phys_id = 0; 9844 cpup->core_id = 0; 9845 #endif 9846 cpup->channel_id = index; /* For now round robin */ 9847 cpup->irq = pci_irq_vector(phba->pcidev, vec); 9848 vec++; 9849 if (vec >= vectors) 9850 vec = 0; 9851 index++; 9852 if (index >= phba->cfg_fcp_io_channel) 9853 index = 0; 9854 cpup++; 9855 } 9856 } 9857 9858 9859 /** 9860 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 9861 * @phba: pointer to lpfc hba data structure. 9862 * 9863 * This routine is invoked to enable the MSI-X interrupt vectors to device 9864 * with SLI-4 interface spec. 9865 * 9866 * Return codes 9867 * 0 - successful 9868 * other values - error 9869 **/ 9870 static int 9871 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 9872 { 9873 int vectors, rc, index; 9874 char *name; 9875 9876 /* Set up MSI-X multi-message vectors */ 9877 vectors = phba->io_channel_irqs; 9878 if (phba->cfg_fof) 9879 vectors++; 9880 9881 rc = pci_alloc_irq_vectors(phba->pcidev, 9882 (phba->nvmet_support) ? 
1 : 2, 9883 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 9884 if (rc < 0) { 9885 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9886 "0484 PCI enable MSI-X failed (%d)\n", rc); 9887 goto vec_fail_out; 9888 } 9889 vectors = rc; 9890 9891 /* Assign MSI-X vectors to interrupt handlers */ 9892 for (index = 0; index < vectors; index++) { 9893 name = phba->sli4_hba.hba_eq_hdl[index].handler_name; 9894 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 9895 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 9896 LPFC_DRIVER_HANDLER_NAME"%d", index); 9897 9898 phba->sli4_hba.hba_eq_hdl[index].idx = index; 9899 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 9900 atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1); 9901 if (phba->cfg_fof && (index == (vectors - 1))) 9902 rc = request_irq(pci_irq_vector(phba->pcidev, index), 9903 &lpfc_sli4_fof_intr_handler, 0, 9904 name, 9905 &phba->sli4_hba.hba_eq_hdl[index]); 9906 else 9907 rc = request_irq(pci_irq_vector(phba->pcidev, index), 9908 &lpfc_sli4_hba_intr_handler, 0, 9909 name, 9910 &phba->sli4_hba.hba_eq_hdl[index]); 9911 if (rc) { 9912 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9913 "0486 MSI-X fast-path (%d) " 9914 "request_irq failed (%d)\n", index, rc); 9915 goto cfg_fail_out; 9916 } 9917 } 9918 9919 if (phba->cfg_fof) 9920 vectors--; 9921 9922 if (vectors != phba->io_channel_irqs) { 9923 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9924 "3238 Reducing IO channels to match number of " 9925 "MSI-X vectors, requested %d got %d\n", 9926 phba->io_channel_irqs, vectors); 9927 if (phba->cfg_fcp_io_channel > vectors) 9928 phba->cfg_fcp_io_channel = vectors; 9929 if (phba->cfg_nvme_io_channel > vectors) 9930 phba->cfg_nvme_io_channel = vectors; 9931 if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) 9932 phba->io_channel_irqs = phba->cfg_fcp_io_channel; 9933 else 9934 phba->io_channel_irqs = phba->cfg_nvme_io_channel; 9935 } 9936 lpfc_cpu_affinity_check(phba, vectors); 9937 9938 return rc; 9939 9940 cfg_fail_out: 9941 /* free the irq already requested */ 9942 for (--index; index >= 0; index--) 9943 free_irq(pci_irq_vector(phba->pcidev, index), 9944 &phba->sli4_hba.hba_eq_hdl[index]); 9945 9946 /* Unconfigure MSI-X capability structure */ 9947 pci_free_irq_vectors(phba->pcidev); 9948 9949 vec_fail_out: 9950 return rc; 9951 } 9952 9953 /** 9954 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 9955 * @phba: pointer to lpfc hba data structure. 9956 * 9957 * This routine is invoked to enable the MSI interrupt mode to device with 9958 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 9959 * to enable the MSI vector. The device driver is responsible for calling 9960 * the request_irq() to register MSI vector with a interrupt the handler, 9961 * which is done in this function. 
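 * Unlike the MSI-X path, the single MSI IRQ is shared by every event queue:
 * each hba_eq_hdl entry (and the FOF entry when configured) is still given
 * its index and phba back-pointer so the common handler can service all EQs.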
9962 * 9963 * Return codes 9964 * 0 - successful 9965 * other values - error 9966 **/ 9967 static int 9968 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 9969 { 9970 int rc, index; 9971 9972 rc = pci_enable_msi(phba->pcidev); 9973 if (!rc) 9974 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9975 "0487 PCI enable MSI mode success.\n"); 9976 else { 9977 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9978 "0488 PCI enable MSI mode failed (%d)\n", rc); 9979 return rc; 9980 } 9981 9982 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 9983 0, LPFC_DRIVER_NAME, phba); 9984 if (rc) { 9985 pci_disable_msi(phba->pcidev); 9986 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9987 "0490 MSI request_irq failed (%d)\n", rc); 9988 return rc; 9989 } 9990 9991 for (index = 0; index < phba->io_channel_irqs; index++) { 9992 phba->sli4_hba.hba_eq_hdl[index].idx = index; 9993 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 9994 } 9995 9996 if (phba->cfg_fof) { 9997 phba->sli4_hba.hba_eq_hdl[index].idx = index; 9998 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 9999 } 10000 return 0; 10001 } 10002 10003 /** 10004 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 10005 * @phba: pointer to lpfc hba data structure. 10006 * 10007 * This routine is invoked to enable device interrupt and associate driver's 10008 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 10009 * interface spec. Depends on the interrupt mode configured to the driver, 10010 * the driver will try to fallback from the configured interrupt mode to an 10011 * interrupt mode which is supported by the platform, kernel, and device in 10012 * the order of: 10013 * MSI-X -> MSI -> IRQ. 10014 * 10015 * Return codes 10016 * 0 - successful 10017 * other values - error 10018 **/ 10019 static uint32_t 10020 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10021 { 10022 uint32_t intr_mode = LPFC_INTR_ERROR; 10023 int retval, idx; 10024 10025 if (cfg_mode == 2) { 10026 /* Preparation before conf_msi mbox cmd */ 10027 retval = 0; 10028 if (!retval) { 10029 /* Now, try to enable MSI-X interrupt mode */ 10030 retval = lpfc_sli4_enable_msix(phba); 10031 if (!retval) { 10032 /* Indicate initialization to MSI-X mode */ 10033 phba->intr_type = MSIX; 10034 intr_mode = 2; 10035 } 10036 } 10037 } 10038 10039 /* Fallback to MSI if MSI-X initialization failed */ 10040 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10041 retval = lpfc_sli4_enable_msi(phba); 10042 if (!retval) { 10043 /* Indicate initialization to MSI mode */ 10044 phba->intr_type = MSI; 10045 intr_mode = 1; 10046 } 10047 } 10048 10049 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 10050 if (phba->intr_type == NONE) { 10051 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 10052 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10053 if (!retval) { 10054 struct lpfc_hba_eq_hdl *eqhdl; 10055 10056 /* Indicate initialization to INTx mode */ 10057 phba->intr_type = INTx; 10058 intr_mode = 0; 10059 10060 for (idx = 0; idx < phba->io_channel_irqs; idx++) { 10061 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; 10062 eqhdl->idx = idx; 10063 eqhdl->phba = phba; 10064 atomic_set(&eqhdl->hba_eq_in_use, 1); 10065 } 10066 if (phba->cfg_fof) { 10067 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; 10068 eqhdl->idx = idx; 10069 eqhdl->phba = phba; 10070 atomic_set(&eqhdl->hba_eq_in_use, 1); 10071 } 10072 } 10073 } 10074 return intr_mode; 10075 } 10076 10077 /** 10078 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 10079 * @phba: pointer to lpfc hba data 
structure. 10080 * 10081 * This routine is invoked to disable device interrupt and disassociate 10082 * the driver's interrupt handler(s) from interrupt vector(s) to device 10083 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 10084 * will release the interrupt vector(s) for the message signaled interrupt. 10085 **/ 10086 static void 10087 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 10088 { 10089 /* Disable the currently initialized interrupt mode */ 10090 if (phba->intr_type == MSIX) { 10091 int index; 10092 10093 /* Free up MSI-X multi-message vectors */ 10094 for (index = 0; index < phba->io_channel_irqs; index++) 10095 free_irq(pci_irq_vector(phba->pcidev, index), 10096 &phba->sli4_hba.hba_eq_hdl[index]); 10097 10098 if (phba->cfg_fof) 10099 free_irq(pci_irq_vector(phba->pcidev, index), 10100 &phba->sli4_hba.hba_eq_hdl[index]); 10101 } else { 10102 free_irq(phba->pcidev->irq, phba); 10103 } 10104 10105 pci_free_irq_vectors(phba->pcidev); 10106 10107 /* Reset interrupt management states */ 10108 phba->intr_type = NONE; 10109 phba->sli.slistat.sli_intr = 0; 10110 } 10111 10112 /** 10113 * lpfc_unset_hba - Unset SLI3 hba device initialization 10114 * @phba: pointer to lpfc hba data structure. 10115 * 10116 * This routine is invoked to unset the HBA device initialization steps to 10117 * a device with SLI-3 interface spec. 10118 **/ 10119 static void 10120 lpfc_unset_hba(struct lpfc_hba *phba) 10121 { 10122 struct lpfc_vport *vport = phba->pport; 10123 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10124 10125 spin_lock_irq(shost->host_lock); 10126 vport->load_flag |= FC_UNLOADING; 10127 spin_unlock_irq(shost->host_lock); 10128 10129 kfree(phba->vpi_bmask); 10130 kfree(phba->vpi_ids); 10131 10132 lpfc_stop_hba_timers(phba); 10133 10134 phba->pport->work_port_events = 0; 10135 10136 lpfc_sli_hba_down(phba); 10137 10138 lpfc_sli_brdrestart(phba); 10139 10140 lpfc_sli_disable_intr(phba); 10141 10142 return; 10143 } 10144 10145 /** 10146 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 10147 * @phba: Pointer to HBA context object. 10148 * 10149 * This function is called in the SLI4 code path to wait for completion 10150 * of device's XRIs exchange busy. It will check the XRI exchange busy 10151 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 10152 * that, it will check the XRI exchange busy on outstanding FCP and ELS 10153 * I/Os every 30 seconds, log error message, and wait forever. Only when 10154 * all XRI exchange busy complete, the driver unload shall proceed with 10155 * invoking the function reset ioctl mailbox command to the CNA and the 10156 * the rest of the driver unload resource release. 10157 **/ 10158 static void 10159 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 10160 { 10161 int wait_time = 0; 10162 int nvme_xri_cmpl = 1; 10163 int nvmet_xri_cmpl = 1; 10164 int fcp_xri_cmpl = 1; 10165 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 10166 10167 /* Driver just aborted IOs during the hba_unset process. Pause 10168 * here to give the HBA time to complete the IO and get entries 10169 * into the abts lists. 10170 */ 10171 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 10172 10173 /* Wait for NVME pending IO to flush back to transport. 
*/ 10174 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 10175 lpfc_nvme_wait_for_io_drain(phba); 10176 10177 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) 10178 fcp_xri_cmpl = 10179 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 10180 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10181 nvme_xri_cmpl = 10182 list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 10183 nvmet_xri_cmpl = 10184 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 10185 } 10186 10187 while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || 10188 !nvmet_xri_cmpl) { 10189 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 10190 if (!nvme_xri_cmpl) 10191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10192 "6100 NVME XRI exchange busy " 10193 "wait time: %d seconds.\n", 10194 wait_time/1000); 10195 if (!fcp_xri_cmpl) 10196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10197 "2877 FCP XRI exchange busy " 10198 "wait time: %d seconds.\n", 10199 wait_time/1000); 10200 if (!els_xri_cmpl) 10201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10202 "2878 ELS XRI exchange busy " 10203 "wait time: %d seconds.\n", 10204 wait_time/1000); 10205 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 10206 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 10207 } else { 10208 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 10209 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 10210 } 10211 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10212 nvme_xri_cmpl = list_empty( 10213 &phba->sli4_hba.lpfc_abts_nvme_buf_list); 10214 nvmet_xri_cmpl = list_empty( 10215 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 10216 } 10217 10218 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) 10219 fcp_xri_cmpl = list_empty( 10220 &phba->sli4_hba.lpfc_abts_scsi_buf_list); 10221 10222 els_xri_cmpl = 10223 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 10224 10225 } 10226 } 10227 10228 /** 10229 * lpfc_sli4_hba_unset - Unset the fcoe hba 10230 * @phba: Pointer to HBA context object. 10231 * 10232 * This function is called in the SLI4 code path to reset the HBA's FCoE 10233 * function. The caller is not required to hold any lock. This routine 10234 * issues PCI function reset mailbox command to reset the FCoE function. 10235 * At the end of the function, it calls lpfc_hba_down_post function to 10236 * free any pending commands. 10237 **/ 10238 static void 10239 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 10240 { 10241 int wait_cnt = 0; 10242 LPFC_MBOXQ_t *mboxq; 10243 struct pci_dev *pdev = phba->pcidev; 10244 10245 lpfc_stop_hba_timers(phba); 10246 phba->sli4_hba.intr_enable = 0; 10247 10248 /* 10249 * Gracefully wait out the potential current outstanding asynchronous 10250 * mailbox command. 
 */

	/* First, block any pending async mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, try to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
	lpfc_sli4_queue_unset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
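 *
 * Typical invocation (an illustrative sketch only; the mailbox memory is
 * taken from the HBA mailbox mempool as elsewhere in this file):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	rc = lpfc_pc_sli4_params_get(phba, mboxq);
 *	mempool_free(mboxq, phba->mbox_mem_pool);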
10314 **/ 10315 int 10316 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 10317 { 10318 int rc; 10319 struct lpfc_mqe *mqe; 10320 struct lpfc_pc_sli4_params *sli4_params; 10321 uint32_t mbox_tmo; 10322 10323 rc = 0; 10324 mqe = &mboxq->u.mqe; 10325 10326 /* Read the port's SLI4 Parameters port capabilities */ 10327 lpfc_pc_sli4_params(mboxq); 10328 if (!phba->sli4_hba.intr_enable) 10329 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10330 else { 10331 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 10332 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 10333 } 10334 10335 if (unlikely(rc)) 10336 return 1; 10337 10338 sli4_params = &phba->sli4_hba.pc_sli4_params; 10339 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 10340 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 10341 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 10342 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 10343 &mqe->un.sli4_params); 10344 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 10345 &mqe->un.sli4_params); 10346 sli4_params->proto_types = mqe->un.sli4_params.word3; 10347 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 10348 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 10349 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 10350 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 10351 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 10352 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 10353 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 10354 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 10355 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 10356 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 10357 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 10358 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 10359 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 10360 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 10361 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 10362 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 10363 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 10364 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 10365 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 10366 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 10367 10368 /* Make sure that sge_supp_len can be handled by the driver */ 10369 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 10370 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 10371 10372 return rc; 10373 } 10374 10375 /** 10376 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 10377 * @phba: Pointer to HBA context object. 10378 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 10379 * 10380 * This function is called in the SLI4 code path to read the port's 10381 * sli4 capabilities. 10382 * 10383 * This function may be be called from any context that can block-wait 10384 * for the completion. The expectation is that this routine is called 10385 * typically from probe_one or from the online routine. 
10386 **/ 10387 int 10388 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 10389 { 10390 int rc; 10391 struct lpfc_mqe *mqe = &mboxq->u.mqe; 10392 struct lpfc_pc_sli4_params *sli4_params; 10393 uint32_t mbox_tmo; 10394 int length; 10395 struct lpfc_sli4_parameters *mbx_sli4_parameters; 10396 10397 /* 10398 * By default, the driver assumes the SLI4 port requires RPI 10399 * header postings. The SLI4_PARAM response will correct this 10400 * assumption. 10401 */ 10402 phba->sli4_hba.rpi_hdrs_in_use = 1; 10403 10404 /* Read the port's SLI4 Config Parameters */ 10405 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 10406 sizeof(struct lpfc_sli4_cfg_mhdr)); 10407 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10408 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 10409 length, LPFC_SLI4_MBX_EMBED); 10410 if (!phba->sli4_hba.intr_enable) 10411 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10412 else { 10413 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 10414 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 10415 } 10416 if (unlikely(rc)) 10417 return rc; 10418 sli4_params = &phba->sli4_hba.pc_sli4_params; 10419 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 10420 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 10421 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 10422 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 10423 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 10424 mbx_sli4_parameters); 10425 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 10426 mbx_sli4_parameters); 10427 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 10428 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 10429 else 10430 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 10431 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 10432 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 10433 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 10434 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 10435 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 10436 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 10437 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 10438 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 10439 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 10440 mbx_sli4_parameters); 10441 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 10442 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 10443 mbx_sli4_parameters); 10444 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 10445 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 10446 phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) && 10447 bf_get(cfg_xib, mbx_sli4_parameters)); 10448 10449 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) || 10450 !phba->nvme_support) { 10451 phba->nvme_support = 0; 10452 phba->nvmet_support = 0; 10453 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF; 10454 phba->cfg_nvme_io_channel = 0; 10455 phba->io_channel_irqs = phba->cfg_fcp_io_channel; 10456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 10457 "6101 Disabling NVME support: " 10458 "Not supported by firmware: %d %d\n", 10459 bf_get(cfg_nvme, mbx_sli4_parameters), 10460 bf_get(cfg_xib, mbx_sli4_parameters)); 10461 10462 /* If firmware doesn't support NVME, just use SCSI support */ 10463 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 10464 return -ENODEV; 10465 phba->cfg_enable_fc4_type = 
						LPFC_ENABLE_FCP;
	}

	if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Issue IOs with the CDB embedded in the WQE to minimize the number
	 * of DMAs the firmware has to do. Setting this to 1 also forces
	 * the driver to use 128-byte WQEs for FCP IOs.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	return 0;
}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device and the driver to see whether
 * the driver states that it can support this kind of device. If the match
 * is successful, the driver core invokes this routine. If this routine
 * determines it can claim the HBA, it does all the initialization that it
 * needs to do to handle the HBA properly.
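 * The probe sequence is roughly: enable the PCI device, map the SLI-3
 * register space, set up driver resources and the iocb list, create the
 * SCSI host and sysfs attributes, then enable interrupts (falling back from
 * MSI-X to MSI to INTx) and bring the HBA up.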
10510 * 10511 * Return code 10512 * 0 - driver can claim the device 10513 * negative value - driver can not claim the device 10514 **/ 10515 static int 10516 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 10517 { 10518 struct lpfc_hba *phba; 10519 struct lpfc_vport *vport = NULL; 10520 struct Scsi_Host *shost = NULL; 10521 int error; 10522 uint32_t cfg_mode, intr_mode; 10523 10524 /* Allocate memory for HBA structure */ 10525 phba = lpfc_hba_alloc(pdev); 10526 if (!phba) 10527 return -ENOMEM; 10528 10529 /* Perform generic PCI device enabling operation */ 10530 error = lpfc_enable_pci_dev(phba); 10531 if (error) 10532 goto out_free_phba; 10533 10534 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 10535 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 10536 if (error) 10537 goto out_disable_pci_dev; 10538 10539 /* Set up SLI-3 specific device PCI memory space */ 10540 error = lpfc_sli_pci_mem_setup(phba); 10541 if (error) { 10542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10543 "1402 Failed to set up pci memory space.\n"); 10544 goto out_disable_pci_dev; 10545 } 10546 10547 /* Set up SLI-3 specific device driver resources */ 10548 error = lpfc_sli_driver_resource_setup(phba); 10549 if (error) { 10550 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10551 "1404 Failed to set up driver resource.\n"); 10552 goto out_unset_pci_mem_s3; 10553 } 10554 10555 /* Initialize and populate the iocb list per host */ 10556 10557 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 10558 if (error) { 10559 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10560 "1405 Failed to initialize iocb list.\n"); 10561 goto out_unset_driver_resource_s3; 10562 } 10563 10564 /* Set up common device driver resources */ 10565 error = lpfc_setup_driver_resource_phase2(phba); 10566 if (error) { 10567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10568 "1406 Failed to set up driver resource.\n"); 10569 goto out_free_iocb_list; 10570 } 10571 10572 /* Get the default values for Model Name and Description */ 10573 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 10574 10575 /* Create SCSI host to the physical port */ 10576 error = lpfc_create_shost(phba); 10577 if (error) { 10578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10579 "1407 Failed to create scsi host.\n"); 10580 goto out_unset_driver_resource; 10581 } 10582 10583 /* Configure sysfs attributes */ 10584 vport = phba->pport; 10585 error = lpfc_alloc_sysfs_attr(vport); 10586 if (error) { 10587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10588 "1476 Failed to allocate sysfs attr\n"); 10589 goto out_destroy_shost; 10590 } 10591 10592 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 10593 /* Now, trying to enable interrupt and bring up the device */ 10594 cfg_mode = phba->cfg_use_msi; 10595 while (true) { 10596 /* Put device to a known state before enabling interrupt */ 10597 lpfc_stop_port(phba); 10598 /* Configure and enable interrupt */ 10599 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 10600 if (intr_mode == LPFC_INTR_ERROR) { 10601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10602 "0431 Failed to enable interrupt.\n"); 10603 error = -ENODEV; 10604 goto out_free_sysfs_attr; 10605 } 10606 /* SLI-3 HBA setup */ 10607 if (lpfc_sli_hba_setup(phba)) { 10608 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10609 "1477 Failed to set up hba\n"); 10610 error = -ENODEV; 10611 goto out_remove_device; 10612 } 10613 10614 /* Wait 50ms for the interrupts of previous mailbox commands */ 10615 msleep(50); 10616 /* 
Check active interrupts on message signaled interrupts */ 10617 if (intr_mode == 0 || 10618 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 10619 /* Log the current active interrupt mode */ 10620 phba->intr_mode = intr_mode; 10621 lpfc_log_intr_mode(phba, intr_mode); 10622 break; 10623 } else { 10624 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10625 "0447 Configure interrupt mode (%d) " 10626 "failed active interrupt test.\n", 10627 intr_mode); 10628 /* Disable the current interrupt mode */ 10629 lpfc_sli_disable_intr(phba); 10630 /* Try next level of interrupt mode */ 10631 cfg_mode = --intr_mode; 10632 } 10633 } 10634 10635 /* Perform post initialization setup */ 10636 lpfc_post_init_setup(phba); 10637 10638 /* Check if there are static vports to be created. */ 10639 lpfc_create_static_vport(phba); 10640 10641 return 0; 10642 10643 out_remove_device: 10644 lpfc_unset_hba(phba); 10645 out_free_sysfs_attr: 10646 lpfc_free_sysfs_attr(vport); 10647 out_destroy_shost: 10648 lpfc_destroy_shost(phba); 10649 out_unset_driver_resource: 10650 lpfc_unset_driver_resource_phase2(phba); 10651 out_free_iocb_list: 10652 lpfc_free_iocb_list(phba); 10653 out_unset_driver_resource_s3: 10654 lpfc_sli_driver_resource_unset(phba); 10655 out_unset_pci_mem_s3: 10656 lpfc_sli_pci_mem_unset(phba); 10657 out_disable_pci_dev: 10658 lpfc_disable_pci_dev(phba); 10659 if (shost) 10660 scsi_host_put(shost); 10661 out_free_phba: 10662 lpfc_hba_free(phba); 10663 return error; 10664 } 10665 10666 /** 10667 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 10668 * @pdev: pointer to PCI device 10669 * 10670 * This routine is to be called to disattach a device with SLI-3 interface 10671 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 10672 * removed from PCI bus, it performs all the necessary cleanup for the HBA 10673 * device to be removed from the PCI subsystem properly. 10674 **/ 10675 static void 10676 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 10677 { 10678 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10679 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 10680 struct lpfc_vport **vports; 10681 struct lpfc_hba *phba = vport->phba; 10682 int i; 10683 10684 spin_lock_irq(&phba->hbalock); 10685 vport->load_flag |= FC_UNLOADING; 10686 spin_unlock_irq(&phba->hbalock); 10687 10688 lpfc_free_sysfs_attr(vport); 10689 10690 /* Release all the vports against this physical port */ 10691 vports = lpfc_create_vport_work_array(phba); 10692 if (vports != NULL) 10693 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10694 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 10695 continue; 10696 fc_vport_terminate(vports[i]->fc_vport); 10697 } 10698 lpfc_destroy_vport_work_array(phba, vports); 10699 10700 /* Remove FC host and then SCSI host with the physical port */ 10701 fc_remove_host(shost); 10702 scsi_remove_host(shost); 10703 10704 lpfc_cleanup(vport); 10705 10706 /* 10707 * Bring down the SLI Layer. This step disable all interrupts, 10708 * clears the rings, discards all mailbox commands, and resets 10709 * the HBA. 
10710 */ 10711 10712 /* HBA interrupt will be disabled after this call */ 10713 lpfc_sli_hba_down(phba); 10714 /* Stop kthread signal shall trigger work_done one more time */ 10715 kthread_stop(phba->worker_thread); 10716 /* Final cleanup of txcmplq and reset the HBA */ 10717 lpfc_sli_brdrestart(phba); 10718 10719 kfree(phba->vpi_bmask); 10720 kfree(phba->vpi_ids); 10721 10722 lpfc_stop_hba_timers(phba); 10723 spin_lock_irq(&phba->hbalock); 10724 list_del_init(&vport->listentry); 10725 spin_unlock_irq(&phba->hbalock); 10726 10727 lpfc_debugfs_terminate(vport); 10728 10729 /* Disable SR-IOV if enabled */ 10730 if (phba->cfg_sriov_nr_virtfn) 10731 pci_disable_sriov(pdev); 10732 10733 /* Disable interrupt */ 10734 lpfc_sli_disable_intr(phba); 10735 10736 scsi_host_put(shost); 10737 10738 /* 10739 * Call scsi_free before mem_free since scsi bufs are released to their 10740 * corresponding pools here. 10741 */ 10742 lpfc_scsi_free(phba); 10743 lpfc_mem_free_all(phba); 10744 10745 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 10746 phba->hbqslimp.virt, phba->hbqslimp.phys); 10747 10748 /* Free resources associated with SLI2 interface */ 10749 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 10750 phba->slim2p.virt, phba->slim2p.phys); 10751 10752 /* unmap adapter SLIM and Control Registers */ 10753 iounmap(phba->ctrl_regs_memmap_p); 10754 iounmap(phba->slim_memmap_p); 10755 10756 lpfc_hba_free(phba); 10757 10758 pci_release_mem_regions(pdev); 10759 pci_disable_device(pdev); 10760 } 10761 10762 /** 10763 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 10764 * @pdev: pointer to PCI device 10765 * @msg: power management message 10766 * 10767 * This routine is to be called from the kernel's PCI subsystem to support 10768 * system Power Management (PM) to device with SLI-3 interface spec. When 10769 * PM invokes this method, it quiesces the device by stopping the driver's 10770 * worker thread for the device, turning off device's interrupt and DMA, 10771 * and bring the device offline. Note that as the driver implements the 10772 * minimum PM requirements to a power-aware driver's PM support for the 10773 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 10774 * to the suspend() method call will be treated as SUSPEND and the driver will 10775 * fully reinitialize its device during resume() method call, the driver will 10776 * set device to PCI_D3hot state in PCI config space instead of setting it 10777 * according to the @msg provided by the PM. 
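 *
 * The quiesce sequence below is: lpfc_offline_prep() and lpfc_offline()
 * take the port down, kthread_stop() ends the worker thread,
 * lpfc_sli_disable_intr() turns off the device interrupt, and the PCI
 * state is saved before the device is put into PCI_D3hot.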
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter.
*/ 10851 phba->worker_thread = kthread_run(lpfc_do_work, phba, 10852 "lpfc_worker_%d", phba->brd_no); 10853 if (IS_ERR(phba->worker_thread)) { 10854 error = PTR_ERR(phba->worker_thread); 10855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10856 "0434 PM resume failed to start worker " 10857 "thread: error=x%x.\n", error); 10858 return error; 10859 } 10860 10861 /* Configure and enable interrupt */ 10862 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 10863 if (intr_mode == LPFC_INTR_ERROR) { 10864 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10865 "0430 PM resume Failed to enable interrupt\n"); 10866 return -EIO; 10867 } else 10868 phba->intr_mode = intr_mode; 10869 10870 /* Restart HBA and bring it online */ 10871 lpfc_sli_brdrestart(phba); 10872 lpfc_online(phba); 10873 10874 /* Log the current active interrupt mode */ 10875 lpfc_log_intr_mode(phba, phba->intr_mode); 10876 10877 return 0; 10878 } 10879 10880 /** 10881 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 10882 * @phba: pointer to lpfc hba data structure. 10883 * 10884 * This routine is called to prepare the SLI3 device for PCI slot recover. It 10885 * aborts all the outstanding SCSI I/Os to the pci device. 10886 **/ 10887 static void 10888 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 10889 { 10890 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10891 "2723 PCI channel I/O abort preparing for recovery\n"); 10892 10893 /* 10894 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 10895 * and let the SCSI mid-layer to retry them to recover. 10896 */ 10897 lpfc_sli_abort_fcp_rings(phba); 10898 } 10899 10900 /** 10901 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 10902 * @phba: pointer to lpfc hba data structure. 10903 * 10904 * This routine is called to prepare the SLI3 device for PCI slot reset. It 10905 * disables the device interrupt and pci device, and aborts the internal FCP 10906 * pending I/Os. 10907 **/ 10908 static void 10909 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 10910 { 10911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10912 "2710 PCI channel disable preparing for reset\n"); 10913 10914 /* Block any management I/Os to the device */ 10915 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 10916 10917 /* Block all SCSI devices' I/Os on the host */ 10918 lpfc_scsi_dev_block(phba); 10919 10920 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 10921 lpfc_sli_flush_fcp_rings(phba); 10922 10923 /* stop all timers */ 10924 lpfc_stop_hba_timers(phba); 10925 10926 /* Disable interrupt and pci device */ 10927 lpfc_sli_disable_intr(phba); 10928 pci_disable_device(phba->pcidev); 10929 } 10930 10931 /** 10932 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 10933 * @phba: pointer to lpfc hba data structure. 10934 * 10935 * This routine is called to prepare the SLI3 device for PCI slot permanently 10936 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 10937 * pending I/Os. 
10938 **/ 10939 static void 10940 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 10941 { 10942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10943 "2711 PCI channel permanent disable for failure\n"); 10944 /* Block all SCSI devices' I/Os on the host */ 10945 lpfc_scsi_dev_block(phba); 10946 10947 /* stop all timers */ 10948 lpfc_stop_hba_timers(phba); 10949 10950 /* Clean up all driver's outstanding SCSI I/Os */ 10951 lpfc_sli_flush_fcp_rings(phba); 10952 } 10953 10954 /** 10955 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 10956 * @pdev: pointer to PCI device. 10957 * @state: the current PCI connection state. 10958 * 10959 * This routine is called from the PCI subsystem for I/O error handling to 10960 * device with SLI-3 interface spec. This function is called by the PCI 10961 * subsystem after a PCI bus error affecting this device has been detected. 10962 * When this function is invoked, it will need to stop all the I/Os and 10963 * interrupt(s) to the device. Once that is done, it will return 10964 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 10965 * as desired. 10966 * 10967 * Return codes 10968 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 10969 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 10970 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10971 **/ 10972 static pci_ers_result_t 10973 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 10974 { 10975 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10976 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10977 10978 switch (state) { 10979 case pci_channel_io_normal: 10980 /* Non-fatal error, prepare for recovery */ 10981 lpfc_sli_prep_dev_for_recover(phba); 10982 return PCI_ERS_RESULT_CAN_RECOVER; 10983 case pci_channel_io_frozen: 10984 /* Fatal error, prepare for slot reset */ 10985 lpfc_sli_prep_dev_for_reset(phba); 10986 return PCI_ERS_RESULT_NEED_RESET; 10987 case pci_channel_io_perm_failure: 10988 /* Permanent failure, prepare for device down */ 10989 lpfc_sli_prep_dev_for_perm_failure(phba); 10990 return PCI_ERS_RESULT_DISCONNECT; 10991 default: 10992 /* Unknown state, prepare and request slot reset */ 10993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10994 "0472 Unknown PCI error state: x%x\n", state); 10995 lpfc_sli_prep_dev_for_reset(phba); 10996 return PCI_ERS_RESULT_NEED_RESET; 10997 } 10998 } 10999 11000 /** 11001 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 11002 * @pdev: pointer to PCI device. 11003 * 11004 * This routine is called from the PCI subsystem for error handling to 11005 * device with SLI-3 interface spec. This is called after PCI bus has been 11006 * reset to restart the PCI card from scratch, as if from a cold-boot. 11007 * During the PCI subsystem error recovery, after driver returns 11008 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 11009 * recovery and then call this routine before calling the .resume method 11010 * to recover the device. This function will initialize the HBA device, 11011 * enable the interrupt, but it will just put the HBA to offline state 11012 * without passing any I/O traffic. 
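 *
 * Typical recovery sequence driven by the PCI error-recovery core:
 *   lpfc_io_error_detected_s3()  - returns PCI_ERS_RESULT_NEED_RESET
 *   lpfc_io_slot_reset_s3()      - re-enables the device, restores PCI
 *                                  state, re-enables the interrupt and
 *                                  takes the port offline
 *   lpfc_io_resume_s3()          - brings the port back online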
11013 * 11014 * Return codes 11015 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 11016 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11017 */ 11018 static pci_ers_result_t 11019 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 11020 { 11021 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11022 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11023 struct lpfc_sli *psli = &phba->sli; 11024 uint32_t intr_mode; 11025 11026 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 11027 if (pci_enable_device_mem(pdev)) { 11028 printk(KERN_ERR "lpfc: Cannot re-enable " 11029 "PCI device after reset.\n"); 11030 return PCI_ERS_RESULT_DISCONNECT; 11031 } 11032 11033 pci_restore_state(pdev); 11034 11035 /* 11036 * As the new kernel behavior of pci_restore_state() API call clears 11037 * device saved_state flag, need to save the restored state again. 11038 */ 11039 pci_save_state(pdev); 11040 11041 if (pdev->is_busmaster) 11042 pci_set_master(pdev); 11043 11044 spin_lock_irq(&phba->hbalock); 11045 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 11046 spin_unlock_irq(&phba->hbalock); 11047 11048 /* Configure and enable interrupt */ 11049 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 11050 if (intr_mode == LPFC_INTR_ERROR) { 11051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11052 "0427 Cannot re-enable interrupt after " 11053 "slot reset.\n"); 11054 return PCI_ERS_RESULT_DISCONNECT; 11055 } else 11056 phba->intr_mode = intr_mode; 11057 11058 /* Take device offline, it will perform cleanup */ 11059 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11060 lpfc_offline(phba); 11061 lpfc_sli_brdrestart(phba); 11062 11063 /* Log the current active interrupt mode */ 11064 lpfc_log_intr_mode(phba, phba->intr_mode); 11065 11066 return PCI_ERS_RESULT_RECOVERED; 11067 } 11068 11069 /** 11070 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 11071 * @pdev: pointer to PCI device 11072 * 11073 * This routine is called from the PCI subsystem for error handling to device 11074 * with SLI-3 interface spec. It is called when kernel error recovery tells 11075 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 11076 * error recovery. After this call, traffic can start to flow from this device 11077 * again. 11078 */ 11079 static void 11080 lpfc_io_resume_s3(struct pci_dev *pdev) 11081 { 11082 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11083 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11084 11085 /* Bring device online, it will be no-op for non-fatal error resume */ 11086 lpfc_online(phba); 11087 11088 /* Clean up Advanced Error Reporting (AER) if needed */ 11089 if (phba->hba_flag & HBA_AER_ENABLED) 11090 pci_cleanup_aer_uncorrect_error_status(pdev); 11091 } 11092 11093 /** 11094 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 11095 * @phba: pointer to lpfc hba data structure. 
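 *
 * The reservation scales with the number of XRIs configured for the
 * port; for example, a port reporting max_xri of 1024 reserves 100
 * IOCBs and one reporting 2048 reserves 200 (see the table below).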
11096 * 11097 * returns the number of ELS/CT IOCBs to reserve 11098 **/ 11099 int 11100 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 11101 { 11102 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 11103 11104 if (phba->sli_rev == LPFC_SLI_REV4) { 11105 if (max_xri <= 100) 11106 return 10; 11107 else if (max_xri <= 256) 11108 return 25; 11109 else if (max_xri <= 512) 11110 return 50; 11111 else if (max_xri <= 1024) 11112 return 100; 11113 else if (max_xri <= 1536) 11114 return 150; 11115 else if (max_xri <= 2048) 11116 return 200; 11117 else 11118 return 250; 11119 } else 11120 return 0; 11121 } 11122 11123 /** 11124 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 11125 * @phba: pointer to lpfc hba data structure. 11126 * 11127 * returns the number of ELS/CT + NVMET IOCBs to reserve 11128 **/ 11129 int 11130 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 11131 { 11132 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 11133 11134 if (phba->nvmet_support) 11135 max_xri += LPFC_NVMET_BUF_POST; 11136 return max_xri; 11137 } 11138 11139 11140 /** 11141 * lpfc_write_firmware - attempt to write a firmware image to the port 11142 * @fw: pointer to firmware image returned from request_firmware. 11143 * @phba: pointer to lpfc hba data structure. 11144 * 11145 **/ 11146 static void 11147 lpfc_write_firmware(const struct firmware *fw, void *context) 11148 { 11149 struct lpfc_hba *phba = (struct lpfc_hba *)context; 11150 char fwrev[FW_REV_STR_SIZE]; 11151 struct lpfc_grp_hdr *image; 11152 struct list_head dma_buffer_list; 11153 int i, rc = 0; 11154 struct lpfc_dmabuf *dmabuf, *next; 11155 uint32_t offset = 0, temp_offset = 0; 11156 uint32_t magic_number, ftype, fid, fsize; 11157 11158 /* It can be null in no-wait mode, sanity check */ 11159 if (!fw) { 11160 rc = -ENXIO; 11161 goto out; 11162 } 11163 image = (struct lpfc_grp_hdr *)fw->data; 11164 11165 magic_number = be32_to_cpu(image->magic_number); 11166 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 11167 fid = bf_get_be32(lpfc_grp_hdr_id, image), 11168 fsize = be32_to_cpu(image->size); 11169 11170 INIT_LIST_HEAD(&dma_buffer_list); 11171 if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 && 11172 magic_number != LPFC_GROUP_OJECT_MAGIC_G6) || 11173 ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) { 11174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11175 "3022 Invalid FW image found. 
" 11176 "Magic:%x Type:%x ID:%x Size %d %zd\n", 11177 magic_number, ftype, fid, fsize, fw->size); 11178 rc = -EINVAL; 11179 goto release_out; 11180 } 11181 lpfc_decode_firmware_rev(phba, fwrev, 1); 11182 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 11183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11184 "3023 Updating Firmware, Current Version:%s " 11185 "New Version:%s\n", 11186 fwrev, image->revision); 11187 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 11188 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 11189 GFP_KERNEL); 11190 if (!dmabuf) { 11191 rc = -ENOMEM; 11192 goto release_out; 11193 } 11194 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 11195 SLI4_PAGE_SIZE, 11196 &dmabuf->phys, 11197 GFP_KERNEL); 11198 if (!dmabuf->virt) { 11199 kfree(dmabuf); 11200 rc = -ENOMEM; 11201 goto release_out; 11202 } 11203 list_add_tail(&dmabuf->list, &dma_buffer_list); 11204 } 11205 while (offset < fw->size) { 11206 temp_offset = offset; 11207 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 11208 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 11209 memcpy(dmabuf->virt, 11210 fw->data + temp_offset, 11211 fw->size - temp_offset); 11212 temp_offset = fw->size; 11213 break; 11214 } 11215 memcpy(dmabuf->virt, fw->data + temp_offset, 11216 SLI4_PAGE_SIZE); 11217 temp_offset += SLI4_PAGE_SIZE; 11218 } 11219 rc = lpfc_wr_object(phba, &dma_buffer_list, 11220 (fw->size - offset), &offset); 11221 if (rc) 11222 goto release_out; 11223 } 11224 rc = offset; 11225 } 11226 11227 release_out: 11228 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 11229 list_del(&dmabuf->list); 11230 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 11231 dmabuf->virt, dmabuf->phys); 11232 kfree(dmabuf); 11233 } 11234 release_firmware(fw); 11235 out: 11236 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11237 "3024 Firmware update done: %d.\n", rc); 11238 return; 11239 } 11240 11241 /** 11242 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 11243 * @phba: pointer to lpfc hba data structure. 11244 * 11245 * This routine is called to perform Linux generic firmware upgrade on device 11246 * that supports such feature. 11247 **/ 11248 int 11249 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 11250 { 11251 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 11252 int ret; 11253 const struct firmware *fw; 11254 11255 /* Only supported on SLI4 interface type 2 for now */ 11256 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 11257 LPFC_SLI_INTF_IF_TYPE_2) 11258 return -EPERM; 11259 11260 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 11261 11262 if (fw_upgrade == INT_FW_UPGRADE) { 11263 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 11264 file_name, &phba->pcidev->dev, 11265 GFP_KERNEL, (void *)phba, 11266 lpfc_write_firmware); 11267 } else if (fw_upgrade == RUN_FW_UPGRADE) { 11268 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 11269 if (!ret) 11270 lpfc_write_firmware(fw, (void *)phba); 11271 } else { 11272 ret = -EINVAL; 11273 } 11274 11275 return ret; 11276 } 11277 11278 /** 11279 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 11280 * @pdev: pointer to PCI device 11281 * @pid: pointer to PCI device identifier 11282 * 11283 * This routine is called from the kernel's PCI subsystem to device with 11284 * SLI-4 interface spec. 
When an Emulex HBA with SLI-4 interface spec is 11285 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 11286 * information of the device and driver to see if the driver state that it 11287 * can support this kind of device. If the match is successful, the driver 11288 * core invokes this routine. If this routine determines it can claim the HBA, 11289 * it does all the initialization that it needs to do to handle the HBA 11290 * properly. 11291 * 11292 * Return code 11293 * 0 - driver can claim the device 11294 * negative value - driver can not claim the device 11295 **/ 11296 static int 11297 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 11298 { 11299 struct lpfc_hba *phba; 11300 struct lpfc_vport *vport = NULL; 11301 struct Scsi_Host *shost = NULL; 11302 int error; 11303 uint32_t cfg_mode, intr_mode; 11304 11305 /* Allocate memory for HBA structure */ 11306 phba = lpfc_hba_alloc(pdev); 11307 if (!phba) 11308 return -ENOMEM; 11309 11310 /* Perform generic PCI device enabling operation */ 11311 error = lpfc_enable_pci_dev(phba); 11312 if (error) 11313 goto out_free_phba; 11314 11315 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 11316 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 11317 if (error) 11318 goto out_disable_pci_dev; 11319 11320 /* Set up SLI-4 specific device PCI memory space */ 11321 error = lpfc_sli4_pci_mem_setup(phba); 11322 if (error) { 11323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11324 "1410 Failed to set up pci memory space.\n"); 11325 goto out_disable_pci_dev; 11326 } 11327 11328 /* Set up SLI-4 Specific device driver resources */ 11329 error = lpfc_sli4_driver_resource_setup(phba); 11330 if (error) { 11331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11332 "1412 Failed to set up driver resource.\n"); 11333 goto out_unset_pci_mem_s4; 11334 } 11335 11336 INIT_LIST_HEAD(&phba->active_rrq_list); 11337 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 11338 11339 /* Set up common device driver resources */ 11340 error = lpfc_setup_driver_resource_phase2(phba); 11341 if (error) { 11342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11343 "1414 Failed to set up driver resource.\n"); 11344 goto out_unset_driver_resource_s4; 11345 } 11346 11347 /* Get the default values for Model Name and Description */ 11348 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 11349 11350 /* Create SCSI host to the physical port */ 11351 error = lpfc_create_shost(phba); 11352 if (error) { 11353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11354 "1415 Failed to create scsi host.\n"); 11355 goto out_unset_driver_resource; 11356 } 11357 11358 /* Configure sysfs attributes */ 11359 vport = phba->pport; 11360 error = lpfc_alloc_sysfs_attr(vport); 11361 if (error) { 11362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11363 "1416 Failed to allocate sysfs attr\n"); 11364 goto out_destroy_shost; 11365 } 11366 11367 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 11368 /* Now, trying to enable interrupt and bring up the device */ 11369 cfg_mode = phba->cfg_use_msi; 11370 11371 /* Put device to a known state before enabling interrupt */ 11372 lpfc_stop_port(phba); 11373 11374 /* Configure and enable interrupt */ 11375 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 11376 if (intr_mode == LPFC_INTR_ERROR) { 11377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11378 "0426 Failed to enable interrupt.\n"); 11379 error = -ENODEV; 11380 goto out_free_sysfs_attr; 11381 } 11382 /* Default to single EQ for 
non-MSI-X */ 11383 if (phba->intr_type != MSIX) { 11384 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) 11385 phba->cfg_fcp_io_channel = 1; 11386 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11387 phba->cfg_nvme_io_channel = 1; 11388 if (phba->nvmet_support) 11389 phba->cfg_nvmet_mrq = 1; 11390 } 11391 phba->io_channel_irqs = 1; 11392 } 11393 11394 /* Set up SLI-4 HBA */ 11395 if (lpfc_sli4_hba_setup(phba)) { 11396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11397 "1421 Failed to set up hba\n"); 11398 error = -ENODEV; 11399 goto out_disable_intr; 11400 } 11401 11402 /* Log the current active interrupt mode */ 11403 phba->intr_mode = intr_mode; 11404 lpfc_log_intr_mode(phba, intr_mode); 11405 11406 /* Perform post initialization setup */ 11407 lpfc_post_init_setup(phba); 11408 11409 /* NVME support in FW earlier in the driver load corrects the 11410 * FC4 type making a check for nvme_support unnecessary. 11411 */ 11412 if ((phba->nvmet_support == 0) && 11413 (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { 11414 /* Create NVME binding with nvme_fc_transport. This 11415 * ensures the vport is initialized. If the localport 11416 * create fails, it should not unload the driver to 11417 * support field issues. 11418 */ 11419 error = lpfc_nvme_create_localport(vport); 11420 if (error) { 11421 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11422 "6004 NVME registration failed, " 11423 "error x%x\n", 11424 error); 11425 } 11426 } 11427 11428 /* check for firmware upgrade or downgrade */ 11429 if (phba->cfg_request_firmware_upgrade) 11430 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 11431 11432 /* Check if there are static vports to be created. */ 11433 lpfc_create_static_vport(phba); 11434 return 0; 11435 11436 out_disable_intr: 11437 lpfc_sli4_disable_intr(phba); 11438 out_free_sysfs_attr: 11439 lpfc_free_sysfs_attr(vport); 11440 out_destroy_shost: 11441 lpfc_destroy_shost(phba); 11442 out_unset_driver_resource: 11443 lpfc_unset_driver_resource_phase2(phba); 11444 out_unset_driver_resource_s4: 11445 lpfc_sli4_driver_resource_unset(phba); 11446 out_unset_pci_mem_s4: 11447 lpfc_sli4_pci_mem_unset(phba); 11448 out_disable_pci_dev: 11449 lpfc_disable_pci_dev(phba); 11450 if (shost) 11451 scsi_host_put(shost); 11452 out_free_phba: 11453 lpfc_hba_free(phba); 11454 return error; 11455 } 11456 11457 /** 11458 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 11459 * @pdev: pointer to PCI device 11460 * 11461 * This routine is called from the kernel's PCI subsystem to device with 11462 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 11463 * removed from PCI bus, it performs all the necessary cleanup for the HBA 11464 * device to be removed from the PCI subsystem properly. 
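 *
 * The teardown below is roughly the reverse of lpfc_pci_probe_one_s4():
 * the vports, FC host and SCSI host are removed first, then
 * lpfc_sli4_hba_unset() brings down the SLI layer, the nvmet targetport
 * and nvme localport are destroyed, and finally the buffer lists, driver
 * resources and PCI resources are released.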
11465 **/ 11466 static void 11467 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 11468 { 11469 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11470 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 11471 struct lpfc_vport **vports; 11472 struct lpfc_hba *phba = vport->phba; 11473 int i; 11474 11475 /* Mark the device unloading flag */ 11476 spin_lock_irq(&phba->hbalock); 11477 vport->load_flag |= FC_UNLOADING; 11478 spin_unlock_irq(&phba->hbalock); 11479 11480 /* Free the HBA sysfs attributes */ 11481 lpfc_free_sysfs_attr(vport); 11482 11483 /* Release all the vports against this physical port */ 11484 vports = lpfc_create_vport_work_array(phba); 11485 if (vports != NULL) 11486 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11487 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 11488 continue; 11489 fc_vport_terminate(vports[i]->fc_vport); 11490 } 11491 lpfc_destroy_vport_work_array(phba, vports); 11492 11493 /* Remove FC host and then SCSI host with the physical port */ 11494 fc_remove_host(shost); 11495 scsi_remove_host(shost); 11496 /* 11497 * Bring down the SLI Layer. This step disables all interrupts, 11498 * clears the rings, discards all mailbox commands, and resets 11499 * the HBA FCoE function. 11500 */ 11501 lpfc_debugfs_terminate(vport); 11502 lpfc_sli4_hba_unset(phba); 11503 11504 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 11505 * localports are destroyed after to cleanup all transport memory. 11506 */ 11507 lpfc_cleanup(vport); 11508 lpfc_nvmet_destroy_targetport(phba); 11509 lpfc_nvme_destroy_localport(vport); 11510 11511 11512 lpfc_stop_hba_timers(phba); 11513 spin_lock_irq(&phba->hbalock); 11514 list_del_init(&vport->listentry); 11515 spin_unlock_irq(&phba->hbalock); 11516 11517 /* Perform scsi free before driver resource_unset since scsi 11518 * buffers are released to their corresponding pools here. 11519 */ 11520 lpfc_scsi_free(phba); 11521 lpfc_nvme_free(phba); 11522 lpfc_free_iocb_list(phba); 11523 11524 lpfc_sli4_driver_resource_unset(phba); 11525 11526 /* Unmap adapter Control and Doorbell registers */ 11527 lpfc_sli4_pci_mem_unset(phba); 11528 11529 /* Release PCI resources and disable device's PCI function */ 11530 scsi_host_put(shost); 11531 lpfc_disable_pci_dev(phba); 11532 11533 /* Finally, free the driver's device data structure */ 11534 lpfc_hba_free(phba); 11535 11536 return; 11537 } 11538 11539 /** 11540 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 11541 * @pdev: pointer to PCI device 11542 * @msg: power management message 11543 * 11544 * This routine is called from the kernel's PCI subsystem to support system 11545 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 11546 * this method, it quiesces the device by stopping the driver's worker 11547 * thread for the device, turning off device's interrupt and DMA, and bring 11548 * the device offline. Note that as the driver implements the minimum PM 11549 * requirements to a power-aware driver's PM support for suspend/resume -- all 11550 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 11551 * method call will be treated as SUSPEND and the driver will fully 11552 * reinitialize its device during resume() method call, the driver will set 11553 * device to PCI_D3hot state in PCI config space instead of setting it 11554 * according to the @msg provided by the PM. 
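 *
 * Unlike the SLI-3 path, the SLI-4 suspend also tears down the queue set
 * with lpfc_sli4_queue_destroy() after disabling the device interrupt,
 * which is why resume() must fully reinitialize the device as noted above.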
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter.
*/ 11629 phba->worker_thread = kthread_run(lpfc_do_work, phba, 11630 "lpfc_worker_%d", phba->brd_no); 11631 if (IS_ERR(phba->worker_thread)) { 11632 error = PTR_ERR(phba->worker_thread); 11633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11634 "0293 PM resume failed to start worker " 11635 "thread: error=x%x.\n", error); 11636 return error; 11637 } 11638 11639 /* Configure and enable interrupt */ 11640 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 11641 if (intr_mode == LPFC_INTR_ERROR) { 11642 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11643 "0294 PM resume Failed to enable interrupt\n"); 11644 return -EIO; 11645 } else 11646 phba->intr_mode = intr_mode; 11647 11648 /* Restart HBA and bring it online */ 11649 lpfc_sli_brdrestart(phba); 11650 lpfc_online(phba); 11651 11652 /* Log the current active interrupt mode */ 11653 lpfc_log_intr_mode(phba, phba->intr_mode); 11654 11655 return 0; 11656 } 11657 11658 /** 11659 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 11660 * @phba: pointer to lpfc hba data structure. 11661 * 11662 * This routine is called to prepare the SLI4 device for PCI slot recover. It 11663 * aborts all the outstanding SCSI I/Os to the pci device. 11664 **/ 11665 static void 11666 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 11667 { 11668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11669 "2828 PCI channel I/O abort preparing for recovery\n"); 11670 /* 11671 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 11672 * and let the SCSI mid-layer to retry them to recover. 11673 */ 11674 lpfc_sli_abort_fcp_rings(phba); 11675 } 11676 11677 /** 11678 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 11679 * @phba: pointer to lpfc hba data structure. 11680 * 11681 * This routine is called to prepare the SLI4 device for PCI slot reset. It 11682 * disables the device interrupt and pci device, and aborts the internal FCP 11683 * pending I/Os. 11684 **/ 11685 static void 11686 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 11687 { 11688 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11689 "2826 PCI channel disable preparing for reset\n"); 11690 11691 /* Block any management I/Os to the device */ 11692 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 11693 11694 /* Block all SCSI devices' I/Os on the host */ 11695 lpfc_scsi_dev_block(phba); 11696 11697 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 11698 lpfc_sli_flush_fcp_rings(phba); 11699 11700 /* Flush the outstanding NVME IOs if fc4 type enabled. */ 11701 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 11702 lpfc_sli_flush_nvme_rings(phba); 11703 11704 /* stop all timers */ 11705 lpfc_stop_hba_timers(phba); 11706 11707 /* Disable interrupt and pci device */ 11708 lpfc_sli4_disable_intr(phba); 11709 lpfc_sli4_queue_destroy(phba); 11710 pci_disable_device(phba->pcidev); 11711 } 11712 11713 /** 11714 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 11715 * @phba: pointer to lpfc hba data structure. 11716 * 11717 * This routine is called to prepare the SLI4 device for PCI slot permanently 11718 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 11719 * pending I/Os. 
11720 **/ 11721 static void 11722 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 11723 { 11724 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11725 "2827 PCI channel permanent disable for failure\n"); 11726 11727 /* Block all SCSI devices' I/Os on the host */ 11728 lpfc_scsi_dev_block(phba); 11729 11730 /* stop all timers */ 11731 lpfc_stop_hba_timers(phba); 11732 11733 /* Clean up all driver's outstanding SCSI I/Os */ 11734 lpfc_sli_flush_fcp_rings(phba); 11735 11736 /* Flush the outstanding NVME IOs if fc4 type enabled. */ 11737 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 11738 lpfc_sli_flush_nvme_rings(phba); 11739 } 11740 11741 /** 11742 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 11743 * @pdev: pointer to PCI device. 11744 * @state: the current PCI connection state. 11745 * 11746 * This routine is called from the PCI subsystem for error handling to device 11747 * with SLI-4 interface spec. This function is called by the PCI subsystem 11748 * after a PCI bus error affecting this device has been detected. When this 11749 * function is invoked, it will need to stop all the I/Os and interrupt(s) 11750 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 11751 * for the PCI subsystem to perform proper recovery as desired. 11752 * 11753 * Return codes 11754 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 11755 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11756 **/ 11757 static pci_ers_result_t 11758 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 11759 { 11760 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11761 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11762 11763 switch (state) { 11764 case pci_channel_io_normal: 11765 /* Non-fatal error, prepare for recovery */ 11766 lpfc_sli4_prep_dev_for_recover(phba); 11767 return PCI_ERS_RESULT_CAN_RECOVER; 11768 case pci_channel_io_frozen: 11769 /* Fatal error, prepare for slot reset */ 11770 lpfc_sli4_prep_dev_for_reset(phba); 11771 return PCI_ERS_RESULT_NEED_RESET; 11772 case pci_channel_io_perm_failure: 11773 /* Permanent failure, prepare for device down */ 11774 lpfc_sli4_prep_dev_for_perm_failure(phba); 11775 return PCI_ERS_RESULT_DISCONNECT; 11776 default: 11777 /* Unknown state, prepare and request slot reset */ 11778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11779 "2825 Unknown PCI error state: x%x\n", state); 11780 lpfc_sli4_prep_dev_for_reset(phba); 11781 return PCI_ERS_RESULT_NEED_RESET; 11782 } 11783 } 11784 11785 /** 11786 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 11787 * @pdev: pointer to PCI device. 11788 * 11789 * This routine is called from the PCI subsystem for error handling to device 11790 * with SLI-4 interface spec. It is called after PCI bus has been reset to 11791 * restart the PCI card from scratch, as if from a cold-boot. During the 11792 * PCI subsystem error recovery, after the driver returns 11793 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 11794 * recovery and then call this routine before calling the .resume method to 11795 * recover the device. This function will initialize the HBA device, enable 11796 * the interrupt, but it will just put the HBA to offline state without 11797 * passing any I/O traffic. 
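 *
 * Note that, unlike the SLI-3 slot reset handler, the actual port restart
 * is deferred to lpfc_io_resume_s4(): the function reset is issued via a
 * mailbox command that requires DMA, so it can only be done once the
 * device has been re-enabled in the resume phase.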
11798 * 11799 * Return codes 11800 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 11801 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11802 */ 11803 static pci_ers_result_t 11804 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 11805 { 11806 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11807 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11808 struct lpfc_sli *psli = &phba->sli; 11809 uint32_t intr_mode; 11810 11811 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 11812 if (pci_enable_device_mem(pdev)) { 11813 printk(KERN_ERR "lpfc: Cannot re-enable " 11814 "PCI device after reset.\n"); 11815 return PCI_ERS_RESULT_DISCONNECT; 11816 } 11817 11818 pci_restore_state(pdev); 11819 11820 /* 11821 * As the new kernel behavior of pci_restore_state() API call clears 11822 * device saved_state flag, need to save the restored state again. 11823 */ 11824 pci_save_state(pdev); 11825 11826 if (pdev->is_busmaster) 11827 pci_set_master(pdev); 11828 11829 spin_lock_irq(&phba->hbalock); 11830 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 11831 spin_unlock_irq(&phba->hbalock); 11832 11833 /* Configure and enable interrupt */ 11834 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 11835 if (intr_mode == LPFC_INTR_ERROR) { 11836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11837 "2824 Cannot re-enable interrupt after " 11838 "slot reset.\n"); 11839 return PCI_ERS_RESULT_DISCONNECT; 11840 } else 11841 phba->intr_mode = intr_mode; 11842 11843 /* Log the current active interrupt mode */ 11844 lpfc_log_intr_mode(phba, phba->intr_mode); 11845 11846 return PCI_ERS_RESULT_RECOVERED; 11847 } 11848 11849 /** 11850 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 11851 * @pdev: pointer to PCI device 11852 * 11853 * This routine is called from the PCI subsystem for error handling to device 11854 * with SLI-4 interface spec. It is called when kernel error recovery tells 11855 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 11856 * error recovery. After this call, traffic can start to flow from this device 11857 * again. 11858 **/ 11859 static void 11860 lpfc_io_resume_s4(struct pci_dev *pdev) 11861 { 11862 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11863 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11864 11865 /* 11866 * In case of slot reset, as function reset is performed through 11867 * mailbox command which needs DMA to be enabled, this operation 11868 * has to be moved to the io resume phase. Taking device offline 11869 * will perform the necessary cleanup. 11870 */ 11871 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 11872 /* Perform device reset */ 11873 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11874 lpfc_offline(phba); 11875 lpfc_sli_brdrestart(phba); 11876 /* Bring the device back online */ 11877 lpfc_online(phba); 11878 } 11879 11880 /* Clean up Advanced Error Reporting (AER) if needed */ 11881 if (phba->hba_flag & HBA_AER_ENABLED) 11882 pci_cleanup_aer_uncorrect_error_status(pdev); 11883 } 11884 11885 /** 11886 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 11887 * @pdev: pointer to PCI device 11888 * @pid: pointer to PCI device identifier 11889 * 11890 * This routine is to be registered to the kernel's PCI subsystem. 
When an 11891 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 11892 * at PCI device-specific information of the device and driver to see if the 11893 * driver state that it can support this kind of device. If the match is 11894 * successful, the driver core invokes this routine. This routine dispatches 11895 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 11896 * do all the initialization that it needs to do to handle the HBA device 11897 * properly. 11898 * 11899 * Return code 11900 * 0 - driver can claim the device 11901 * negative value - driver can not claim the device 11902 **/ 11903 static int 11904 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 11905 { 11906 int rc; 11907 struct lpfc_sli_intf intf; 11908 11909 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 11910 return -ENODEV; 11911 11912 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 11913 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 11914 rc = lpfc_pci_probe_one_s4(pdev, pid); 11915 else 11916 rc = lpfc_pci_probe_one_s3(pdev, pid); 11917 11918 return rc; 11919 } 11920 11921 /** 11922 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 11923 * @pdev: pointer to PCI device 11924 * 11925 * This routine is to be registered to the kernel's PCI subsystem. When an 11926 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 11927 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 11928 * remove routine, which will perform all the necessary cleanup for the 11929 * device to be removed from the PCI subsystem properly. 11930 **/ 11931 static void 11932 lpfc_pci_remove_one(struct pci_dev *pdev) 11933 { 11934 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11935 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11936 11937 switch (phba->pci_dev_grp) { 11938 case LPFC_PCI_DEV_LP: 11939 lpfc_pci_remove_one_s3(pdev); 11940 break; 11941 case LPFC_PCI_DEV_OC: 11942 lpfc_pci_remove_one_s4(pdev); 11943 break; 11944 default: 11945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11946 "1424 Invalid PCI device group: 0x%x\n", 11947 phba->pci_dev_grp); 11948 break; 11949 } 11950 return; 11951 } 11952 11953 /** 11954 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 11955 * @pdev: pointer to PCI device 11956 * @msg: power management message 11957 * 11958 * This routine is to be registered to the kernel's PCI subsystem to support 11959 * system Power Management (PM). When PM invokes this method, it dispatches 11960 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 11961 * suspend the device. 
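 *
 * The dispatch is keyed off phba->pci_dev_grp: LPFC_PCI_DEV_LP selects the
 * SLI-3 handler and LPFC_PCI_DEV_OC selects the SLI-4 handler, as in the
 * other wrapper routines in this file.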
11962 * 11963 * Return code 11964 * 0 - driver suspended the device 11965 * Error otherwise 11966 **/ 11967 static int 11968 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 11969 { 11970 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11971 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11972 int rc = -ENODEV; 11973 11974 switch (phba->pci_dev_grp) { 11975 case LPFC_PCI_DEV_LP: 11976 rc = lpfc_pci_suspend_one_s3(pdev, msg); 11977 break; 11978 case LPFC_PCI_DEV_OC: 11979 rc = lpfc_pci_suspend_one_s4(pdev, msg); 11980 break; 11981 default: 11982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11983 "1425 Invalid PCI device group: 0x%x\n", 11984 phba->pci_dev_grp); 11985 break; 11986 } 11987 return rc; 11988 } 11989 11990 /** 11991 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 11992 * @pdev: pointer to PCI device 11993 * 11994 * This routine is to be registered to the kernel's PCI subsystem to support 11995 * system Power Management (PM). When PM invokes this method, it dispatches 11996 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 11997 * resume the device. 11998 * 11999 * Return code 12000 * 0 - driver suspended the device 12001 * Error otherwise 12002 **/ 12003 static int 12004 lpfc_pci_resume_one(struct pci_dev *pdev) 12005 { 12006 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12007 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12008 int rc = -ENODEV; 12009 12010 switch (phba->pci_dev_grp) { 12011 case LPFC_PCI_DEV_LP: 12012 rc = lpfc_pci_resume_one_s3(pdev); 12013 break; 12014 case LPFC_PCI_DEV_OC: 12015 rc = lpfc_pci_resume_one_s4(pdev); 12016 break; 12017 default: 12018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12019 "1426 Invalid PCI device group: 0x%x\n", 12020 phba->pci_dev_grp); 12021 break; 12022 } 12023 return rc; 12024 } 12025 12026 /** 12027 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 12028 * @pdev: pointer to PCI device. 12029 * @state: the current PCI connection state. 12030 * 12031 * This routine is registered to the PCI subsystem for error handling. This 12032 * function is called by the PCI subsystem after a PCI bus error affecting 12033 * this device has been detected. When this routine is invoked, it dispatches 12034 * the action to the proper SLI-3 or SLI-4 device error detected handling 12035 * routine, which will perform the proper error detected operation. 12036 * 12037 * Return codes 12038 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12039 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12040 **/ 12041 static pci_ers_result_t 12042 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 12043 { 12044 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12045 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12046 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 12047 12048 switch (phba->pci_dev_grp) { 12049 case LPFC_PCI_DEV_LP: 12050 rc = lpfc_io_error_detected_s3(pdev, state); 12051 break; 12052 case LPFC_PCI_DEV_OC: 12053 rc = lpfc_io_error_detected_s4(pdev, state); 12054 break; 12055 default: 12056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12057 "1427 Invalid PCI device group: 0x%x\n", 12058 phba->pci_dev_grp); 12059 break; 12060 } 12061 return rc; 12062 } 12063 12064 /** 12065 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 12066 * @pdev: pointer to PCI device. 12067 * 12068 * This routine is registered to the PCI subsystem for error handling. 
This 12069 * function is called after PCI bus has been reset to restart the PCI card 12070 * from scratch, as if from a cold-boot. When this routine is invoked, it 12071 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 12072 * routine, which will perform the proper device reset. 12073 * 12074 * Return codes 12075 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12076 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12077 **/ 12078 static pci_ers_result_t 12079 lpfc_io_slot_reset(struct pci_dev *pdev) 12080 { 12081 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12082 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12083 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 12084 12085 switch (phba->pci_dev_grp) { 12086 case LPFC_PCI_DEV_LP: 12087 rc = lpfc_io_slot_reset_s3(pdev); 12088 break; 12089 case LPFC_PCI_DEV_OC: 12090 rc = lpfc_io_slot_reset_s4(pdev); 12091 break; 12092 default: 12093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12094 "1428 Invalid PCI device group: 0x%x\n", 12095 phba->pci_dev_grp); 12096 break; 12097 } 12098 return rc; 12099 } 12100 12101 /** 12102 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 12103 * @pdev: pointer to PCI device 12104 * 12105 * This routine is registered to the PCI subsystem for error handling. It 12106 * is called when kernel error recovery tells the lpfc driver that it is 12107 * OK to resume normal PCI operation after PCI bus error recovery. When 12108 * this routine is invoked, it dispatches the action to the proper SLI-3 12109 * or SLI-4 device io_resume routine, which will resume the device operation. 12110 **/ 12111 static void 12112 lpfc_io_resume(struct pci_dev *pdev) 12113 { 12114 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12115 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12116 12117 switch (phba->pci_dev_grp) { 12118 case LPFC_PCI_DEV_LP: 12119 lpfc_io_resume_s3(pdev); 12120 break; 12121 case LPFC_PCI_DEV_OC: 12122 lpfc_io_resume_s4(pdev); 12123 break; 12124 default: 12125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12126 "1429 Invalid PCI device group: 0x%x\n", 12127 phba->pci_dev_grp); 12128 break; 12129 } 12130 return; 12131 } 12132 12133 /** 12134 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 12135 * @phba: pointer to lpfc hba data structure. 12136 * 12137 * This routine checks to see if OAS is supported for this adapter. If 12138 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 12139 * the enable oas flag is cleared and the pool created for OAS device data 12140 * is destroyed. 12141 * 12142 **/ 12143 void 12144 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 12145 { 12146 12147 if (!phba->cfg_EnableXLane) 12148 return; 12149 12150 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 12151 phba->cfg_fof = 1; 12152 } else { 12153 phba->cfg_fof = 0; 12154 if (phba->device_data_mem_pool) 12155 mempool_destroy(phba->device_data_mem_pool); 12156 phba->device_data_mem_pool = NULL; 12157 } 12158 12159 return; 12160 } 12161 12162 /** 12163 * lpfc_fof_queue_setup - Set up all the fof queues 12164 * @phba: pointer to lpfc hba data structure. 12165 * 12166 * This routine is invoked to set up all the fof queues for the FC HBA 12167 * operation. 
12168 * 12169 * Return codes 12170 * 0 - successful 12171 * -ENOMEM - No available memory 12172 **/ 12173 int 12174 lpfc_fof_queue_setup(struct lpfc_hba *phba) 12175 { 12176 struct lpfc_sli_ring *pring; 12177 int rc; 12178 12179 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); 12180 if (rc) 12181 return -ENOMEM; 12182 12183 if (phba->cfg_fof) { 12184 12185 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, 12186 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); 12187 if (rc) 12188 goto out_oas_cq; 12189 12190 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq, 12191 phba->sli4_hba.oas_cq, LPFC_FCP); 12192 if (rc) 12193 goto out_oas_wq; 12194 12195 /* Bind this CQ/WQ to the NVME ring */ 12196 pring = phba->sli4_hba.oas_wq->pring; 12197 pring->sli.sli4.wqp = 12198 (void *)phba->sli4_hba.oas_wq; 12199 phba->sli4_hba.oas_cq->pring = pring; 12200 } 12201 12202 return 0; 12203 12204 out_oas_wq: 12205 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); 12206 out_oas_cq: 12207 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); 12208 return rc; 12209 12210 } 12211 12212 /** 12213 * lpfc_fof_queue_create - Create all the fof queues 12214 * @phba: pointer to lpfc hba data structure. 12215 * 12216 * This routine is invoked to allocate all the fof queues for the FC HBA 12217 * operation. For each SLI4 queue type, the parameters such as queue entry 12218 * count (queue depth) shall be taken from the module parameter. For now, 12219 * we just use some constant number as place holder. 12220 * 12221 * Return codes 12222 * 0 - successful 12223 * -ENOMEM - No availble memory 12224 * -EIO - The mailbox failed to complete successfully. 12225 **/ 12226 int 12227 lpfc_fof_queue_create(struct lpfc_hba *phba) 12228 { 12229 struct lpfc_queue *qdesc; 12230 12231 /* Create FOF EQ */ 12232 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 12233 phba->sli4_hba.eq_esize, 12234 phba->sli4_hba.eq_ecount); 12235 if (!qdesc) 12236 goto out_error; 12237 12238 phba->sli4_hba.fof_eq = qdesc; 12239 12240 if (phba->cfg_fof) { 12241 12242 /* Create OAS CQ */ 12243 if (phba->fcp_embed_io) 12244 qdesc = lpfc_sli4_queue_alloc(phba, 12245 LPFC_EXPANDED_PAGE_SIZE, 12246 phba->sli4_hba.cq_esize, 12247 LPFC_CQE_EXP_COUNT); 12248 else 12249 qdesc = lpfc_sli4_queue_alloc(phba, 12250 LPFC_DEFAULT_PAGE_SIZE, 12251 phba->sli4_hba.cq_esize, 12252 phba->sli4_hba.cq_ecount); 12253 if (!qdesc) 12254 goto out_error; 12255 12256 phba->sli4_hba.oas_cq = qdesc; 12257 12258 /* Create OAS WQ */ 12259 if (phba->fcp_embed_io) 12260 qdesc = lpfc_sli4_queue_alloc(phba, 12261 LPFC_EXPANDED_PAGE_SIZE, 12262 LPFC_WQE128_SIZE, 12263 LPFC_WQE_EXP_COUNT); 12264 else 12265 qdesc = lpfc_sli4_queue_alloc(phba, 12266 LPFC_DEFAULT_PAGE_SIZE, 12267 phba->sli4_hba.wq_esize, 12268 phba->sli4_hba.wq_ecount); 12269 if (!qdesc) 12270 goto out_error; 12271 12272 phba->sli4_hba.oas_wq = qdesc; 12273 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 12274 12275 } 12276 return 0; 12277 12278 out_error: 12279 lpfc_fof_queue_destroy(phba); 12280 return -ENOMEM; 12281 } 12282 12283 /** 12284 * lpfc_fof_queue_destroy - Destroy all the fof queues 12285 * @phba: pointer to lpfc hba data structure. 12286 * 12287 * This routine is invoked to release all the SLI4 queues with the FC HBA 12288 * operation. 
12289 * 12290 * Return codes 12291 * 0 - successful 12292 **/ 12293 int 12294 lpfc_fof_queue_destroy(struct lpfc_hba *phba) 12295 { 12296 /* Release FOF Event queue */ 12297 if (phba->sli4_hba.fof_eq != NULL) { 12298 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); 12299 phba->sli4_hba.fof_eq = NULL; 12300 } 12301 12302 /* Release OAS Completion queue */ 12303 if (phba->sli4_hba.oas_cq != NULL) { 12304 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); 12305 phba->sli4_hba.oas_cq = NULL; 12306 } 12307 12308 /* Release OAS Work queue */ 12309 if (phba->sli4_hba.oas_wq != NULL) { 12310 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); 12311 phba->sli4_hba.oas_wq = NULL; 12312 } 12313 return 0; 12314 } 12315 12316 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 12317 12318 static const struct pci_error_handlers lpfc_err_handler = { 12319 .error_detected = lpfc_io_error_detected, 12320 .slot_reset = lpfc_io_slot_reset, 12321 .resume = lpfc_io_resume, 12322 }; 12323 12324 static struct pci_driver lpfc_driver = { 12325 .name = LPFC_DRIVER_NAME, 12326 .id_table = lpfc_id_table, 12327 .probe = lpfc_pci_probe_one, 12328 .remove = lpfc_pci_remove_one, 12329 .shutdown = lpfc_pci_remove_one, 12330 .suspend = lpfc_pci_suspend_one, 12331 .resume = lpfc_pci_resume_one, 12332 .err_handler = &lpfc_err_handler, 12333 }; 12334 12335 static const struct file_operations lpfc_mgmt_fop = { 12336 .owner = THIS_MODULE, 12337 }; 12338 12339 static struct miscdevice lpfc_mgmt_dev = { 12340 .minor = MISC_DYNAMIC_MINOR, 12341 .name = "lpfcmgmt", 12342 .fops = &lpfc_mgmt_fop, 12343 }; 12344 12345 /** 12346 * lpfc_init - lpfc module initialization routine 12347 * 12348 * This routine is to be invoked when the lpfc module is loaded into the 12349 * kernel. The special kernel macro module_init() is used to indicate the 12350 * role of this routine to the kernel as lpfc module entry point. 12351 * 12352 * Return codes 12353 * 0 - successful 12354 * -ENOMEM - FC attach transport failed 12355 * all others - failed 12356 */ 12357 static int __init 12358 lpfc_init(void) 12359 { 12360 int error = 0; 12361 12362 printk(LPFC_MODULE_DESC "\n"); 12363 printk(LPFC_COPYRIGHT "\n"); 12364 12365 error = misc_register(&lpfc_mgmt_dev); 12366 if (error) 12367 printk(KERN_ERR "Could not register lpfcmgmt device, " 12368 "misc_register returned with status %d", error); 12369 12370 lpfc_transport_functions.vport_create = lpfc_vport_create; 12371 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 12372 lpfc_transport_template = 12373 fc_attach_transport(&lpfc_transport_functions); 12374 if (lpfc_transport_template == NULL) 12375 return -ENOMEM; 12376 lpfc_vport_transport_template = 12377 fc_attach_transport(&lpfc_vport_transport_functions); 12378 if (lpfc_vport_transport_template == NULL) { 12379 fc_release_transport(lpfc_transport_template); 12380 return -ENOMEM; 12381 } 12382 12383 /* Initialize in case vector mapping is needed */ 12384 lpfc_used_cpu = NULL; 12385 lpfc_present_cpu = num_present_cpus(); 12386 12387 error = pci_register_driver(&lpfc_driver); 12388 if (error) { 12389 fc_release_transport(lpfc_transport_template); 12390 fc_release_transport(lpfc_vport_transport_template); 12391 } 12392 12393 return error; 12394 } 12395 12396 /** 12397 * lpfc_exit - lpfc module removal routine 12398 * 12399 * This routine is invoked when the lpfc module is removed from the kernel. 12400 * The special kernel macro module_exit() is used to indicate the role of 12401 * this routine to the kernel as lpfc module exit point. 
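 *
 * The teardown below mirrors lpfc_init(): the lpfcmgmt misc device is
 * deregistered, the PCI driver is unregistered, both FC transport
 * templates are released, any BlockGuard dump buffers are freed, and the
 * HBA index IDR is destroyed.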
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);