1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 43 #include <scsi/scsi.h> 44 #include <scsi/scsi_device.h> 45 #include <scsi/scsi_host.h> 46 #include <scsi/scsi_transport_fc.h> 47 #include <scsi/scsi_tcq.h> 48 #include <scsi/fc/fc_fs.h> 49 50 #include <linux/nvme-fc-driver.h> 51 52 #include "lpfc_hw4.h" 53 #include "lpfc_hw.h" 54 #include "lpfc_sli.h" 55 #include "lpfc_sli4.h" 56 #include "lpfc_nl.h" 57 #include "lpfc_disc.h" 58 #include "lpfc.h" 59 #include "lpfc_scsi.h" 60 #include "lpfc_nvme.h" 61 #include "lpfc_nvmet.h" 62 #include "lpfc_logmsg.h" 63 #include "lpfc_crtn.h" 64 #include "lpfc_vport.h" 65 #include "lpfc_version.h" 66 #include "lpfc_ids.h" 67 68 char *_dump_buf_data; 69 unsigned long _dump_buf_data_order; 70 char *_dump_buf_dif; 71 unsigned long _dump_buf_dif_order; 72 spinlock_t _dump_buf_lock; 73 74 /* Used when mapping IRQ vectors in a driver centric manner */ 75 uint32_t lpfc_present_cpu; 76 77 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 78 static int lpfc_post_rcv_buf(struct lpfc_hba *); 79 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 80 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 81 static int lpfc_setup_endian_order(struct lpfc_hba *); 82 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 83 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 84 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 85 static void lpfc_init_sgl_list(struct lpfc_hba *); 86 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 87 static void lpfc_free_active_sgl(struct lpfc_hba *); 88 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 89 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 90 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 91 static void 
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 92 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 93 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 94 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 95 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 96 static uint16_t lpfc_find_eq_handle(struct lpfc_hba *, uint16_t); 97 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 98 99 static struct scsi_transport_template *lpfc_transport_template = NULL; 100 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 101 static DEFINE_IDR(lpfc_hba_index); 102 #define LPFC_NVMET_BUF_POST 254 103 104 /** 105 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 106 * @phba: pointer to lpfc hba data structure. 107 * 108 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 109 * mailbox command. It retrieves the revision information from the HBA and 110 * collects the Vital Product Data (VPD) about the HBA for preparing the 111 * configuration of the HBA. 112 * 113 * Return codes: 114 * 0 - success. 115 * -ERESTART - requests the SLI layer to reset the HBA and try again. 116 * Any other value - indicates an error. 117 **/ 118 int 119 lpfc_config_port_prep(struct lpfc_hba *phba) 120 { 121 lpfc_vpd_t *vp = &phba->vpd; 122 int i = 0, rc; 123 LPFC_MBOXQ_t *pmb; 124 MAILBOX_t *mb; 125 char *lpfc_vpd_data = NULL; 126 uint16_t offset = 0; 127 static char licensed[56] = 128 "key unlock for use with gnu public licensed code only\0"; 129 static int init_key = 1; 130 131 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 132 if (!pmb) { 133 phba->link_state = LPFC_HBA_ERROR; 134 return -ENOMEM; 135 } 136 137 mb = &pmb->u.mb; 138 phba->link_state = LPFC_INIT_MBX_CMDS; 139 140 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 141 if (init_key) { 142 uint32_t *ptext = (uint32_t *) licensed; 143 144 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 145 *ptext = cpu_to_be32(*ptext); 146 init_key = 0; 147 } 148 149 lpfc_read_nv(phba, pmb); 150 memset((char*)mb->un.varRDnvp.rsvd3, 0, 151 sizeof (mb->un.varRDnvp.rsvd3)); 152 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 153 sizeof (licensed)); 154 155 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 156 157 if (rc != MBX_SUCCESS) { 158 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 159 "0324 Config Port initialization " 160 "error, mbxCmd x%x READ_NVPARM, " 161 "mbxStatus x%x\n", 162 mb->mbxCommand, mb->mbxStatus); 163 mempool_free(pmb, phba->mbox_mem_pool); 164 return -ERESTART; 165 } 166 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 167 sizeof(phba->wwnn)); 168 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 169 sizeof(phba->wwpn)); 170 } 171 172 /* 173 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 174 * which was already set in lpfc_get_cfgparam() 175 */ 176 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 177 178 /* Setup and issue mailbox READ REV command */ 179 lpfc_read_rev(phba, pmb); 180 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 181 if (rc != MBX_SUCCESS) { 182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 183 "0439 Adapter failed to init, mbxCmd x%x " 184 "READ_REV, mbxStatus x%x\n", 185 mb->mbxCommand, mb->mbxStatus); 186 mempool_free( pmb, phba->mbox_mem_pool); 187 return -ERESTART; 188 } 189 190 191 /* 192 * The value of rr must be 1 since the driver set the cv field to 1. 193 * This setting requires the FW to set all revision fields. 
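 * If rr comes back as 0, the revision data cannot be trusted, so the code
 * below asks the SLI layer to reset the HBA and retry by returning -ERESTART.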
194 */ 195 if (mb->un.varRdRev.rr == 0) { 196 vp->rev.rBit = 0; 197 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 198 "0440 Adapter failed to init, READ_REV has " 199 "missing revision information.\n"); 200 mempool_free(pmb, phba->mbox_mem_pool); 201 return -ERESTART; 202 } 203 204 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 205 mempool_free(pmb, phba->mbox_mem_pool); 206 return -EINVAL; 207 } 208 209 /* Save information as VPD data */ 210 vp->rev.rBit = 1; 211 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 212 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 213 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 214 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 215 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 216 vp->rev.biuRev = mb->un.varRdRev.biuRev; 217 vp->rev.smRev = mb->un.varRdRev.smRev; 218 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 219 vp->rev.endecRev = mb->un.varRdRev.endecRev; 220 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 221 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 222 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 223 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 224 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 225 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 226 227 /* If the sli feature level is less than 9, we must 228 * tear down all RPIs and VPIs on link down if NPIV 229 * is enabled. 230 */ 231 if (vp->rev.feaLevelHigh < 9) 232 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 233 234 if (lpfc_is_LC_HBA(phba->pcidev->device)) 235 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 236 sizeof (phba->RandomData)); 237 238 /* Get adapter VPD information */ 239 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 240 if (!lpfc_vpd_data) 241 goto out_free_mbox; 242 do { 243 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 244 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 245 246 if (rc != MBX_SUCCESS) { 247 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 248 "0441 VPD not present on adapter, " 249 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 250 mb->mbxCommand, mb->mbxStatus); 251 mb->un.varDmp.word_cnt = 0; 252 } 253 /* dump mem may return a zero when finished or we got a 254 * mailbox error, either way we are done. 255 */ 256 if (mb->un.varDmp.word_cnt == 0) 257 break; 258 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 259 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 260 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 261 lpfc_vpd_data + offset, 262 mb->un.varDmp.word_cnt); 263 offset += mb->un.varDmp.word_cnt; 264 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 265 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 266 267 kfree(lpfc_vpd_data); 268 out_free_mbox: 269 mempool_free(pmb, phba->mbox_mem_pool); 270 return 0; 271 } 272 273 /** 274 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 275 * @phba: pointer to lpfc hba data structure. 276 * @pmboxq: pointer to the driver internal queue element for mailbox command. 277 * 278 * This is the completion handler for the driver's configuring asynchronous event 279 * mailbox command to the device. If the mailbox command returns successfully, 280 * it will set the internal async event support flag to 1; otherwise, it will 281 * set the internal async event support flag to 0.
282 **/ 283 static void 284 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 285 { 286 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 287 phba->temp_sensor_support = 1; 288 else 289 phba->temp_sensor_support = 0; 290 mempool_free(pmboxq, phba->mbox_mem_pool); 291 return; 292 } 293 294 /** 295 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 296 * @phba: pointer to lpfc hba data structure. 297 * @pmboxq: pointer to the driver internal queue element for mailbox command. 298 * 299 * This is the completion handler for the dump mailbox command for getting 300 * wake up parameters. When this command completes, the response contains the 301 * Option ROM version of the HBA. This function translates the version number 302 * into a human readable string and stores it in OptionROMVersion. 303 **/ 304 static void 305 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 306 { 307 struct prog_id *prg; 308 uint32_t prog_id_word; 309 char dist = ' '; 310 /* character array used for decoding dist type. */ 311 char dist_char[] = "nabx"; 312 313 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 314 mempool_free(pmboxq, phba->mbox_mem_pool); 315 return; 316 } 317 318 prg = (struct prog_id *) &prog_id_word; 319 320 /* word 7 contains the option rom version */ 321 prog_id_word = pmboxq->u.mb.un.varWords[7]; 322 323 /* Decode the Option rom version word to a readable string */ 324 if (prg->dist < 4) 325 dist = dist_char[prg->dist]; 326 327 if ((prg->dist == 3) && (prg->num == 0)) 328 snprintf(phba->OptionROMVersion, 32, "%d.%d%d", 329 prg->ver, prg->rev, prg->lev); 330 else 331 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", 332 prg->ver, prg->rev, prg->lev, 333 dist, prg->num); 334 mempool_free(pmboxq, phba->mbox_mem_pool); 335 return; 336 } 337 338 /** 339 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, 340 * cfg_soft_wwnn, cfg_soft_wwpn 341 * @vport: pointer to lpfc vport data structure. 342 * 343 * 344 * Return codes 345 * None.
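 * A fabric-assigned WWPN is also honored: when the service parameters carry
 * vendor version level 1 with the FAPWWN_KEY_VENDOR key, or FAWWPN_SET is
 * already set, the port name is copied from the service parameters, and
 * FAWWPN_SET is re-armed only while the vendor key remains present.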
346 **/ 347 void 348 lpfc_update_vport_wwn(struct lpfc_vport *vport) 349 { 350 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 351 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 352 353 /* If the soft name exists then update it using the service params */ 354 if (vport->phba->cfg_soft_wwnn) 355 u64_to_wwn(vport->phba->cfg_soft_wwnn, 356 vport->fc_sparam.nodeName.u.wwn); 357 if (vport->phba->cfg_soft_wwpn) 358 u64_to_wwn(vport->phba->cfg_soft_wwpn, 359 vport->fc_sparam.portName.u.wwn); 360 361 /* 362 * If the name is empty or there exists a soft name 363 * then copy the service params name, otherwise use the fc name 364 */ 365 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 366 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 367 sizeof(struct lpfc_name)); 368 else 369 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 370 sizeof(struct lpfc_name)); 371 372 /* 373 * If the port name has changed, then set the Param changes flag 374 * to unreg the login 375 */ 376 if (vport->fc_portname.u.wwn[0] != 0 && 377 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 378 sizeof(struct lpfc_name))) 379 vport->vport_flag |= FAWWPN_PARAM_CHG; 380 381 if (vport->fc_portname.u.wwn[0] == 0 || 382 vport->phba->cfg_soft_wwpn || 383 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 384 vport->vport_flag & FAWWPN_SET) { 385 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 386 sizeof(struct lpfc_name)); 387 vport->vport_flag &= ~FAWWPN_SET; 388 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 389 vport->vport_flag |= FAWWPN_SET; 390 } 391 else 392 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 393 sizeof(struct lpfc_name)); 394 } 395 396 /** 397 * lpfc_config_port_post - Perform lpfc initialization after config port 398 * @phba: pointer to lpfc hba data structure. 399 * 400 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 401 * command call. It performs all internal resource and state setups on the 402 * port: post IOCB buffers, enable appropriate host interrupt attentions, 403 * ELS ring timers, etc. 404 * 405 * Return codes 406 * 0 - success. 407 * Any other value - error. 408 **/ 409 int 410 lpfc_config_port_post(struct lpfc_hba *phba) 411 { 412 struct lpfc_vport *vport = phba->pport; 413 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 414 LPFC_MBOXQ_t *pmb; 415 MAILBOX_t *mb; 416 struct lpfc_dmabuf *mp; 417 struct lpfc_sli *psli = &phba->sli; 418 uint32_t status, timeout; 419 int i, j; 420 int rc; 421 422 spin_lock_irq(&phba->hbalock); 423 /* 424 * If the Config port completed correctly the HBA is not 425 * over heated any more. 426 */ 427 if (phba->over_temp_state == HBA_OVER_TEMP) 428 phba->over_temp_state = HBA_NORMAL_TEMP; 429 spin_unlock_irq(&phba->hbalock); 430 431 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 432 if (!pmb) { 433 phba->link_state = LPFC_HBA_ERROR; 434 return -ENOMEM; 435 } 436 mb = &pmb->u.mb; 437 438 /* Get login parameters for NID. 
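 * The READ_SPARAM mailbox returns the port's service parameters (including
 * the factory node and port names) in a DMA buffer; they are copied into
 * vport->fc_sparam below and folded into the vport WWNs by
 * lpfc_update_vport_wwn().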
*/ 439 rc = lpfc_read_sparam(phba, pmb, 0); 440 if (rc) { 441 mempool_free(pmb, phba->mbox_mem_pool); 442 return -ENOMEM; 443 } 444 445 pmb->vport = vport; 446 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 448 "0448 Adapter failed init, mbxCmd x%x " 449 "READ_SPARM mbxStatus x%x\n", 450 mb->mbxCommand, mb->mbxStatus); 451 phba->link_state = LPFC_HBA_ERROR; 452 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 453 mempool_free(pmb, phba->mbox_mem_pool); 454 lpfc_mbuf_free(phba, mp->virt, mp->phys); 455 kfree(mp); 456 return -EIO; 457 } 458 459 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 460 461 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 462 lpfc_mbuf_free(phba, mp->virt, mp->phys); 463 kfree(mp); 464 pmb->ctx_buf = NULL; 465 lpfc_update_vport_wwn(vport); 466 467 /* Update the fc_host data structures with new wwn. */ 468 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 469 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 470 fc_host_max_npiv_vports(shost) = phba->max_vpi; 471 472 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 473 /* This should be consolidated into parse_vpd ? - mr */ 474 if (phba->SerialNumber[0] == 0) { 475 uint8_t *outptr; 476 477 outptr = &vport->fc_nodename.u.s.IEEE[0]; 478 for (i = 0; i < 12; i++) { 479 status = *outptr++; 480 j = ((status & 0xf0) >> 4); 481 if (j <= 9) 482 phba->SerialNumber[i] = 483 (char)((uint8_t) 0x30 + (uint8_t) j); 484 else 485 phba->SerialNumber[i] = 486 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 487 i++; 488 j = (status & 0xf); 489 if (j <= 9) 490 phba->SerialNumber[i] = 491 (char)((uint8_t) 0x30 + (uint8_t) j); 492 else 493 phba->SerialNumber[i] = 494 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 495 } 496 } 497 498 lpfc_read_config(phba, pmb); 499 pmb->vport = vport; 500 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 502 "0453 Adapter failed to init, mbxCmd x%x " 503 "READ_CONFIG, mbxStatus x%x\n", 504 mb->mbxCommand, mb->mbxStatus); 505 phba->link_state = LPFC_HBA_ERROR; 506 mempool_free( pmb, phba->mbox_mem_pool); 507 return -EIO; 508 } 509 510 /* Check if the port is disabled */ 511 lpfc_sli_read_link_ste(phba); 512 513 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 514 i = (mb->un.varRdConfig.max_xri + 1); 515 if (phba->cfg_hba_queue_depth > i) { 516 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 517 "3359 HBA queue depth changed from %d to %d\n", 518 phba->cfg_hba_queue_depth, i); 519 phba->cfg_hba_queue_depth = i; 520 } 521 522 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 523 i = (mb->un.varRdConfig.max_xri >> 3); 524 if (phba->pport->cfg_lun_queue_depth > i) { 525 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 526 "3360 LUN queue depth changed from %d to %d\n", 527 phba->pport->cfg_lun_queue_depth, i); 528 phba->pport->cfg_lun_queue_depth = i; 529 } 530 531 phba->lmt = mb->un.varRdConfig.lmt; 532 533 /* Get the default values for Model Name and Description */ 534 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 535 536 phba->link_state = LPFC_LINK_DOWN; 537 538 /* Only process IOCBs on ELS ring till hba_state is READY */ 539 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 540 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 541 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 542 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 543 544 /* Post receive buffers for desired rings */ 545 if 
(phba->sli_rev != 3) 546 lpfc_post_rcv_buf(phba); 547 548 /* 549 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 550 */ 551 if (phba->intr_type == MSIX) { 552 rc = lpfc_config_msi(phba, pmb); 553 if (rc) { 554 mempool_free(pmb, phba->mbox_mem_pool); 555 return -EIO; 556 } 557 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 558 if (rc != MBX_SUCCESS) { 559 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 560 "0352 Config MSI mailbox command " 561 "failed, mbxCmd x%x, mbxStatus x%x\n", 562 pmb->u.mb.mbxCommand, 563 pmb->u.mb.mbxStatus); 564 mempool_free(pmb, phba->mbox_mem_pool); 565 return -EIO; 566 } 567 } 568 569 spin_lock_irq(&phba->hbalock); 570 /* Initialize ERATT handling flag */ 571 phba->hba_flag &= ~HBA_ERATT_HANDLED; 572 573 /* Enable appropriate host interrupts */ 574 if (lpfc_readl(phba->HCregaddr, &status)) { 575 spin_unlock_irq(&phba->hbalock); 576 return -EIO; 577 } 578 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 579 if (psli->num_rings > 0) 580 status |= HC_R0INT_ENA; 581 if (psli->num_rings > 1) 582 status |= HC_R1INT_ENA; 583 if (psli->num_rings > 2) 584 status |= HC_R2INT_ENA; 585 if (psli->num_rings > 3) 586 status |= HC_R3INT_ENA; 587 588 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 589 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 590 status &= ~(HC_R0INT_ENA); 591 592 writel(status, phba->HCregaddr); 593 readl(phba->HCregaddr); /* flush */ 594 spin_unlock_irq(&phba->hbalock); 595 596 /* Set up ring-0 (ELS) timer */ 597 timeout = phba->fc_ratov * 2; 598 mod_timer(&vport->els_tmofunc, 599 jiffies + msecs_to_jiffies(1000 * timeout)); 600 /* Set up heart beat (HB) timer */ 601 mod_timer(&phba->hb_tmofunc, 602 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 603 phba->hb_outstanding = 0; 604 phba->last_completion_time = jiffies; 605 /* Set up error attention (ERATT) polling timer */ 606 mod_timer(&phba->eratt_poll, 607 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 608 609 if (phba->hba_flag & LINK_DISABLED) { 610 lpfc_printf_log(phba, 611 KERN_ERR, LOG_INIT, 612 "2598 Adapter Link is disabled.\n"); 613 lpfc_down_link(phba, pmb); 614 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 615 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 616 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 617 lpfc_printf_log(phba, 618 KERN_ERR, LOG_INIT, 619 "2599 Adapter failed to issue DOWN_LINK" 620 " mbox command rc 0x%x\n", rc); 621 622 mempool_free(pmb, phba->mbox_mem_pool); 623 return -EIO; 624 } 625 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 626 mempool_free(pmb, phba->mbox_mem_pool); 627 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 628 if (rc) 629 return rc; 630 } 631 /* MBOX buffer will be freed in mbox compl */ 632 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 633 if (!pmb) { 634 phba->link_state = LPFC_HBA_ERROR; 635 return -ENOMEM; 636 } 637 638 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 639 pmb->mbox_cmpl = lpfc_config_async_cmpl; 640 pmb->vport = phba->pport; 641 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 642 643 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 644 lpfc_printf_log(phba, 645 KERN_ERR, 646 LOG_INIT, 647 "0456 Adapter failed to issue " 648 "ASYNCEVT_ENABLE mbox status x%x\n", 649 rc); 650 mempool_free(pmb, phba->mbox_mem_pool); 651 } 652 653 /* Get Option rom version */ 654 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 655 if (!pmb) { 656 phba->link_state = LPFC_HBA_ERROR; 657 return -ENOMEM; 658 } 659 660 lpfc_dump_wakeup_param(phba, pmb); 661 pmb->mbox_cmpl = 
lpfc_dump_wakeup_param_cmpl; 662 pmb->vport = phba->pport; 663 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 664 665 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 666 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 667 "to get Option ROM version status x%x\n", rc); 668 mempool_free(pmb, phba->mbox_mem_pool); 669 } 670 671 return 0; 672 } 673 674 /** 675 * lpfc_hba_init_link - Initialize the FC link 676 * @phba: pointer to lpfc hba data structure. 677 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 678 * 679 * This routine will issue the INIT_LINK mailbox command call. 680 * It is available to other drivers through the lpfc_hba data 681 * structure for use as a delayed link up mechanism with the 682 * module parameter lpfc_suppress_link_up. 683 * 684 * Return code 685 * 0 - success 686 * Any other value - error 687 **/ 688 static int 689 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 690 { 691 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 692 } 693 694 /** 695 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 696 * @phba: pointer to lpfc hba data structure. 697 * @fc_topology: desired fc topology. 698 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 699 * 700 * This routine will issue the INIT_LINK mailbox command call. 701 * It is available to other drivers through the lpfc_hba data 702 * structure for use as a delayed link up mechanism with the 703 * module parameter lpfc_suppress_link_up. 704 * 705 * Return code 706 * 0 - success 707 * Any other value - error 708 **/ 709 int 710 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 711 uint32_t flag) 712 { 713 struct lpfc_vport *vport = phba->pport; 714 LPFC_MBOXQ_t *pmb; 715 MAILBOX_t *mb; 716 int rc; 717 718 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 719 if (!pmb) { 720 phba->link_state = LPFC_HBA_ERROR; 721 return -ENOMEM; 722 } 723 mb = &pmb->u.mb; 724 pmb->vport = vport; 725 726 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 727 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 728 !(phba->lmt & LMT_1Gb)) || 729 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 730 !(phba->lmt & LMT_2Gb)) || 731 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 732 !(phba->lmt & LMT_4Gb)) || 733 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 734 !(phba->lmt & LMT_8Gb)) || 735 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 736 !(phba->lmt & LMT_10Gb)) || 737 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 738 !(phba->lmt & LMT_16Gb)) || 739 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 740 !(phba->lmt & LMT_32Gb)) || 741 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 742 !(phba->lmt & LMT_64Gb))) { 743 /* Reset link speed to auto */ 744 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 745 "1302 Invalid speed for this board:%d " 746 "Reset link speed to auto.\n", 747 phba->cfg_link_speed); 748 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 749 } 750 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 751 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 752 if (phba->sli_rev < LPFC_SLI_REV4) 753 lpfc_set_loopback_flag(phba); 754 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 755 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 757 "0498 Adapter failed to init, mbxCmd x%x " 758 "INIT_LINK, mbxStatus x%x\n", 759 mb->mbxCommand, mb->mbxStatus); 760 if (phba->sli_rev <= LPFC_SLI_REV3) { 761 /* Clear 
all interrupt enable conditions */ 762 writel(0, phba->HCregaddr); 763 readl(phba->HCregaddr); /* flush */ 764 /* Clear all pending interrupts */ 765 writel(0xffffffff, phba->HAregaddr); 766 readl(phba->HAregaddr); /* flush */ 767 } 768 phba->link_state = LPFC_HBA_ERROR; 769 if (rc != MBX_BUSY || flag == MBX_POLL) 770 mempool_free(pmb, phba->mbox_mem_pool); 771 return -EIO; 772 } 773 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 774 if (flag == MBX_POLL) 775 mempool_free(pmb, phba->mbox_mem_pool); 776 777 return 0; 778 } 779 780 /** 781 * lpfc_hba_down_link - this routine downs the FC link 782 * @phba: pointer to lpfc hba data structure. 783 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 784 * 785 * This routine will issue the DOWN_LINK mailbox command call. 786 * It is available to other drivers through the lpfc_hba data 787 * structure for use to stop the link. 788 * 789 * Return code 790 * 0 - success 791 * Any other value - error 792 **/ 793 static int 794 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 795 { 796 LPFC_MBOXQ_t *pmb; 797 int rc; 798 799 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 800 if (!pmb) { 801 phba->link_state = LPFC_HBA_ERROR; 802 return -ENOMEM; 803 } 804 805 lpfc_printf_log(phba, 806 KERN_ERR, LOG_INIT, 807 "0491 Adapter Link is disabled.\n"); 808 lpfc_down_link(phba, pmb); 809 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 810 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 811 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 812 lpfc_printf_log(phba, 813 KERN_ERR, LOG_INIT, 814 "2522 Adapter failed to issue DOWN_LINK" 815 " mbox command rc 0x%x\n", rc); 816 817 mempool_free(pmb, phba->mbox_mem_pool); 818 return -EIO; 819 } 820 if (flag == MBX_POLL) 821 mempool_free(pmb, phba->mbox_mem_pool); 822 823 return 0; 824 } 825 826 /** 827 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 828 * @phba: pointer to lpfc HBA data structure. 829 * 830 * This routine will do LPFC uninitialization before the HBA is reset when 831 * bringing down the SLI Layer. 832 * 833 * Return codes 834 * 0 - success. 835 * Any other value - error. 836 **/ 837 int 838 lpfc_hba_down_prep(struct lpfc_hba *phba) 839 { 840 struct lpfc_vport **vports; 841 int i; 842 843 if (phba->sli_rev <= LPFC_SLI_REV3) { 844 /* Disable interrupts */ 845 writel(0, phba->HCregaddr); 846 readl(phba->HCregaddr); /* flush */ 847 } 848 849 if (phba->pport->load_flag & FC_UNLOADING) 850 lpfc_cleanup_discovery_resources(phba->pport); 851 else { 852 vports = lpfc_create_vport_work_array(phba); 853 if (vports != NULL) 854 for (i = 0; i <= phba->max_vports && 855 vports[i] != NULL; i++) 856 lpfc_cleanup_discovery_resources(vports[i]); 857 lpfc_destroy_vport_work_array(phba, vports); 858 } 859 return 0; 860 } 861 862 /** 863 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 864 * rspiocb which got deferred 865 * 866 * @phba: pointer to lpfc HBA data structure. 867 * 868 * This routine will cleanup completed slow path events after HBA is reset 869 * when bringing down the SLI Layer. 870 * 871 * 872 * Return codes 873 * void. 
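 * Each deferred event is either a WCQE completion, whose iocbq is released
 * back to the pool, or a received frame, whose buffer is freed with
 * lpfc_in_buf_free().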
874 **/ 875 static void 876 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 877 { 878 struct lpfc_iocbq *rspiocbq; 879 struct hbq_dmabuf *dmabuf; 880 struct lpfc_cq_event *cq_event; 881 882 spin_lock_irq(&phba->hbalock); 883 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 884 spin_unlock_irq(&phba->hbalock); 885 886 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 887 /* Get the response iocb from the head of work queue */ 888 spin_lock_irq(&phba->hbalock); 889 list_remove_head(&phba->sli4_hba.sp_queue_event, 890 cq_event, struct lpfc_cq_event, list); 891 spin_unlock_irq(&phba->hbalock); 892 893 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 894 case CQE_CODE_COMPL_WQE: 895 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 896 cq_event); 897 lpfc_sli_release_iocbq(phba, rspiocbq); 898 break; 899 case CQE_CODE_RECEIVE: 900 case CQE_CODE_RECEIVE_V1: 901 dmabuf = container_of(cq_event, struct hbq_dmabuf, 902 cq_event); 903 lpfc_in_buf_free(phba, &dmabuf->dbuf); 904 } 905 } 906 } 907 908 /** 909 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 910 * @phba: pointer to lpfc HBA data structure. 911 * 912 * This routine will cleanup posted ELS buffers after the HBA is reset 913 * when bringing down the SLI Layer. 914 * 915 * 916 * Return codes 917 * void. 918 **/ 919 static void 920 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 921 { 922 struct lpfc_sli *psli = &phba->sli; 923 struct lpfc_sli_ring *pring; 924 struct lpfc_dmabuf *mp, *next_mp; 925 LIST_HEAD(buflist); 926 int count; 927 928 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 929 lpfc_sli_hbqbuf_free_all(phba); 930 else { 931 /* Cleanup preposted buffers on the ELS ring */ 932 pring = &psli->sli3_ring[LPFC_ELS_RING]; 933 spin_lock_irq(&phba->hbalock); 934 list_splice_init(&pring->postbufq, &buflist); 935 spin_unlock_irq(&phba->hbalock); 936 937 count = 0; 938 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 939 list_del(&mp->list); 940 count++; 941 lpfc_mbuf_free(phba, mp->virt, mp->phys); 942 kfree(mp); 943 } 944 945 spin_lock_irq(&phba->hbalock); 946 pring->postbufq_cnt -= count; 947 spin_unlock_irq(&phba->hbalock); 948 } 949 } 950 951 /** 952 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 953 * @phba: pointer to lpfc HBA data structure. 954 * 955 * This routine will cleanup the txcmplq after the HBA is reset when bringing 956 * down the SLI Layer. 957 * 958 * Return codes 959 * void 960 **/ 961 static void 962 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 963 { 964 struct lpfc_sli *psli = &phba->sli; 965 struct lpfc_queue *qp = NULL; 966 struct lpfc_sli_ring *pring; 967 LIST_HEAD(completions); 968 int i; 969 struct lpfc_iocbq *piocb, *next_iocb; 970 971 if (phba->sli_rev != LPFC_SLI_REV4) { 972 for (i = 0; i < psli->num_rings; i++) { 973 pring = &psli->sli3_ring[i]; 974 spin_lock_irq(&phba->hbalock); 975 /* At this point in time the HBA is either reset or DOA 976 * Nothing should be on txcmplq as it will 977 * NEVER complete. 
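 * The entries are spliced onto a local list and completed back with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED by lpfc_sli_cancel_iocbs() below.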
978 */ 979 list_splice_init(&pring->txcmplq, &completions); 980 pring->txcmplq_cnt = 0; 981 spin_unlock_irq(&phba->hbalock); 982 983 lpfc_sli_abort_iocb_ring(phba, pring); 984 } 985 /* Cancel all the IOCBs from the completions list */ 986 lpfc_sli_cancel_iocbs(phba, &completions, 987 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 988 return; 989 } 990 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 991 pring = qp->pring; 992 if (!pring) 993 continue; 994 spin_lock_irq(&pring->ring_lock); 995 list_for_each_entry_safe(piocb, next_iocb, 996 &pring->txcmplq, list) 997 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 998 list_splice_init(&pring->txcmplq, &completions); 999 pring->txcmplq_cnt = 0; 1000 spin_unlock_irq(&pring->ring_lock); 1001 lpfc_sli_abort_iocb_ring(phba, pring); 1002 } 1003 /* Cancel all the IOCBs from the completions list */ 1004 lpfc_sli_cancel_iocbs(phba, &completions, 1005 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1006 } 1007 1008 /** 1009 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 1011 * @phba: pointer to lpfc HBA data structure. 1012 * 1013 * This routine will do uninitialization after the HBA is reset when bringing 1014 * down the SLI Layer. 1015 * 1016 * Return codes 1017 * 0 - success. 1018 * Any other value - error. 1019 **/ 1020 static int 1021 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 1022 { 1023 lpfc_hba_free_post_buf(phba); 1024 lpfc_hba_clean_txcmplq(phba); 1025 return 0; 1026 } 1027 1028 /** 1029 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 1030 * @phba: pointer to lpfc HBA data structure. 1031 * 1032 * This routine will do uninitialization after the HBA is reset when bringing 1033 * down the SLI Layer. 1034 * 1035 * Return codes 1036 * 0 - success. 1037 * Any other value - error. 1038 **/ 1039 static int 1040 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1041 { 1042 struct lpfc_io_buf *psb, *psb_next; 1043 struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next; 1044 struct lpfc_sli4_hdw_queue *qp; 1045 LIST_HEAD(aborts); 1046 LIST_HEAD(nvme_aborts); 1047 LIST_HEAD(nvmet_aborts); 1048 struct lpfc_sglq *sglq_entry = NULL; 1049 int cnt, idx; 1050 1051 1052 lpfc_sli_hbqbuf_free_all(phba); 1053 lpfc_hba_clean_txcmplq(phba); 1054 1055 /* At this point in time the HBA is either reset or DOA. Either 1056 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1057 * on the lpfc_els_sgl_list so that it can either be freed if the 1058 * driver is unloading or reposted if the driver is restarting 1059 * the port. 1060 */ 1061 spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */ 1062 /* scsi_buf_list */ 1063 /* sgl_list_lock required because worker thread uses this 1064 * list. 1065 */ 1066 spin_lock(&phba->sli4_hba.sgl_list_lock); 1067 list_for_each_entry(sglq_entry, 1068 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1069 sglq_entry->state = SGL_FREED; 1070 1071 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1072 &phba->sli4_hba.lpfc_els_sgl_list); 1073 1074 1075 spin_unlock(&phba->sli4_hba.sgl_list_lock); 1076 1077 /* abts_xxxx_buf_list_lock required because worker thread uses this 1078 * list.
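 * Aborted buffers are cleared (pCmd reset, status forced to IOSTAT_SUCCESS)
 * and moved back to each hardware queue's lpfc_io_buf_list_put for reuse;
 * the total number recovered is returned in cnt.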
1079 */ 1080 cnt = 0; 1081 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1082 qp = &phba->sli4_hba.hdwq[idx]; 1083 1084 spin_lock(&qp->abts_scsi_buf_list_lock); 1085 list_splice_init(&qp->lpfc_abts_scsi_buf_list, 1086 &aborts); 1087 1088 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1089 psb->pCmd = NULL; 1090 psb->status = IOSTAT_SUCCESS; 1091 cnt++; 1092 } 1093 spin_lock(&qp->io_buf_list_put_lock); 1094 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1095 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1096 qp->abts_scsi_io_bufs = 0; 1097 spin_unlock(&qp->io_buf_list_put_lock); 1098 spin_unlock(&qp->abts_scsi_buf_list_lock); 1099 1100 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1101 spin_lock(&qp->abts_nvme_buf_list_lock); 1102 list_splice_init(&qp->lpfc_abts_nvme_buf_list, 1103 &nvme_aborts); 1104 list_for_each_entry_safe(psb, psb_next, &nvme_aborts, 1105 list) { 1106 psb->pCmd = NULL; 1107 psb->status = IOSTAT_SUCCESS; 1108 cnt++; 1109 } 1110 spin_lock(&qp->io_buf_list_put_lock); 1111 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1112 qp->abts_nvme_io_bufs = 0; 1113 list_splice_init(&nvme_aborts, 1114 &qp->lpfc_io_buf_list_put); 1115 spin_unlock(&qp->io_buf_list_put_lock); 1116 spin_unlock(&qp->abts_nvme_buf_list_lock); 1117 1118 } 1119 } 1120 spin_unlock_irq(&phba->hbalock); 1121 1122 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1123 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1124 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1125 &nvmet_aborts); 1126 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1127 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1128 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); 1129 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1130 } 1131 } 1132 1133 lpfc_sli4_free_sp_events(phba); 1134 return cnt; 1135 } 1136 1137 /** 1138 * lpfc_hba_down_post - Wrapper func for hba down post routine 1139 * @phba: pointer to lpfc HBA data structure. 1140 * 1141 * This routine wraps the actual SLI3 or SLI4 routine for performing 1142 * uninitialization after the HBA is reset when bring down the SLI Layer. 1143 * 1144 * Return codes 1145 * 0 - success. 1146 * Any other value - error. 1147 **/ 1148 int 1149 lpfc_hba_down_post(struct lpfc_hba *phba) 1150 { 1151 return (*phba->lpfc_hba_down_post)(phba); 1152 } 1153 1154 /** 1155 * lpfc_hb_timeout - The HBA-timer timeout handler 1156 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1157 * 1158 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1159 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1160 * work-port-events bitmap and the worker thread is notified. This timeout 1161 * event will be used by the worker thread to invoke the actual timeout 1162 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1163 * be performed in the timeout handler and the HBA timeout event bit shall 1164 * be cleared by the worker thread after it has taken the event bitmap out. 
1165 **/ 1166 static void 1167 lpfc_hb_timeout(struct timer_list *t) 1168 { 1169 struct lpfc_hba *phba; 1170 uint32_t tmo_posted; 1171 unsigned long iflag; 1172 1173 phba = from_timer(phba, t, hb_tmofunc); 1174 1175 /* Check for heart beat timeout conditions */ 1176 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1177 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1178 if (!tmo_posted) 1179 phba->pport->work_port_events |= WORKER_HB_TMO; 1180 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1181 1182 /* Tell the worker thread there is work to do */ 1183 if (!tmo_posted) 1184 lpfc_worker_wake_up(phba); 1185 return; 1186 } 1187 1188 /** 1189 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1190 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1191 * 1192 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1193 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1194 * work-port-events bitmap and the worker thread is notified. This timeout 1195 * event will be used by the worker thread to invoke the actual timeout 1196 * handler routine, lpfc_rrq_handler. Any periodical operations will 1197 * be performed in the timeout handler and the RRQ timeout event bit shall 1198 * be cleared by the worker thread after it has taken the event bitmap out. 1199 **/ 1200 static void 1201 lpfc_rrq_timeout(struct timer_list *t) 1202 { 1203 struct lpfc_hba *phba; 1204 unsigned long iflag; 1205 1206 phba = from_timer(phba, t, rrq_tmr); 1207 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1208 if (!(phba->pport->load_flag & FC_UNLOADING)) 1209 phba->hba_flag |= HBA_RRQ_ACTIVE; 1210 else 1211 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1212 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1213 1214 if (!(phba->pport->load_flag & FC_UNLOADING)) 1215 lpfc_worker_wake_up(phba); 1216 } 1217 1218 /** 1219 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1220 * @phba: pointer to lpfc hba data structure. 1221 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1222 * 1223 * This is the callback function to the lpfc heart-beat mailbox command. 1224 * If configured, the lpfc driver issues the heart-beat mailbox command to 1225 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1226 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1227 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1228 * heart-beat outstanding state. Once the mailbox command comes back and 1229 * no error conditions detected, the heart-beat mailbox command timer is 1230 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1231 * state is cleared for the next heart-beat. If the timer expired with the 1232 * heart-beat outstanding state set, the driver will put the HBA offline. 
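 * This completion handler itself only clears hb_outstanding and, while the
 * port is online, not in error, and not unloading, re-arms hb_tmofunc for
 * the next interval.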
1233 **/ 1234 static void 1235 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1236 { 1237 unsigned long drvr_flag; 1238 1239 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1240 phba->hb_outstanding = 0; 1241 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1242 1243 /* Check and reset heart-beat timer is necessary */ 1244 mempool_free(pmboxq, phba->mbox_mem_pool); 1245 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1246 !(phba->link_state == LPFC_HBA_ERROR) && 1247 !(phba->pport->load_flag & FC_UNLOADING)) 1248 mod_timer(&phba->hb_tmofunc, 1249 jiffies + 1250 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1251 return; 1252 } 1253 1254 static void 1255 lpfc_hb_eq_delay_work(struct work_struct *work) 1256 { 1257 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1258 struct lpfc_hba, eq_delay_work); 1259 struct lpfc_eq_intr_info *eqi, *eqi_new; 1260 struct lpfc_queue *eq, *eq_next; 1261 unsigned char *eqcnt = NULL; 1262 uint32_t usdelay; 1263 int i; 1264 1265 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1266 return; 1267 1268 if (phba->link_state == LPFC_HBA_ERROR || 1269 phba->pport->fc_flag & FC_OFFLINE_MODE) 1270 goto requeue; 1271 1272 eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char), 1273 GFP_KERNEL); 1274 if (!eqcnt) 1275 goto requeue; 1276 1277 for (i = 0; i < phba->cfg_irq_chann; i++) { 1278 eq = phba->sli4_hba.hdwq[i].hba_eq; 1279 if (eq && eqcnt[eq->last_cpu] < 2) 1280 eqcnt[eq->last_cpu]++; 1281 continue; 1282 } 1283 1284 for_each_present_cpu(i) { 1285 if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2) 1286 continue; 1287 1288 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1289 1290 usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) * 1291 LPFC_EQ_DELAY_STEP; 1292 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1293 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1294 1295 eqi->icnt = 0; 1296 1297 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1298 if (eq->last_cpu != i) { 1299 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1300 eq->last_cpu); 1301 list_move_tail(&eq->cpu_list, &eqi_new->list); 1302 continue; 1303 } 1304 if (usdelay != eq->q_mode) 1305 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1306 usdelay); 1307 } 1308 } 1309 1310 kfree(eqcnt); 1311 1312 requeue: 1313 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1314 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1315 } 1316 1317 /** 1318 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1319 * @phba: pointer to lpfc hba data structure. 1320 * 1321 * For each heartbeat, this routine does some heuristic methods to adjust 1322 * XRI distribution. The goal is to fully utilize free XRIs. 1323 **/ 1324 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1325 { 1326 u32 i; 1327 u32 hwq_count; 1328 1329 hwq_count = phba->cfg_hdw_queue; 1330 for (i = 0; i < hwq_count; i++) { 1331 /* Adjust XRIs in private pool */ 1332 lpfc_adjust_pvt_pool_count(phba, i); 1333 1334 /* Adjust high watermark */ 1335 lpfc_adjust_high_watermark(phba, i); 1336 1337 #ifdef LPFC_MXP_STAT 1338 /* Snapshot pbl, pvt and busy count */ 1339 lpfc_snapshot_mxp(phba, i); 1340 #endif 1341 } 1342 } 1343 1344 /** 1345 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1346 * @phba: pointer to lpfc hba data structure. 1347 * 1348 * This is the actual HBA-timer timeout handler to be invoked by the worker 1349 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1350 * handler performs any periodic operations needed for the device. 
If such 1351 * periodic event has already been attended to either in the interrupt handler 1352 * or by processing slow-ring or fast-ring events within the HBA-timer 1353 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1354 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1355 * is configured and there is no heart-beat mailbox command outstanding, a 1356 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1357 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1358 * to offline. 1359 **/ 1360 void 1361 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1362 { 1363 struct lpfc_vport **vports; 1364 LPFC_MBOXQ_t *pmboxq; 1365 struct lpfc_dmabuf *buf_ptr; 1366 int retval, i; 1367 struct lpfc_sli *psli = &phba->sli; 1368 LIST_HEAD(completions); 1369 1370 if (phba->cfg_xri_rebalancing) { 1371 /* Multi-XRI pools handler */ 1372 lpfc_hb_mxp_handler(phba); 1373 } 1374 1375 vports = lpfc_create_vport_work_array(phba); 1376 if (vports != NULL) 1377 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1378 lpfc_rcv_seq_check_edtov(vports[i]); 1379 lpfc_fdmi_num_disc_check(vports[i]); 1380 } 1381 lpfc_destroy_vport_work_array(phba, vports); 1382 1383 if ((phba->link_state == LPFC_HBA_ERROR) || 1384 (phba->pport->load_flag & FC_UNLOADING) || 1385 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1386 return; 1387 1388 spin_lock_irq(&phba->pport->work_port_lock); 1389 1390 if (time_after(phba->last_completion_time + 1391 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1392 jiffies)) { 1393 spin_unlock_irq(&phba->pport->work_port_lock); 1394 if (!phba->hb_outstanding) 1395 mod_timer(&phba->hb_tmofunc, 1396 jiffies + 1397 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1398 else 1399 mod_timer(&phba->hb_tmofunc, 1400 jiffies + 1401 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1402 return; 1403 } 1404 spin_unlock_irq(&phba->pport->work_port_lock); 1405 1406 if (phba->elsbuf_cnt && 1407 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1408 spin_lock_irq(&phba->hbalock); 1409 list_splice_init(&phba->elsbuf, &completions); 1410 phba->elsbuf_cnt = 0; 1411 phba->elsbuf_prev_cnt = 0; 1412 spin_unlock_irq(&phba->hbalock); 1413 1414 while (!list_empty(&completions)) { 1415 list_remove_head(&completions, buf_ptr, 1416 struct lpfc_dmabuf, list); 1417 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1418 kfree(buf_ptr); 1419 } 1420 } 1421 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1422 1423 /* If there is no heart beat outstanding, issue a heartbeat command */ 1424 if (phba->cfg_enable_hba_heartbeat) { 1425 if (!phba->hb_outstanding) { 1426 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1427 (list_empty(&psli->mboxq))) { 1428 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1429 GFP_KERNEL); 1430 if (!pmboxq) { 1431 mod_timer(&phba->hb_tmofunc, 1432 jiffies + 1433 msecs_to_jiffies(1000 * 1434 LPFC_HB_MBOX_INTERVAL)); 1435 return; 1436 } 1437 1438 lpfc_heart_beat(phba, pmboxq); 1439 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1440 pmboxq->vport = phba->pport; 1441 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1442 MBX_NOWAIT); 1443 1444 if (retval != MBX_BUSY && 1445 retval != MBX_SUCCESS) { 1446 mempool_free(pmboxq, 1447 phba->mbox_mem_pool); 1448 mod_timer(&phba->hb_tmofunc, 1449 jiffies + 1450 msecs_to_jiffies(1000 * 1451 LPFC_HB_MBOX_INTERVAL)); 1452 return; 1453 } 1454 phba->skipped_hb = 0; 1455 phba->hb_outstanding = 1; 1456 } else if (time_before_eq(phba->last_completion_time, 1457 phba->skipped_hb)) { 1458 
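/* The heartbeat could not be issued (mailbox subsystem busy) and no
 * completion has arrived since skipped_hb was recorded; log how long
 * completions have been stalled.
 */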
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1459 "2857 Last completion time not " 1460 " updated in %d ms\n", 1461 jiffies_to_msecs(jiffies 1462 - phba->last_completion_time)); 1463 } else 1464 phba->skipped_hb = jiffies; 1465 1466 mod_timer(&phba->hb_tmofunc, 1467 jiffies + 1468 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1469 return; 1470 } else { 1471 /* 1472 * If heart beat timeout called with hb_outstanding set 1473 * we need to give the hb mailbox cmd a chance to 1474 * complete or TMO. 1475 */ 1476 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1477 "0459 Adapter heartbeat still out" 1478 "standing:last compl time was %d ms.\n", 1479 jiffies_to_msecs(jiffies 1480 - phba->last_completion_time)); 1481 mod_timer(&phba->hb_tmofunc, 1482 jiffies + 1483 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1484 } 1485 } else { 1486 mod_timer(&phba->hb_tmofunc, 1487 jiffies + 1488 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1489 } 1490 } 1491 1492 /** 1493 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1494 * @phba: pointer to lpfc hba data structure. 1495 * 1496 * This routine is called to bring the HBA offline when HBA hardware error 1497 * other than Port Error 6 has been detected. 1498 **/ 1499 static void 1500 lpfc_offline_eratt(struct lpfc_hba *phba) 1501 { 1502 struct lpfc_sli *psli = &phba->sli; 1503 1504 spin_lock_irq(&phba->hbalock); 1505 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1506 spin_unlock_irq(&phba->hbalock); 1507 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1508 1509 lpfc_offline(phba); 1510 lpfc_reset_barrier(phba); 1511 spin_lock_irq(&phba->hbalock); 1512 lpfc_sli_brdreset(phba); 1513 spin_unlock_irq(&phba->hbalock); 1514 lpfc_hba_down_post(phba); 1515 lpfc_sli_brdready(phba, HS_MBRDY); 1516 lpfc_unblock_mgmt_io(phba); 1517 phba->link_state = LPFC_HBA_ERROR; 1518 return; 1519 } 1520 1521 /** 1522 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1523 * @phba: pointer to lpfc hba data structure. 1524 * 1525 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1526 * other than Port Error 6 has been detected. 1527 **/ 1528 void 1529 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1530 { 1531 spin_lock_irq(&phba->hbalock); 1532 phba->link_state = LPFC_HBA_ERROR; 1533 spin_unlock_irq(&phba->hbalock); 1534 1535 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1536 lpfc_offline(phba); 1537 lpfc_hba_down_post(phba); 1538 lpfc_unblock_mgmt_io(phba); 1539 } 1540 1541 /** 1542 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1543 * @phba: pointer to lpfc hba data structure. 1544 * 1545 * This routine is invoked to handle the deferred HBA hardware error 1546 * conditions. This type of error is indicated by HBA by setting ER1 1547 * and another ER bit in the host status register. The driver will 1548 * wait until the ER1 bit clears before handling the error condition. 1549 **/ 1550 static void 1551 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1552 { 1553 uint32_t old_host_status = phba->work_hs; 1554 struct lpfc_sli *psli = &phba->sli; 1555 1556 /* If the pci channel is offline, ignore possible errors, 1557 * since we cannot communicate with the pci card anyway. 
1558 */ 1559 if (pci_channel_offline(phba->pcidev)) { 1560 spin_lock_irq(&phba->hbalock); 1561 phba->hba_flag &= ~DEFER_ERATT; 1562 spin_unlock_irq(&phba->hbalock); 1563 return; 1564 } 1565 1566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1567 "0479 Deferred Adapter Hardware Error " 1568 "Data: x%x x%x x%x\n", 1569 phba->work_hs, 1570 phba->work_status[0], phba->work_status[1]); 1571 1572 spin_lock_irq(&phba->hbalock); 1573 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1574 spin_unlock_irq(&phba->hbalock); 1575 1576 1577 /* 1578 * Firmware stops when it triggered erratt. That could cause the I/Os 1579 * to be dropped by the firmware. Error out the iocbs (I/Os) on the txcmplq and let the 1580 * SCSI layer retry them after re-establishing the link. 1581 */ 1582 lpfc_sli_abort_fcp_rings(phba); 1583 1584 /* 1585 * There was a firmware error. Take the hba offline and then 1586 * attempt to restart it. 1587 */ 1588 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 1589 lpfc_offline(phba); 1590 1591 /* Wait for the ER1 bit to clear.*/ 1592 while (phba->work_hs & HS_FFER1) { 1593 msleep(100); 1594 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1595 phba->work_hs = UNPLUG_ERR ; 1596 break; 1597 } 1598 /* If driver is unloading let the worker thread continue */ 1599 if (phba->pport->load_flag & FC_UNLOADING) { 1600 phba->work_hs = 0; 1601 break; 1602 } 1603 } 1604 1605 /* 1606 * This is to protect against a race condition in which 1607 * the first write to the host attention register clears the 1608 * host status register. 1609 */ 1610 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1611 phba->work_hs = old_host_status & ~HS_FFER1; 1612 1613 spin_lock_irq(&phba->hbalock); 1614 phba->hba_flag &= ~DEFER_ERATT; 1615 spin_unlock_irq(&phba->hbalock); 1616 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1617 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1618 } 1619 1620 static void 1621 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1622 { 1623 struct lpfc_board_event_header board_event; 1624 struct Scsi_Host *shost; 1625 1626 board_event.event_type = FC_REG_BOARD_EVENT; 1627 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1628 shost = lpfc_shost_from_vport(phba->pport); 1629 fc_host_post_vendor_event(shost, fc_get_event_number(), 1630 sizeof(board_event), 1631 (char *) &board_event, 1632 LPFC_NL_VENDOR_ID); 1633 } 1634 1635 /** 1636 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1637 * @phba: pointer to lpfc hba data structure. 1638 * 1639 * This routine is invoked to handle the following HBA hardware error 1640 * conditions: 1641 * 1 - HBA error attention interrupt 1642 * 2 - DMA ring index out of range 1643 * 3 - Mailbox command came back as unknown 1644 **/ 1645 static void 1646 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1647 { 1648 struct lpfc_vport *vport = phba->pport; 1649 struct lpfc_sli *psli = &phba->sli; 1650 uint32_t event_data; 1651 unsigned long temperature; 1652 struct temp_event temp_event_data; 1653 struct Scsi_Host *shost; 1654 1655 /* If the pci channel is offline, ignore possible errors, 1656 * since we cannot communicate with the pci card anyway.
1657 */ 1658 if (pci_channel_offline(phba->pcidev)) { 1659 spin_lock_irq(&phba->hbalock); 1660 phba->hba_flag &= ~DEFER_ERATT; 1661 spin_unlock_irq(&phba->hbalock); 1662 return; 1663 } 1664 1665 /* If resets are disabled then leave the HBA alone and return */ 1666 if (!phba->cfg_enable_hba_reset) 1667 return; 1668 1669 /* Send an internal error event to mgmt application */ 1670 lpfc_board_errevt_to_mgmt(phba); 1671 1672 if (phba->hba_flag & DEFER_ERATT) 1673 lpfc_handle_deferred_eratt(phba); 1674 1675 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1676 if (phba->work_hs & HS_FFER6) 1677 /* Re-establishing Link */ 1678 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1679 "1301 Re-establishing Link " 1680 "Data: x%x x%x x%x\n", 1681 phba->work_hs, phba->work_status[0], 1682 phba->work_status[1]); 1683 if (phba->work_hs & HS_FFER8) 1684 /* Device Zeroization */ 1685 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1686 "2861 Host Authentication device " 1687 "zeroization Data:x%x x%x x%x\n", 1688 phba->work_hs, phba->work_status[0], 1689 phba->work_status[1]); 1690 1691 spin_lock_irq(&phba->hbalock); 1692 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1693 spin_unlock_irq(&phba->hbalock); 1694 1695 /* 1696 * Firmware stops when it triggered erratt with HS_FFER6. 1697 * That could cause the I/Os to be dropped by the firmware. 1698 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer 1699 * retry them after re-establishing the link. 1700 */ 1701 lpfc_sli_abort_fcp_rings(phba); 1702 1703 /* 1704 * There was a firmware error. Take the hba offline and then 1705 * attempt to restart it. 1706 */ 1707 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1708 lpfc_offline(phba); 1709 lpfc_sli_brdrestart(phba); 1710 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1711 lpfc_unblock_mgmt_io(phba); 1712 return; 1713 } 1714 lpfc_unblock_mgmt_io(phba); 1715 } else if (phba->work_hs & HS_CRIT_TEMP) { 1716 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1717 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1718 temp_event_data.event_code = LPFC_CRIT_TEMP; 1719 temp_event_data.data = (uint32_t)temperature; 1720 1721 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1722 "0406 Adapter maximum temperature exceeded " 1723 "(%ld), taking this port offline " 1724 "Data: x%x x%x x%x\n", 1725 temperature, phba->work_hs, 1726 phba->work_status[0], phba->work_status[1]); 1727 1728 shost = lpfc_shost_from_vport(phba->pport); 1729 fc_host_post_vendor_event(shost, fc_get_event_number(), 1730 sizeof(temp_event_data), 1731 (char *) &temp_event_data, 1732 SCSI_NL_VID_TYPE_PCI 1733 | PCI_VENDOR_ID_EMULEX); 1734 1735 spin_lock_irq(&phba->hbalock); 1736 phba->over_temp_state = HBA_OVER_TEMP; 1737 spin_unlock_irq(&phba->hbalock); 1738 lpfc_offline_eratt(phba); 1739 1740 } else { 1741 /* The if clause above forces this code path when the status 1742 * failure is a value other than FFER6. Do not call the offline 1743 * twice. This is the adapter hardware error path.
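 * A FC_REG_DUMP_EVENT vendor event is posted to the FC transport and the
 * port is then taken offline via lpfc_offline_eratt().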
1744 */ 1745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1746 "0457 Adapter Hardware Error " 1747 "Data: x%x x%x x%x\n", 1748 phba->work_hs, 1749 phba->work_status[0], phba->work_status[1]); 1750 1751 event_data = FC_REG_DUMP_EVENT; 1752 shost = lpfc_shost_from_vport(vport); 1753 fc_host_post_vendor_event(shost, fc_get_event_number(), 1754 sizeof(event_data), (char *) &event_data, 1755 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1756 1757 lpfc_offline_eratt(phba); 1758 } 1759 return; 1760 } 1761 1762 /** 1763 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1764 * @phba: pointer to lpfc hba data structure. 1765 * @mbx_action: flag for mailbox shutdown action. 1766 * 1767 * This routine is invoked to perform an SLI4 port PCI function reset in 1768 * response to port status register polling attention. It waits for port 1769 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1770 * During this process, interrupt vectors are freed and later requested 1771 * for handling possible port resource change. 1772 **/ 1773 static int 1774 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1775 bool en_rn_msg) 1776 { 1777 int rc; 1778 uint32_t intr_mode; 1779 1780 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1781 LPFC_SLI_INTF_IF_TYPE_2) { 1782 /* 1783 * On error status condition, driver need to wait for port 1784 * ready before performing reset. 1785 */ 1786 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1787 if (rc) 1788 return rc; 1789 } 1790 1791 /* need reset: attempt for port recovery */ 1792 if (en_rn_msg) 1793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1794 "2887 Reset Needed: Attempting Port " 1795 "Recovery...\n"); 1796 lpfc_offline_prep(phba, mbx_action); 1797 lpfc_offline(phba); 1798 /* release interrupt for possible resource change */ 1799 lpfc_sli4_disable_intr(phba); 1800 rc = lpfc_sli_brdrestart(phba); 1801 if (rc) { 1802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1803 "6309 Failed to restart board\n"); 1804 return rc; 1805 } 1806 /* request and enable interrupt */ 1807 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1808 if (intr_mode == LPFC_INTR_ERROR) { 1809 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1810 "3175 Failed to enable interrupt\n"); 1811 return -EIO; 1812 } 1813 phba->intr_mode = intr_mode; 1814 rc = lpfc_online(phba); 1815 if (rc == 0) 1816 lpfc_unblock_mgmt_io(phba); 1817 1818 return rc; 1819 } 1820 1821 /** 1822 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1823 * @phba: pointer to lpfc hba data structure. 1824 * 1825 * This routine is invoked to handle the SLI4 HBA hardware error attention 1826 * conditions. 1827 **/ 1828 static void 1829 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1830 { 1831 struct lpfc_vport *vport = phba->pport; 1832 uint32_t event_data; 1833 struct Scsi_Host *shost; 1834 uint32_t if_type; 1835 struct lpfc_register portstat_reg = {0}; 1836 uint32_t reg_err1, reg_err2; 1837 uint32_t uerrlo_reg, uemasklo_reg; 1838 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1839 bool en_rn_msg = true; 1840 struct temp_event temp_event_data; 1841 struct lpfc_register portsmphr_reg; 1842 int rc, i; 1843 1844 /* If the pci channel is offline, ignore possible errors, since 1845 * we cannot communicate with the pci card anyway. 
1846 */ 1847 if (pci_channel_offline(phba->pcidev)) { 1848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1849 "3166 pci channel is offline\n"); 1850 lpfc_sli4_offline_eratt(phba); 1851 return; 1852 } 1853 1854 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1855 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1856 switch (if_type) { 1857 case LPFC_SLI_INTF_IF_TYPE_0: 1858 pci_rd_rc1 = lpfc_readl( 1859 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1860 &uerrlo_reg); 1861 pci_rd_rc2 = lpfc_readl( 1862 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1863 &uemasklo_reg); 1864 /* consider PCI bus read error as pci_channel_offline */ 1865 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1866 return; 1867 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1868 lpfc_sli4_offline_eratt(phba); 1869 return; 1870 } 1871 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1872 "7623 Checking UE recoverable"); 1873 1874 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1875 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1876 &portsmphr_reg.word0)) 1877 continue; 1878 1879 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1880 &portsmphr_reg); 1881 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1882 LPFC_PORT_SEM_UE_RECOVERABLE) 1883 break; 1884 /*Sleep for 1Sec, before checking SEMAPHORE */ 1885 msleep(1000); 1886 } 1887 1888 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1889 "4827 smphr_port_status x%x : Waited %dSec", 1890 smphr_port_status, i); 1891 1892 /* Recoverable UE, reset the HBA device */ 1893 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1894 LPFC_PORT_SEM_UE_RECOVERABLE) { 1895 for (i = 0; i < 20; i++) { 1896 msleep(1000); 1897 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1898 &portsmphr_reg.word0) && 1899 (LPFC_POST_STAGE_PORT_READY == 1900 bf_get(lpfc_port_smphr_port_status, 1901 &portsmphr_reg))) { 1902 rc = lpfc_sli4_port_sta_fn_reset(phba, 1903 LPFC_MBX_NO_WAIT, en_rn_msg); 1904 if (rc == 0) 1905 return; 1906 lpfc_printf_log(phba, 1907 KERN_ERR, LOG_INIT, 1908 "4215 Failed to recover UE"); 1909 break; 1910 } 1911 } 1912 } 1913 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1914 "7624 Firmware not ready: Failing UE recovery," 1915 " waited %dSec", i); 1916 lpfc_sli4_offline_eratt(phba); 1917 break; 1918 1919 case LPFC_SLI_INTF_IF_TYPE_2: 1920 case LPFC_SLI_INTF_IF_TYPE_6: 1921 pci_rd_rc1 = lpfc_readl( 1922 phba->sli4_hba.u.if_type2.STATUSregaddr, 1923 &portstat_reg.word0); 1924 /* consider PCI bus read error as pci_channel_offline */ 1925 if (pci_rd_rc1 == -EIO) { 1926 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1927 "3151 PCI bus read access failure: x%x\n", 1928 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1929 lpfc_sli4_offline_eratt(phba); 1930 return; 1931 } 1932 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1933 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1934 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1936 "2889 Port Overtemperature event, " 1937 "taking port offline Data: x%x x%x\n", 1938 reg_err1, reg_err2); 1939 1940 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1941 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1942 temp_event_data.event_code = LPFC_CRIT_TEMP; 1943 temp_event_data.data = 0xFFFFFFFF; 1944 1945 shost = lpfc_shost_from_vport(phba->pport); 1946 fc_host_post_vendor_event(shost, fc_get_event_number(), 1947 sizeof(temp_event_data), 1948 (char *)&temp_event_data, 1949 SCSI_NL_VID_TYPE_PCI 1950 | PCI_VENDOR_ID_EMULEX); 1951 1952 spin_lock_irq(&phba->hbalock); 1953 
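/* Latch the over-temperature state under hbalock before taking the port offline */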
phba->over_temp_state = HBA_OVER_TEMP; 1954 spin_unlock_irq(&phba->hbalock); 1955 lpfc_sli4_offline_eratt(phba); 1956 return; 1957 } 1958 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1959 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 1960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1961 "3143 Port Down: Firmware Update " 1962 "Detected\n"); 1963 en_rn_msg = false; 1964 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1965 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1967 "3144 Port Down: Debug Dump\n"); 1968 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1969 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1971 "3145 Port Down: Provisioning\n"); 1972 1973 /* If resets are disabled then leave the HBA alone and return */ 1974 if (!phba->cfg_enable_hba_reset) 1975 return; 1976 1977 /* Check port status register for function reset */ 1978 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1979 en_rn_msg); 1980 if (rc == 0) { 1981 /* don't report event on forced debug dump */ 1982 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1983 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1984 return; 1985 else 1986 break; 1987 } 1988 /* fall through for not able to recover */ 1989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1990 "3152 Unrecoverable error, bring the port " 1991 "offline\n"); 1992 lpfc_sli4_offline_eratt(phba); 1993 break; 1994 case LPFC_SLI_INTF_IF_TYPE_1: 1995 default: 1996 break; 1997 } 1998 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1999 "3123 Report dump event to upper layer\n"); 2000 /* Send an internal error event to mgmt application */ 2001 lpfc_board_errevt_to_mgmt(phba); 2002 2003 event_data = FC_REG_DUMP_EVENT; 2004 shost = lpfc_shost_from_vport(vport); 2005 fc_host_post_vendor_event(shost, fc_get_event_number(), 2006 sizeof(event_data), (char *) &event_data, 2007 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2008 } 2009 2010 /** 2011 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2012 * @phba: pointer to lpfc HBA data structure. 2013 * 2014 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2015 * routine from the API jump table function pointer from the lpfc_hba struct. 2016 * 2017 * Return codes 2018 * 0 - success. 2019 * Any other value - error. 2020 **/ 2021 void 2022 lpfc_handle_eratt(struct lpfc_hba *phba) 2023 { 2024 (*phba->lpfc_handle_eratt)(phba); 2025 } 2026 2027 /** 2028 * lpfc_handle_latt - The HBA link event handler 2029 * @phba: pointer to lpfc hba data structure. 2030 * 2031 * This routine is invoked from the worker thread to handle a HBA host 2032 * attention link event. SLI3 only. 
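 * It issues a non-blocking READ_TOPOLOGY mailbox command to retrieve the new link state and then clears the link attention bit in the HA register.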
2033 **/ 2034 void 2035 lpfc_handle_latt(struct lpfc_hba *phba) 2036 { 2037 struct lpfc_vport *vport = phba->pport; 2038 struct lpfc_sli *psli = &phba->sli; 2039 LPFC_MBOXQ_t *pmb; 2040 volatile uint32_t control; 2041 struct lpfc_dmabuf *mp; 2042 int rc = 0; 2043 2044 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2045 if (!pmb) { 2046 rc = 1; 2047 goto lpfc_handle_latt_err_exit; 2048 } 2049 2050 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2051 if (!mp) { 2052 rc = 2; 2053 goto lpfc_handle_latt_free_pmb; 2054 } 2055 2056 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2057 if (!mp->virt) { 2058 rc = 3; 2059 goto lpfc_handle_latt_free_mp; 2060 } 2061 2062 /* Cleanup any outstanding ELS commands */ 2063 lpfc_els_flush_all_cmd(phba); 2064 2065 psli->slistat.link_event++; 2066 lpfc_read_topology(phba, pmb, mp); 2067 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2068 pmb->vport = vport; 2069 /* Block ELS IOCBs until we have processed this mbox command */ 2070 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2071 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2072 if (rc == MBX_NOT_FINISHED) { 2073 rc = 4; 2074 goto lpfc_handle_latt_free_mbuf; 2075 } 2076 2077 /* Clear Link Attention in HA REG */ 2078 spin_lock_irq(&phba->hbalock); 2079 writel(HA_LATT, phba->HAregaddr); 2080 readl(phba->HAregaddr); /* flush */ 2081 spin_unlock_irq(&phba->hbalock); 2082 2083 return; 2084 2085 lpfc_handle_latt_free_mbuf: 2086 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2087 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2088 lpfc_handle_latt_free_mp: 2089 kfree(mp); 2090 lpfc_handle_latt_free_pmb: 2091 mempool_free(pmb, phba->mbox_mem_pool); 2092 lpfc_handle_latt_err_exit: 2093 /* Enable Link attention interrupts */ 2094 spin_lock_irq(&phba->hbalock); 2095 psli->sli_flag |= LPFC_PROCESS_LA; 2096 control = readl(phba->HCregaddr); 2097 control |= HC_LAINT_ENA; 2098 writel(control, phba->HCregaddr); 2099 readl(phba->HCregaddr); /* flush */ 2100 2101 /* Clear Link Attention in HA REG */ 2102 writel(HA_LATT, phba->HAregaddr); 2103 readl(phba->HAregaddr); /* flush */ 2104 spin_unlock_irq(&phba->hbalock); 2105 lpfc_linkdown(phba); 2106 phba->link_state = LPFC_HBA_ERROR; 2107 2108 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2109 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2110 2111 return; 2112 } 2113 2114 /** 2115 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2116 * @phba: pointer to lpfc hba data structure. 2117 * @vpd: pointer to the vital product data. 2118 * @len: length of the vital product data in bytes. 2119 * 2120 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2121 * an array of characters. In this routine, the ModelName, ProgramType, and 2122 * ModelDesc, etc. fields of the phba data structure will be populated. 
2123 * 2124 * Return codes 2125 * 0 - pointer to the VPD passed in is NULL 2126 * 1 - success 2127 **/ 2128 int 2129 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2130 { 2131 uint8_t lenlo, lenhi; 2132 int Length; 2133 int i, j; 2134 int finished = 0; 2135 int index = 0; 2136 2137 if (!vpd) 2138 return 0; 2139 2140 /* Vital Product */ 2141 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2142 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2143 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2144 (uint32_t) vpd[3]); 2145 while (!finished && (index < (len - 4))) { 2146 switch (vpd[index]) { 2147 case 0x82: 2148 case 0x91: 2149 index += 1; 2150 lenlo = vpd[index]; 2151 index += 1; 2152 lenhi = vpd[index]; 2153 index += 1; 2154 i = ((((unsigned short)lenhi) << 8) + lenlo); 2155 index += i; 2156 break; 2157 case 0x90: 2158 index += 1; 2159 lenlo = vpd[index]; 2160 index += 1; 2161 lenhi = vpd[index]; 2162 index += 1; 2163 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2164 if (Length > len - index) 2165 Length = len - index; 2166 while (Length > 0) { 2167 /* Look for Serial Number */ 2168 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2169 index += 2; 2170 i = vpd[index]; 2171 index += 1; 2172 j = 0; 2173 Length -= (3+i); 2174 while(i--) { 2175 phba->SerialNumber[j++] = vpd[index++]; 2176 if (j == 31) 2177 break; 2178 } 2179 phba->SerialNumber[j] = 0; 2180 continue; 2181 } 2182 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2183 phba->vpd_flag |= VPD_MODEL_DESC; 2184 index += 2; 2185 i = vpd[index]; 2186 index += 1; 2187 j = 0; 2188 Length -= (3+i); 2189 while(i--) { 2190 phba->ModelDesc[j++] = vpd[index++]; 2191 if (j == 255) 2192 break; 2193 } 2194 phba->ModelDesc[j] = 0; 2195 continue; 2196 } 2197 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2198 phba->vpd_flag |= VPD_MODEL_NAME; 2199 index += 2; 2200 i = vpd[index]; 2201 index += 1; 2202 j = 0; 2203 Length -= (3+i); 2204 while(i--) { 2205 phba->ModelName[j++] = vpd[index++]; 2206 if (j == 79) 2207 break; 2208 } 2209 phba->ModelName[j] = 0; 2210 continue; 2211 } 2212 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2213 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2214 index += 2; 2215 i = vpd[index]; 2216 index += 1; 2217 j = 0; 2218 Length -= (3+i); 2219 while(i--) { 2220 phba->ProgramType[j++] = vpd[index++]; 2221 if (j == 255) 2222 break; 2223 } 2224 phba->ProgramType[j] = 0; 2225 continue; 2226 } 2227 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2228 phba->vpd_flag |= VPD_PORT; 2229 index += 2; 2230 i = vpd[index]; 2231 index += 1; 2232 j = 0; 2233 Length -= (3+i); 2234 while(i--) { 2235 if ((phba->sli_rev == LPFC_SLI_REV4) && 2236 (phba->sli4_hba.pport_name_sta == 2237 LPFC_SLI4_PPNAME_GET)) { 2238 j++; 2239 index++; 2240 } else 2241 phba->Port[j++] = vpd[index++]; 2242 if (j == 19) 2243 break; 2244 } 2245 if ((phba->sli_rev != LPFC_SLI_REV4) || 2246 (phba->sli4_hba.pport_name_sta == 2247 LPFC_SLI4_PPNAME_NON)) 2248 phba->Port[j] = 0; 2249 continue; 2250 } 2251 else { 2252 index += 2; 2253 i = vpd[index]; 2254 index += 1; 2255 index += i; 2256 Length -= (3 + i); 2257 } 2258 } 2259 finished = 0; 2260 break; 2261 case 0x78: 2262 finished = 1; 2263 break; 2264 default: 2265 index ++; 2266 break; 2267 } 2268 } 2269 2270 return(1); 2271 } 2272 2273 /** 2274 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2275 * @phba: pointer to lpfc hba data structure. 2276 * @mdp: pointer to the data structure to hold the derived model name. 
2277 * @descp: pointer to the data structure to hold the derived description. 2278 * 2279 * This routine retrieves HBA's description based on its registered PCI device 2280 * ID. The @descp passed into this function points to an array of 256 chars. It 2281 * shall be returned with the model name, maximum speed, and the host bus type. 2282 * The @mdp passed into this function points to an array of 80 chars. When the 2283 * function returns, the @mdp will be filled with the model name. 2284 **/ 2285 static void 2286 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2287 { 2288 lpfc_vpd_t *vp; 2289 uint16_t dev_id = phba->pcidev->device; 2290 int max_speed; 2291 int GE = 0; 2292 int oneConnect = 0; /* default is not a oneConnect */ 2293 struct { 2294 char *name; 2295 char *bus; 2296 char *function; 2297 } m = {"<Unknown>", "", ""}; 2298 2299 if (mdp && mdp[0] != '\0' 2300 && descp && descp[0] != '\0') 2301 return; 2302 2303 if (phba->lmt & LMT_64Gb) 2304 max_speed = 64; 2305 else if (phba->lmt & LMT_32Gb) 2306 max_speed = 32; 2307 else if (phba->lmt & LMT_16Gb) 2308 max_speed = 16; 2309 else if (phba->lmt & LMT_10Gb) 2310 max_speed = 10; 2311 else if (phba->lmt & LMT_8Gb) 2312 max_speed = 8; 2313 else if (phba->lmt & LMT_4Gb) 2314 max_speed = 4; 2315 else if (phba->lmt & LMT_2Gb) 2316 max_speed = 2; 2317 else if (phba->lmt & LMT_1Gb) 2318 max_speed = 1; 2319 else 2320 max_speed = 0; 2321 2322 vp = &phba->vpd; 2323 2324 switch (dev_id) { 2325 case PCI_DEVICE_ID_FIREFLY: 2326 m = (typeof(m)){"LP6000", "PCI", 2327 "Obsolete, Unsupported Fibre Channel Adapter"}; 2328 break; 2329 case PCI_DEVICE_ID_SUPERFLY: 2330 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2331 m = (typeof(m)){"LP7000", "PCI", ""}; 2332 else 2333 m = (typeof(m)){"LP7000E", "PCI", ""}; 2334 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2335 break; 2336 case PCI_DEVICE_ID_DRAGONFLY: 2337 m = (typeof(m)){"LP8000", "PCI", 2338 "Obsolete, Unsupported Fibre Channel Adapter"}; 2339 break; 2340 case PCI_DEVICE_ID_CENTAUR: 2341 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2342 m = (typeof(m)){"LP9002", "PCI", ""}; 2343 else 2344 m = (typeof(m)){"LP9000", "PCI", ""}; 2345 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2346 break; 2347 case PCI_DEVICE_ID_RFLY: 2348 m = (typeof(m)){"LP952", "PCI", 2349 "Obsolete, Unsupported Fibre Channel Adapter"}; 2350 break; 2351 case PCI_DEVICE_ID_PEGASUS: 2352 m = (typeof(m)){"LP9802", "PCI-X", 2353 "Obsolete, Unsupported Fibre Channel Adapter"}; 2354 break; 2355 case PCI_DEVICE_ID_THOR: 2356 m = (typeof(m)){"LP10000", "PCI-X", 2357 "Obsolete, Unsupported Fibre Channel Adapter"}; 2358 break; 2359 case PCI_DEVICE_ID_VIPER: 2360 m = (typeof(m)){"LPX1000", "PCI-X", 2361 "Obsolete, Unsupported Fibre Channel Adapter"}; 2362 break; 2363 case PCI_DEVICE_ID_PFLY: 2364 m = (typeof(m)){"LP982", "PCI-X", 2365 "Obsolete, Unsupported Fibre Channel Adapter"}; 2366 break; 2367 case PCI_DEVICE_ID_TFLY: 2368 m = (typeof(m)){"LP1050", "PCI-X", 2369 "Obsolete, Unsupported Fibre Channel Adapter"}; 2370 break; 2371 case PCI_DEVICE_ID_HELIOS: 2372 m = (typeof(m)){"LP11000", "PCI-X2", 2373 "Obsolete, Unsupported Fibre Channel Adapter"}; 2374 break; 2375 case PCI_DEVICE_ID_HELIOS_SCSP: 2376 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2377 "Obsolete, Unsupported Fibre Channel Adapter"}; 2378 break; 2379 case PCI_DEVICE_ID_HELIOS_DCSP: 2380 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2381 "Obsolete, Unsupported Fibre Channel Adapter"}; 2382 break; 2383 case 
PCI_DEVICE_ID_NEPTUNE: 2384 m = (typeof(m)){"LPe1000", "PCIe", 2385 "Obsolete, Unsupported Fibre Channel Adapter"}; 2386 break; 2387 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2388 m = (typeof(m)){"LPe1000-SP", "PCIe", 2389 "Obsolete, Unsupported Fibre Channel Adapter"}; 2390 break; 2391 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2392 m = (typeof(m)){"LPe1002-SP", "PCIe", 2393 "Obsolete, Unsupported Fibre Channel Adapter"}; 2394 break; 2395 case PCI_DEVICE_ID_BMID: 2396 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2397 break; 2398 case PCI_DEVICE_ID_BSMB: 2399 m = (typeof(m)){"LP111", "PCI-X2", 2400 "Obsolete, Unsupported Fibre Channel Adapter"}; 2401 break; 2402 case PCI_DEVICE_ID_ZEPHYR: 2403 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2404 break; 2405 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2406 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2407 break; 2408 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2409 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2410 GE = 1; 2411 break; 2412 case PCI_DEVICE_ID_ZMID: 2413 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2414 break; 2415 case PCI_DEVICE_ID_ZSMB: 2416 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2417 break; 2418 case PCI_DEVICE_ID_LP101: 2419 m = (typeof(m)){"LP101", "PCI-X", 2420 "Obsolete, Unsupported Fibre Channel Adapter"}; 2421 break; 2422 case PCI_DEVICE_ID_LP10000S: 2423 m = (typeof(m)){"LP10000-S", "PCI", 2424 "Obsolete, Unsupported Fibre Channel Adapter"}; 2425 break; 2426 case PCI_DEVICE_ID_LP11000S: 2427 m = (typeof(m)){"LP11000-S", "PCI-X2", 2428 "Obsolete, Unsupported Fibre Channel Adapter"}; 2429 break; 2430 case PCI_DEVICE_ID_LPE11000S: 2431 m = (typeof(m)){"LPe11000-S", "PCIe", 2432 "Obsolete, Unsupported Fibre Channel Adapter"}; 2433 break; 2434 case PCI_DEVICE_ID_SAT: 2435 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2436 break; 2437 case PCI_DEVICE_ID_SAT_MID: 2438 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2439 break; 2440 case PCI_DEVICE_ID_SAT_SMB: 2441 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2442 break; 2443 case PCI_DEVICE_ID_SAT_DCSP: 2444 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2445 break; 2446 case PCI_DEVICE_ID_SAT_SCSP: 2447 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2448 break; 2449 case PCI_DEVICE_ID_SAT_S: 2450 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2451 break; 2452 case PCI_DEVICE_ID_HORNET: 2453 m = (typeof(m)){"LP21000", "PCIe", 2454 "Obsolete, Unsupported FCoE Adapter"}; 2455 GE = 1; 2456 break; 2457 case PCI_DEVICE_ID_PROTEUS_VF: 2458 m = (typeof(m)){"LPev12000", "PCIe IOV", 2459 "Obsolete, Unsupported Fibre Channel Adapter"}; 2460 break; 2461 case PCI_DEVICE_ID_PROTEUS_PF: 2462 m = (typeof(m)){"LPev12000", "PCIe IOV", 2463 "Obsolete, Unsupported Fibre Channel Adapter"}; 2464 break; 2465 case PCI_DEVICE_ID_PROTEUS_S: 2466 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2467 "Obsolete, Unsupported Fibre Channel Adapter"}; 2468 break; 2469 case PCI_DEVICE_ID_TIGERSHARK: 2470 oneConnect = 1; 2471 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2472 break; 2473 case PCI_DEVICE_ID_TOMCAT: 2474 oneConnect = 1; 2475 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2476 break; 2477 case PCI_DEVICE_ID_FALCON: 2478 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2479 "EmulexSecure Fibre"}; 2480 break; 2481 case PCI_DEVICE_ID_BALIUS: 2482 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2483 "Obsolete, Unsupported Fibre Channel Adapter"}; 2484 break; 2485 
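/* Lancer and Skyhawk family adapters */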
case PCI_DEVICE_ID_LANCER_FC: 2486 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2487 break; 2488 case PCI_DEVICE_ID_LANCER_FC_VF: 2489 m = (typeof(m)){"LPe16000", "PCIe", 2490 "Obsolete, Unsupported Fibre Channel Adapter"}; 2491 break; 2492 case PCI_DEVICE_ID_LANCER_FCOE: 2493 oneConnect = 1; 2494 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2495 break; 2496 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2497 oneConnect = 1; 2498 m = (typeof(m)){"OCe15100", "PCIe", 2499 "Obsolete, Unsupported FCoE"}; 2500 break; 2501 case PCI_DEVICE_ID_LANCER_G6_FC: 2502 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2503 break; 2504 case PCI_DEVICE_ID_LANCER_G7_FC: 2505 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2506 break; 2507 case PCI_DEVICE_ID_SKYHAWK: 2508 case PCI_DEVICE_ID_SKYHAWK_VF: 2509 oneConnect = 1; 2510 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2511 break; 2512 default: 2513 m = (typeof(m)){"Unknown", "", ""}; 2514 break; 2515 } 2516 2517 if (mdp && mdp[0] == '\0') 2518 snprintf(mdp, 79,"%s", m.name); 2519 /* 2520 * oneConnect hba requires special processing, they are all initiators 2521 * and we put the port number on the end 2522 */ 2523 if (descp && descp[0] == '\0') { 2524 if (oneConnect) 2525 snprintf(descp, 255, 2526 "Emulex OneConnect %s, %s Initiator %s", 2527 m.name, m.function, 2528 phba->Port); 2529 else if (max_speed == 0) 2530 snprintf(descp, 255, 2531 "Emulex %s %s %s", 2532 m.name, m.bus, m.function); 2533 else 2534 snprintf(descp, 255, 2535 "Emulex %s %d%s %s %s", 2536 m.name, max_speed, (GE) ? "GE" : "Gb", 2537 m.bus, m.function); 2538 } 2539 } 2540 2541 /** 2542 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2543 * @phba: pointer to lpfc hba data structure. 2544 * @pring: pointer to a IOCB ring. 2545 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2546 * 2547 * This routine posts a given number of IOCBs with the associated DMA buffer 2548 * descriptors specified by the cnt argument to the given IOCB ring. 2549 * 2550 * Return codes 2551 * The number of IOCBs NOT able to be posted to the IOCB ring. 
2552 **/ 2553 int 2554 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2555 { 2556 IOCB_t *icmd; 2557 struct lpfc_iocbq *iocb; 2558 struct lpfc_dmabuf *mp1, *mp2; 2559 2560 cnt += pring->missbufcnt; 2561 2562 /* While there are buffers to post */ 2563 while (cnt > 0) { 2564 /* Allocate buffer for command iocb */ 2565 iocb = lpfc_sli_get_iocbq(phba); 2566 if (iocb == NULL) { 2567 pring->missbufcnt = cnt; 2568 return cnt; 2569 } 2570 icmd = &iocb->iocb; 2571 2572 /* 2 buffers can be posted per command */ 2573 /* Allocate buffer to post */ 2574 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2575 if (mp1) 2576 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2577 if (!mp1 || !mp1->virt) { 2578 kfree(mp1); 2579 lpfc_sli_release_iocbq(phba, iocb); 2580 pring->missbufcnt = cnt; 2581 return cnt; 2582 } 2583 2584 INIT_LIST_HEAD(&mp1->list); 2585 /* Allocate buffer to post */ 2586 if (cnt > 1) { 2587 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2588 if (mp2) 2589 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2590 &mp2->phys); 2591 if (!mp2 || !mp2->virt) { 2592 kfree(mp2); 2593 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2594 kfree(mp1); 2595 lpfc_sli_release_iocbq(phba, iocb); 2596 pring->missbufcnt = cnt; 2597 return cnt; 2598 } 2599 2600 INIT_LIST_HEAD(&mp2->list); 2601 } else { 2602 mp2 = NULL; 2603 } 2604 2605 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2606 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2607 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2608 icmd->ulpBdeCount = 1; 2609 cnt--; 2610 if (mp2) { 2611 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2612 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2613 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2614 cnt--; 2615 icmd->ulpBdeCount = 2; 2616 } 2617 2618 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2619 icmd->ulpLe = 1; 2620 2621 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2622 IOCB_ERROR) { 2623 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2624 kfree(mp1); 2625 cnt++; 2626 if (mp2) { 2627 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2628 kfree(mp2); 2629 cnt++; 2630 } 2631 lpfc_sli_release_iocbq(phba, iocb); 2632 pring->missbufcnt = cnt; 2633 return cnt; 2634 } 2635 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2636 if (mp2) 2637 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2638 } 2639 pring->missbufcnt = 0; 2640 return 0; 2641 } 2642 2643 /** 2644 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2645 * @phba: pointer to lpfc hba data structure. 2646 * 2647 * This routine posts initial receive IOCB buffers to the ELS ring. The 2648 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2649 * set to 64 IOCBs. SLI3 only. 2650 * 2651 * Return codes 2652 * 0 - success (currently always success) 2653 **/ 2654 static int 2655 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2656 { 2657 struct lpfc_sli *psli = &phba->sli; 2658 2659 /* Ring 0, ELS / CT buffers */ 2660 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2661 /* Ring 2 - FCP no buffers needed */ 2662 2663 return 0; 2664 } 2665 2666 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2667 2668 /** 2669 * lpfc_sha_init - Set up initial array of hash table entries 2670 * @HashResultPointer: pointer to an array as hash table. 2671 * 2672 * This routine sets up the initial values to the array of hash table entries 2673 * for the LC HBAs. 
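 * (These are the standard SHA-1 initial hash constants.)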
2674 **/ 2675 static void 2676 lpfc_sha_init(uint32_t * HashResultPointer) 2677 { 2678 HashResultPointer[0] = 0x67452301; 2679 HashResultPointer[1] = 0xEFCDAB89; 2680 HashResultPointer[2] = 0x98BADCFE; 2681 HashResultPointer[3] = 0x10325476; 2682 HashResultPointer[4] = 0xC3D2E1F0; 2683 } 2684 2685 /** 2686 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2687 * @HashResultPointer: pointer to an initial/result hash table. 2688 * @HashWorkingPointer: pointer to a working hash table. 2689 * 2690 * This routine iterates over the initial hash table pointed to by 2691 * @HashResultPointer with the values from the working hash table pointed to by 2692 * @HashWorkingPointer. The results are put back into the initial hash table and 2693 * returned through @HashResultPointer as the result hash table. 2694 **/ 2695 static void 2696 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2697 { 2698 int t; 2699 uint32_t TEMP; 2700 uint32_t A, B, C, D, E; 2701 t = 16; 2702 do { 2703 HashWorkingPointer[t] = 2704 S(1, 2705 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2706 8] ^ 2707 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2708 } while (++t <= 79); 2709 t = 0; 2710 A = HashResultPointer[0]; 2711 B = HashResultPointer[1]; 2712 C = HashResultPointer[2]; 2713 D = HashResultPointer[3]; 2714 E = HashResultPointer[4]; 2715 2716 do { 2717 if (t < 20) { 2718 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2719 } else if (t < 40) { 2720 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2721 } else if (t < 60) { 2722 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2723 } else { 2724 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2725 } 2726 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2727 E = D; 2728 D = C; 2729 C = S(30, B); 2730 B = A; 2731 A = TEMP; 2732 } while (++t <= 79); 2733 2734 HashResultPointer[0] += A; 2735 HashResultPointer[1] += B; 2736 HashResultPointer[2] += C; 2737 HashResultPointer[3] += D; 2738 HashResultPointer[4] += E; 2739 2740 } 2741 2742 /** 2743 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2744 * @RandomChallenge: pointer to the entry of host challenge random number array. 2745 * @HashWorking: pointer to the entry of the working hash array. 2746 * 2747 * This routine calculates the working hash array referred to by @HashWorking 2748 * from the challenge random numbers associated with the host, referred to by 2749 * @RandomChallenge. The result is put into the entry of the working hash 2750 * array and returned by reference through @HashWorking. 2751 **/ 2752 static void 2753 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2754 { 2755 *HashWorking = (*RandomChallenge ^ *HashWorking); 2756 } 2757 2758 /** 2759 * lpfc_hba_init - Perform special handling for LC HBA initialization 2760 * @phba: pointer to lpfc hba data structure. 2761 * @hbainit: pointer to an array of unsigned 32-bit integers. 2762 * 2763 * This routine performs the special handling for LC HBA initialization.
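 * It builds a working hash from the adapter WWNN and the host RandomData challenge values, then runs the SHA-1 style iteration above to fill in @hbainit.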
2764 **/ 2765 void 2766 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2767 { 2768 int t; 2769 uint32_t *HashWorking; 2770 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2771 2772 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2773 if (!HashWorking) 2774 return; 2775 2776 HashWorking[0] = HashWorking[78] = *pwwnn++; 2777 HashWorking[1] = HashWorking[79] = *pwwnn; 2778 2779 for (t = 0; t < 7; t++) 2780 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2781 2782 lpfc_sha_init(hbainit); 2783 lpfc_sha_iterate(hbainit, HashWorking); 2784 kfree(HashWorking); 2785 } 2786 2787 /** 2788 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2789 * @vport: pointer to a virtual N_Port data structure. 2790 * 2791 * This routine performs the necessary cleanups before deleting the @vport. 2792 * It invokes the discovery state machine to perform necessary state 2793 * transitions and to release the ndlps associated with the @vport. Note, 2794 * the physical port is treated as @vport 0. 2795 **/ 2796 void 2797 lpfc_cleanup(struct lpfc_vport *vport) 2798 { 2799 struct lpfc_hba *phba = vport->phba; 2800 struct lpfc_nodelist *ndlp, *next_ndlp; 2801 int i = 0; 2802 2803 if (phba->link_state > LPFC_LINK_DOWN) 2804 lpfc_port_link_failure(vport); 2805 2806 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2807 if (!NLP_CHK_NODE_ACT(ndlp)) { 2808 ndlp = lpfc_enable_node(vport, ndlp, 2809 NLP_STE_UNUSED_NODE); 2810 if (!ndlp) 2811 continue; 2812 spin_lock_irq(&phba->ndlp_lock); 2813 NLP_SET_FREE_REQ(ndlp); 2814 spin_unlock_irq(&phba->ndlp_lock); 2815 /* Trigger the release of the ndlp memory */ 2816 lpfc_nlp_put(ndlp); 2817 continue; 2818 } 2819 spin_lock_irq(&phba->ndlp_lock); 2820 if (NLP_CHK_FREE_REQ(ndlp)) { 2821 /* The ndlp should not be in memory free mode already */ 2822 spin_unlock_irq(&phba->ndlp_lock); 2823 continue; 2824 } else 2825 /* Indicate request for freeing ndlp memory */ 2826 NLP_SET_FREE_REQ(ndlp); 2827 spin_unlock_irq(&phba->ndlp_lock); 2828 2829 if (vport->port_type != LPFC_PHYSICAL_PORT && 2830 ndlp->nlp_DID == Fabric_DID) { 2831 /* Just free up ndlp with Fabric_DID for vports */ 2832 lpfc_nlp_put(ndlp); 2833 continue; 2834 } 2835 2836 /* take care of nodes in unused state before the state 2837 * machine taking action. 2838 */ 2839 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2840 lpfc_nlp_put(ndlp); 2841 continue; 2842 } 2843 2844 if (ndlp->nlp_type & NLP_FABRIC) 2845 lpfc_disc_state_machine(vport, ndlp, NULL, 2846 NLP_EVT_DEVICE_RECOVERY); 2847 2848 lpfc_disc_state_machine(vport, ndlp, NULL, 2849 NLP_EVT_DEVICE_RM); 2850 } 2851 2852 /* At this point, ALL ndlp's should be gone 2853 * because of the previous NLP_EVT_DEVICE_RM. 2854 * Lets wait for this to happen, if needed. 2855 */ 2856 while (!list_empty(&vport->fc_nodes)) { 2857 if (i++ > 3000) { 2858 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2859 "0233 Nodelist not empty\n"); 2860 list_for_each_entry_safe(ndlp, next_ndlp, 2861 &vport->fc_nodes, nlp_listp) { 2862 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2863 LOG_NODE, 2864 "0282 did:x%x ndlp:x%p " 2865 "usgmap:x%x refcnt:%d\n", 2866 ndlp->nlp_DID, (void *)ndlp, 2867 ndlp->nlp_usg_map, 2868 kref_read(&ndlp->kref)); 2869 } 2870 break; 2871 } 2872 2873 /* Wait for any activity on ndlps to settle */ 2874 msleep(10); 2875 } 2876 lpfc_cleanup_vports_rrqs(vport, NULL); 2877 } 2878 2879 /** 2880 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2881 * @vport: pointer to a virtual N_Port data structure. 
2882 * 2883 * This routine stops all the timers associated with a @vport. This function 2884 * is invoked before disabling or deleting a @vport. Note that the physical 2885 * port is treated as @vport 0. 2886 **/ 2887 void 2888 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2889 { 2890 del_timer_sync(&vport->els_tmofunc); 2891 del_timer_sync(&vport->delayed_disc_tmo); 2892 lpfc_can_disctmo(vport); 2893 return; 2894 } 2895 2896 /** 2897 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2898 * @phba: pointer to lpfc hba data structure. 2899 * 2900 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2901 * caller of this routine should already hold the host lock. 2902 **/ 2903 void 2904 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2905 { 2906 /* Clear pending FCF rediscovery wait flag */ 2907 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2908 2909 /* Now, try to stop the timer */ 2910 del_timer(&phba->fcf.redisc_wait); 2911 } 2912 2913 /** 2914 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2915 * @phba: pointer to lpfc hba data structure. 2916 * 2917 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2918 * checks whether the FCF rediscovery wait timer is pending with the host 2919 * lock held before proceeding with disabling the timer and clearing the 2920 * wait timer pending flag. 2921 **/ 2922 void 2923 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2924 { 2925 spin_lock_irq(&phba->hbalock); 2926 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2927 /* FCF rediscovery timer already fired or stopped */ 2928 spin_unlock_irq(&phba->hbalock); 2929 return; 2930 } 2931 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2932 /* Clear failover in progress flags */ 2933 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2934 spin_unlock_irq(&phba->hbalock); 2935 } 2936 2937 /** 2938 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2939 * @phba: pointer to lpfc hba data structure. 2940 * 2941 * This routine stops all the timers associated with an HBA. This function is 2942 * invoked before either putting an HBA offline or unloading the driver. 2943 **/ 2944 void 2945 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2946 { 2947 if (phba->pport) 2948 lpfc_stop_vport_timers(phba->pport); 2949 cancel_delayed_work_sync(&phba->eq_delay_work); 2950 del_timer_sync(&phba->sli.mbox_tmo); 2951 del_timer_sync(&phba->fabric_block_timer); 2952 del_timer_sync(&phba->eratt_poll); 2953 del_timer_sync(&phba->hb_tmofunc); 2954 if (phba->sli_rev == LPFC_SLI_REV4) { 2955 del_timer_sync(&phba->rrq_tmr); 2956 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2957 } 2958 phba->hb_outstanding = 0; 2959 2960 switch (phba->pci_dev_grp) { 2961 case LPFC_PCI_DEV_LP: 2962 /* Stop any LightPulse device specific driver timers */ 2963 del_timer_sync(&phba->fcp_poll_timer); 2964 break; 2965 case LPFC_PCI_DEV_OC: 2966 /* Stop any OneConnect device specific driver timers */ 2967 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2968 break; 2969 default: 2970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2971 "0297 Invalid device group (x%x)\n", 2972 phba->pci_dev_grp); 2973 break; 2974 } 2975 return; 2976 } 2977 2978 /** 2979 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked 2980 * @phba: pointer to lpfc hba data structure. 2981 * 2982 * This routine marks an HBA's management interface as blocked.
Once the HBA's 2983 * management interface is marked as blocked, all the user space access to 2984 * the HBA, whether they are from sysfs interface or libdfc interface will 2985 * all be blocked. The HBA is set to block the management interface when the 2986 * driver prepares the HBA interface for online or offline. 2987 **/ 2988 static void 2989 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2990 { 2991 unsigned long iflag; 2992 uint8_t actcmd = MBX_HEARTBEAT; 2993 unsigned long timeout; 2994 2995 spin_lock_irqsave(&phba->hbalock, iflag); 2996 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2997 spin_unlock_irqrestore(&phba->hbalock, iflag); 2998 if (mbx_action == LPFC_MBX_NO_WAIT) 2999 return; 3000 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 3001 spin_lock_irqsave(&phba->hbalock, iflag); 3002 if (phba->sli.mbox_active) { 3003 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 3004 /* Determine how long we might wait for the active mailbox 3005 * command to be gracefully completed by firmware. 3006 */ 3007 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 3008 phba->sli.mbox_active) * 1000) + jiffies; 3009 } 3010 spin_unlock_irqrestore(&phba->hbalock, iflag); 3011 3012 /* Wait for the outstnading mailbox command to complete */ 3013 while (phba->sli.mbox_active) { 3014 /* Check active mailbox complete status every 2ms */ 3015 msleep(2); 3016 if (time_after(jiffies, timeout)) { 3017 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3018 "2813 Mgmt IO is Blocked %x " 3019 "- mbox cmd %x still active\n", 3020 phba->sli.sli_flag, actcmd); 3021 break; 3022 } 3023 } 3024 } 3025 3026 /** 3027 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3028 * @phba: pointer to lpfc hba data structure. 3029 * 3030 * Allocate RPIs for all active remote nodes. This is needed whenever 3031 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3032 * is to fixup the temporary rpi assignments. 3033 **/ 3034 void 3035 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3036 { 3037 struct lpfc_nodelist *ndlp, *next_ndlp; 3038 struct lpfc_vport **vports; 3039 int i, rpi; 3040 unsigned long flags; 3041 3042 if (phba->sli_rev != LPFC_SLI_REV4) 3043 return; 3044 3045 vports = lpfc_create_vport_work_array(phba); 3046 if (vports == NULL) 3047 return; 3048 3049 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3050 if (vports[i]->load_flag & FC_UNLOADING) 3051 continue; 3052 3053 list_for_each_entry_safe(ndlp, next_ndlp, 3054 &vports[i]->fc_nodes, 3055 nlp_listp) { 3056 if (!NLP_CHK_NODE_ACT(ndlp)) 3057 continue; 3058 rpi = lpfc_sli4_alloc_rpi(phba); 3059 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3060 spin_lock_irqsave(&phba->ndlp_lock, flags); 3061 NLP_CLR_NODE_ACT(ndlp); 3062 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3063 continue; 3064 } 3065 ndlp->nlp_rpi = rpi; 3066 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3067 "0009 rpi:%x DID:%x " 3068 "flg:%x map:%x %p\n", ndlp->nlp_rpi, 3069 ndlp->nlp_DID, ndlp->nlp_flag, 3070 ndlp->nlp_usg_map, ndlp); 3071 } 3072 } 3073 lpfc_destroy_vport_work_array(phba, vports); 3074 } 3075 3076 /** 3077 * lpfc_create_expedite_pool - create expedite pool 3078 * @phba: pointer to lpfc hba data structure. 3079 * 3080 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3081 * to expedite pool. Mark them as expedite. 
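 * At most XRI_BATCH buffers are moved.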
3082 **/ 3083 static void lpfc_create_expedite_pool(struct lpfc_hba *phba) 3084 { 3085 struct lpfc_sli4_hdw_queue *qp; 3086 struct lpfc_io_buf *lpfc_ncmd; 3087 struct lpfc_io_buf *lpfc_ncmd_next; 3088 struct lpfc_epd_pool *epd_pool; 3089 unsigned long iflag; 3090 3091 epd_pool = &phba->epd_pool; 3092 qp = &phba->sli4_hba.hdwq[0]; 3093 3094 spin_lock_init(&epd_pool->lock); 3095 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3096 spin_lock(&epd_pool->lock); 3097 INIT_LIST_HEAD(&epd_pool->list); 3098 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3099 &qp->lpfc_io_buf_list_put, list) { 3100 list_move_tail(&lpfc_ncmd->list, &epd_pool->list); 3101 lpfc_ncmd->expedite = true; 3102 qp->put_io_bufs--; 3103 epd_pool->count++; 3104 if (epd_pool->count >= XRI_BATCH) 3105 break; 3106 } 3107 spin_unlock(&epd_pool->lock); 3108 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3109 } 3110 3111 /** 3112 * lpfc_destroy_expedite_pool - destroy expedite pool 3113 * @phba: pointer to lpfc hba data structure. 3114 * 3115 * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put 3116 * of HWQ 0. Clear the mark. 3117 **/ 3118 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) 3119 { 3120 struct lpfc_sli4_hdw_queue *qp; 3121 struct lpfc_io_buf *lpfc_ncmd; 3122 struct lpfc_io_buf *lpfc_ncmd_next; 3123 struct lpfc_epd_pool *epd_pool; 3124 unsigned long iflag; 3125 3126 epd_pool = &phba->epd_pool; 3127 qp = &phba->sli4_hba.hdwq[0]; 3128 3129 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3130 spin_lock(&epd_pool->lock); 3131 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3132 &epd_pool->list, list) { 3133 list_move_tail(&lpfc_ncmd->list, 3134 &qp->lpfc_io_buf_list_put); 3135 lpfc_ncmd->flags = false; 3136 qp->put_io_bufs++; 3137 epd_pool->count--; 3138 } 3139 spin_unlock(&epd_pool->lock); 3140 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3141 } 3142 3143 /** 3144 * lpfc_create_multixri_pools - create multi-XRI pools 3145 * @phba: pointer to lpfc hba data structure. 3146 * 3147 * This routine initialize public, private per HWQ. Then, move XRIs from 3148 * lpfc_io_buf_list_put to public pool. High and low watermark are also 3149 * Initialized. 
3150 **/ 3151 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3152 { 3153 u32 i, j; 3154 u32 hwq_count; 3155 u32 count_per_hwq; 3156 struct lpfc_io_buf *lpfc_ncmd; 3157 struct lpfc_io_buf *lpfc_ncmd_next; 3158 unsigned long iflag; 3159 struct lpfc_sli4_hdw_queue *qp; 3160 struct lpfc_multixri_pool *multixri_pool; 3161 struct lpfc_pbl_pool *pbl_pool; 3162 struct lpfc_pvt_pool *pvt_pool; 3163 3164 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3165 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3166 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3167 phba->sli4_hba.io_xri_cnt); 3168 3169 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3170 lpfc_create_expedite_pool(phba); 3171 3172 hwq_count = phba->cfg_hdw_queue; 3173 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3174 3175 for (i = 0; i < hwq_count; i++) { 3176 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3177 3178 if (!multixri_pool) { 3179 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3180 "1238 Failed to allocate memory for " 3181 "multixri_pool\n"); 3182 3183 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3184 lpfc_destroy_expedite_pool(phba); 3185 3186 j = 0; 3187 while (j < i) { 3188 qp = &phba->sli4_hba.hdwq[j]; 3189 kfree(qp->p_multixri_pool); 3190 j++; 3191 } 3192 phba->cfg_xri_rebalancing = 0; 3193 return; 3194 } 3195 3196 qp = &phba->sli4_hba.hdwq[i]; 3197 qp->p_multixri_pool = multixri_pool; 3198 3199 multixri_pool->xri_limit = count_per_hwq; 3200 multixri_pool->rrb_next_hwqid = i; 3201 3202 /* Deal with public free xri pool */ 3203 pbl_pool = &multixri_pool->pbl_pool; 3204 spin_lock_init(&pbl_pool->lock); 3205 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3206 spin_lock(&pbl_pool->lock); 3207 INIT_LIST_HEAD(&pbl_pool->list); 3208 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3209 &qp->lpfc_io_buf_list_put, list) { 3210 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3211 qp->put_io_bufs--; 3212 pbl_pool->count++; 3213 } 3214 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3215 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3216 pbl_pool->count, i); 3217 spin_unlock(&pbl_pool->lock); 3218 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3219 3220 /* Deal with private free xri pool */ 3221 pvt_pool = &multixri_pool->pvt_pool; 3222 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3223 pvt_pool->low_watermark = XRI_BATCH; 3224 spin_lock_init(&pvt_pool->lock); 3225 spin_lock_irqsave(&pvt_pool->lock, iflag); 3226 INIT_LIST_HEAD(&pvt_pool->list); 3227 pvt_pool->count = 0; 3228 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3229 } 3230 } 3231 3232 /** 3233 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3234 * @phba: pointer to lpfc hba data structure. 3235 * 3236 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
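 * Outstanding FCP and NVME rings are flushed first unless the driver is unloading.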
3237 **/ 3238 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3239 { 3240 u32 i; 3241 u32 hwq_count; 3242 struct lpfc_io_buf *lpfc_ncmd; 3243 struct lpfc_io_buf *lpfc_ncmd_next; 3244 unsigned long iflag; 3245 struct lpfc_sli4_hdw_queue *qp; 3246 struct lpfc_multixri_pool *multixri_pool; 3247 struct lpfc_pbl_pool *pbl_pool; 3248 struct lpfc_pvt_pool *pvt_pool; 3249 3250 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3251 lpfc_destroy_expedite_pool(phba); 3252 3253 if (!(phba->pport->load_flag & FC_UNLOADING)) { 3254 lpfc_sli_flush_fcp_rings(phba); 3255 3256 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3257 lpfc_sli_flush_nvme_rings(phba); 3258 } 3259 3260 hwq_count = phba->cfg_hdw_queue; 3261 3262 for (i = 0; i < hwq_count; i++) { 3263 qp = &phba->sli4_hba.hdwq[i]; 3264 multixri_pool = qp->p_multixri_pool; 3265 if (!multixri_pool) 3266 continue; 3267 3268 qp->p_multixri_pool = NULL; 3269 3270 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3271 3272 /* Deal with public free xri pool */ 3273 pbl_pool = &multixri_pool->pbl_pool; 3274 spin_lock(&pbl_pool->lock); 3275 3276 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3277 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3278 pbl_pool->count, i); 3279 3280 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3281 &pbl_pool->list, list) { 3282 list_move_tail(&lpfc_ncmd->list, 3283 &qp->lpfc_io_buf_list_put); 3284 qp->put_io_bufs++; 3285 pbl_pool->count--; 3286 } 3287 3288 INIT_LIST_HEAD(&pbl_pool->list); 3289 pbl_pool->count = 0; 3290 3291 spin_unlock(&pbl_pool->lock); 3292 3293 /* Deal with private free xri pool */ 3294 pvt_pool = &multixri_pool->pvt_pool; 3295 spin_lock(&pvt_pool->lock); 3296 3297 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3298 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3299 pvt_pool->count, i); 3300 3301 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3302 &pvt_pool->list, list) { 3303 list_move_tail(&lpfc_ncmd->list, 3304 &qp->lpfc_io_buf_list_put); 3305 qp->put_io_bufs++; 3306 pvt_pool->count--; 3307 } 3308 3309 INIT_LIST_HEAD(&pvt_pool->list); 3310 pvt_pool->count = 0; 3311 3312 spin_unlock(&pvt_pool->lock); 3313 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3314 3315 kfree(multixri_pool); 3316 } 3317 } 3318 3319 /** 3320 * lpfc_online - Initialize and bring a HBA online 3321 * @phba: pointer to lpfc hba data structure. 3322 * 3323 * This routine initializes the HBA and brings a HBA online. During this 3324 * process, the management interface is blocked to prevent user space access 3325 * to the HBA interfering with the driver initialization. 3326 * 3327 * Return codes 3328 * 0 - successful 3329 * 1 - failed 3330 **/ 3331 int 3332 lpfc_online(struct lpfc_hba *phba) 3333 { 3334 struct lpfc_vport *vport; 3335 struct lpfc_vport **vports; 3336 int i, error = 0; 3337 bool vpis_cleared = false; 3338 3339 if (!phba) 3340 return 0; 3341 vport = phba->pport; 3342 3343 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3344 return 0; 3345 3346 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3347 "0458 Bring Adapter online\n"); 3348 3349 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3350 3351 if (phba->sli_rev == LPFC_SLI_REV4) { 3352 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3353 lpfc_unblock_mgmt_io(phba); 3354 return 1; 3355 } 3356 spin_lock_irq(&phba->hbalock); 3357 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3358 vpis_cleared = true; 3359 spin_unlock_irq(&phba->hbalock); 3360 3361 /* Reestablish the local initiator port. 
3362 * The offline process destroyed the previous lport. 3363 */ 3364 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3365 !phba->nvmet_support) { 3366 error = lpfc_nvme_create_localport(phba->pport); 3367 if (error) 3368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3369 "6132 NVME restore reg failed " 3370 "on nvmei error x%x\n", error); 3371 } 3372 } else { 3373 lpfc_sli_queue_init(phba); 3374 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3375 lpfc_unblock_mgmt_io(phba); 3376 return 1; 3377 } 3378 } 3379 3380 vports = lpfc_create_vport_work_array(phba); 3381 if (vports != NULL) { 3382 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3383 struct Scsi_Host *shost; 3384 shost = lpfc_shost_from_vport(vports[i]); 3385 spin_lock_irq(shost->host_lock); 3386 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3387 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3388 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3389 if (phba->sli_rev == LPFC_SLI_REV4) { 3390 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3391 if ((vpis_cleared) && 3392 (vports[i]->port_type != 3393 LPFC_PHYSICAL_PORT)) 3394 vports[i]->vpi = 0; 3395 } 3396 spin_unlock_irq(shost->host_lock); 3397 } 3398 } 3399 lpfc_destroy_vport_work_array(phba, vports); 3400 3401 if (phba->cfg_xri_rebalancing) 3402 lpfc_create_multixri_pools(phba); 3403 3404 lpfc_unblock_mgmt_io(phba); 3405 return 0; 3406 } 3407 3408 /** 3409 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3410 * @phba: pointer to lpfc hba data structure. 3411 * 3412 * This routine marks a HBA's management interface as not blocked. Once the 3413 * HBA's management interface is marked as not blocked, all the user space 3414 * access to the HBA, whether they are from sysfs interface or libdfc 3415 * interface will be allowed. The HBA is set to block the management interface 3416 * when the driver prepares the HBA interface for online or offline and then 3417 * set to unblock the management interface afterwards. 3418 **/ 3419 void 3420 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3421 { 3422 unsigned long iflag; 3423 3424 spin_lock_irqsave(&phba->hbalock, iflag); 3425 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3426 spin_unlock_irqrestore(&phba->hbalock, iflag); 3427 } 3428 3429 /** 3430 * lpfc_offline_prep - Prepare a HBA to be brought offline 3431 * @phba: pointer to lpfc hba data structure. 3432 * 3433 * This routine is invoked to prepare a HBA to be brought offline. It performs 3434 * unregistration login to all the nodes on all vports and flushes the mailbox 3435 * queue to make it ready to be brought offline. 
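 * On SLI4 ports the remote nodes' RPIs are also freed so that new RPIs can be allocated when the port comes back online.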
3436 **/ 3437 void 3438 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3439 { 3440 struct lpfc_vport *vport = phba->pport; 3441 struct lpfc_nodelist *ndlp, *next_ndlp; 3442 struct lpfc_vport **vports; 3443 struct Scsi_Host *shost; 3444 int i; 3445 3446 if (vport->fc_flag & FC_OFFLINE_MODE) 3447 return; 3448 3449 lpfc_block_mgmt_io(phba, mbx_action); 3450 3451 lpfc_linkdown(phba); 3452 3453 /* Issue an unreg_login to all nodes on all vports */ 3454 vports = lpfc_create_vport_work_array(phba); 3455 if (vports != NULL) { 3456 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3457 if (vports[i]->load_flag & FC_UNLOADING) 3458 continue; 3459 shost = lpfc_shost_from_vport(vports[i]); 3460 spin_lock_irq(shost->host_lock); 3461 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3462 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3463 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3464 spin_unlock_irq(shost->host_lock); 3465 3466 shost = lpfc_shost_from_vport(vports[i]); 3467 list_for_each_entry_safe(ndlp, next_ndlp, 3468 &vports[i]->fc_nodes, 3469 nlp_listp) { 3470 if (!NLP_CHK_NODE_ACT(ndlp)) 3471 continue; 3472 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3473 continue; 3474 if (ndlp->nlp_type & NLP_FABRIC) { 3475 lpfc_disc_state_machine(vports[i], ndlp, 3476 NULL, NLP_EVT_DEVICE_RECOVERY); 3477 lpfc_disc_state_machine(vports[i], ndlp, 3478 NULL, NLP_EVT_DEVICE_RM); 3479 } 3480 spin_lock_irq(shost->host_lock); 3481 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3482 spin_unlock_irq(shost->host_lock); 3483 /* 3484 * Whenever an SLI4 port goes offline, free the 3485 * RPI. Get a new RPI when the adapter port 3486 * comes back online. 3487 */ 3488 if (phba->sli_rev == LPFC_SLI_REV4) { 3489 lpfc_printf_vlog(ndlp->vport, 3490 KERN_INFO, LOG_NODE, 3491 "0011 lpfc_offline: " 3492 "ndlp:x%p did %x " 3493 "usgmap:x%x rpi:%x\n", 3494 ndlp, ndlp->nlp_DID, 3495 ndlp->nlp_usg_map, 3496 ndlp->nlp_rpi); 3497 3498 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3499 } 3500 lpfc_unreg_rpi(vports[i], ndlp); 3501 } 3502 } 3503 } 3504 lpfc_destroy_vport_work_array(phba, vports); 3505 3506 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3507 3508 if (phba->wq) 3509 flush_workqueue(phba->wq); 3510 } 3511 3512 /** 3513 * lpfc_offline - Bring a HBA offline 3514 * @phba: pointer to lpfc hba data structure. 3515 * 3516 * This routine actually brings a HBA offline. It stops all the timers 3517 * associated with the HBA, brings down the SLI layer, and eventually 3518 * marks the HBA as in offline state for the upper layer protocol. 3519 **/ 3520 void 3521 lpfc_offline(struct lpfc_hba *phba) 3522 { 3523 struct Scsi_Host *shost; 3524 struct lpfc_vport **vports; 3525 int i; 3526 3527 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3528 return; 3529 3530 /* stop port and all timers associated with this hba */ 3531 lpfc_stop_port(phba); 3532 3533 /* Tear down the local and target port registrations. The 3534 * nvme transports need to cleanup. 3535 */ 3536 lpfc_nvmet_destroy_targetport(phba); 3537 lpfc_nvme_destroy_localport(phba->pport); 3538 3539 vports = lpfc_create_vport_work_array(phba); 3540 if (vports != NULL) 3541 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3542 lpfc_stop_vport_timers(vports[i]); 3543 lpfc_destroy_vport_work_array(phba, vports); 3544 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3545 "0460 Bring Adapter offline\n"); 3546 /* Bring down the SLI Layer and cleanup. The HBA is offline 3547 now. 
*/ 3548 lpfc_sli_hba_down(phba); 3549 spin_lock_irq(&phba->hbalock); 3550 phba->work_ha = 0; 3551 spin_unlock_irq(&phba->hbalock); 3552 vports = lpfc_create_vport_work_array(phba); 3553 if (vports != NULL) 3554 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3555 shost = lpfc_shost_from_vport(vports[i]); 3556 spin_lock_irq(shost->host_lock); 3557 vports[i]->work_port_events = 0; 3558 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3559 spin_unlock_irq(shost->host_lock); 3560 } 3561 lpfc_destroy_vport_work_array(phba, vports); 3562 3563 if (phba->cfg_xri_rebalancing) 3564 lpfc_destroy_multixri_pools(phba); 3565 } 3566 3567 /** 3568 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3569 * @phba: pointer to lpfc hba data structure. 3570 * 3571 * This routine is to free all the SCSI buffers and IOCBs from the driver 3572 * list back to kernel. It is called from lpfc_pci_remove_one to free 3573 * the internal resources before the device is removed from the system. 3574 **/ 3575 static void 3576 lpfc_scsi_free(struct lpfc_hba *phba) 3577 { 3578 struct lpfc_io_buf *sb, *sb_next; 3579 3580 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3581 return; 3582 3583 spin_lock_irq(&phba->hbalock); 3584 3585 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3586 3587 spin_lock(&phba->scsi_buf_list_put_lock); 3588 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3589 list) { 3590 list_del(&sb->list); 3591 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3592 sb->dma_handle); 3593 kfree(sb); 3594 phba->total_scsi_bufs--; 3595 } 3596 spin_unlock(&phba->scsi_buf_list_put_lock); 3597 3598 spin_lock(&phba->scsi_buf_list_get_lock); 3599 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3600 list) { 3601 list_del(&sb->list); 3602 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3603 sb->dma_handle); 3604 kfree(sb); 3605 phba->total_scsi_bufs--; 3606 } 3607 spin_unlock(&phba->scsi_buf_list_get_lock); 3608 spin_unlock_irq(&phba->hbalock); 3609 } 3610 3611 /** 3612 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3613 * @phba: pointer to lpfc hba data structure. 3614 * 3615 * This routine is to free all the IO buffers and IOCBs from the driver 3616 * list back to kernel. It is called from lpfc_pci_remove_one to free 3617 * the internal resources before the device is removed from the system. 3618 **/ 3619 void 3620 lpfc_io_free(struct lpfc_hba *phba) 3621 { 3622 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3623 struct lpfc_sli4_hdw_queue *qp; 3624 int idx; 3625 3626 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3627 qp = &phba->sli4_hba.hdwq[idx]; 3628 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3629 spin_lock(&qp->io_buf_list_put_lock); 3630 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3631 &qp->lpfc_io_buf_list_put, 3632 list) { 3633 list_del(&lpfc_ncmd->list); 3634 qp->put_io_bufs--; 3635 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3636 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3637 kfree(lpfc_ncmd); 3638 qp->total_io_bufs--; 3639 } 3640 spin_unlock(&qp->io_buf_list_put_lock); 3641 3642 spin_lock(&qp->io_buf_list_get_lock); 3643 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3644 &qp->lpfc_io_buf_list_get, 3645 list) { 3646 list_del(&lpfc_ncmd->list); 3647 qp->get_io_bufs--; 3648 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3649 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3650 kfree(lpfc_ncmd); 3651 qp->total_io_bufs--; 3652 } 3653 spin_unlock(&qp->io_buf_list_get_lock); 3654 } 3655 } 3656 3657 /** 3658 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3659 * @phba: pointer to lpfc hba data structure. 3660 * 3661 * This routine first calculates the sizes of the current els and allocated 3662 * scsi sgl lists, and then goes through all sgls to updates the physical 3663 * XRIs assigned due to port function reset. During port initialization, the 3664 * current els and allocated scsi sgl lists are 0s. 3665 * 3666 * Return codes 3667 * 0 - successful (for now, it always returns 0) 3668 **/ 3669 int 3670 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3671 { 3672 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3673 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3674 LIST_HEAD(els_sgl_list); 3675 int rc; 3676 3677 /* 3678 * update on pci function's els xri-sgl list 3679 */ 3680 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3681 3682 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3683 /* els xri-sgl expanded */ 3684 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3685 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3686 "3157 ELS xri-sgl count increased from " 3687 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3688 els_xri_cnt); 3689 /* allocate the additional els sgls */ 3690 for (i = 0; i < xri_cnt; i++) { 3691 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3692 GFP_KERNEL); 3693 if (sglq_entry == NULL) { 3694 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3695 "2562 Failure to allocate an " 3696 "ELS sgl entry:%d\n", i); 3697 rc = -ENOMEM; 3698 goto out_free_mem; 3699 } 3700 sglq_entry->buff_type = GEN_BUFF_TYPE; 3701 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3702 &sglq_entry->phys); 3703 if (sglq_entry->virt == NULL) { 3704 kfree(sglq_entry); 3705 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3706 "2563 Failure to allocate an " 3707 "ELS mbuf:%d\n", i); 3708 rc = -ENOMEM; 3709 goto out_free_mem; 3710 } 3711 sglq_entry->sgl = sglq_entry->virt; 3712 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3713 sglq_entry->state = SGL_FREED; 3714 list_add_tail(&sglq_entry->list, &els_sgl_list); 3715 } 3716 spin_lock_irq(&phba->hbalock); 3717 spin_lock(&phba->sli4_hba.sgl_list_lock); 3718 list_splice_init(&els_sgl_list, 3719 &phba->sli4_hba.lpfc_els_sgl_list); 3720 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3721 spin_unlock_irq(&phba->hbalock); 3722 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3723 /* els xri-sgl shrinked */ 3724 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3725 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3726 "3158 ELS xri-sgl count decreased from " 3727 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3728 els_xri_cnt); 3729 spin_lock_irq(&phba->hbalock); 3730 spin_lock(&phba->sli4_hba.sgl_list_lock); 3731 
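/* With the hbalock and sgl_list_lock held, move the registered ELS
 * sgls to a local list, free the extra entries, and splice the
 * remainder back onto the driver's ELS sgl list below.
 */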
list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, 3732 &els_sgl_list); 3733 /* release extra els sgls from list */ 3734 for (i = 0; i < xri_cnt; i++) { 3735 list_remove_head(&els_sgl_list, 3736 sglq_entry, struct lpfc_sglq, list); 3737 if (sglq_entry) { 3738 __lpfc_mbuf_free(phba, sglq_entry->virt, 3739 sglq_entry->phys); 3740 kfree(sglq_entry); 3741 } 3742 } 3743 list_splice_init(&els_sgl_list, 3744 &phba->sli4_hba.lpfc_els_sgl_list); 3745 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3746 spin_unlock_irq(&phba->hbalock); 3747 } else 3748 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3749 "3163 ELS xri-sgl count unchanged: %d\n", 3750 els_xri_cnt); 3751 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 3752 3753 /* update xris to els sgls on the list */ 3754 sglq_entry = NULL; 3755 sglq_entry_next = NULL; 3756 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3757 &phba->sli4_hba.lpfc_els_sgl_list, list) { 3758 lxri = lpfc_sli4_next_xritag(phba); 3759 if (lxri == NO_XRI) { 3760 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3761 "2400 Failed to allocate xri for " 3762 "ELS sgl\n"); 3763 rc = -ENOMEM; 3764 goto out_free_mem; 3765 } 3766 sglq_entry->sli4_lxritag = lxri; 3767 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3768 } 3769 return 0; 3770 3771 out_free_mem: 3772 lpfc_free_els_sgl_list(phba); 3773 return rc; 3774 } 3775 3776 /** 3777 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping 3778 * @phba: pointer to lpfc hba data structure. 3779 * 3780 * This routine first calculates the sizes of the current els and allocated 3781 * scsi sgl lists, and then goes through all sgls to updates the physical 3782 * XRIs assigned due to port function reset. During port initialization, the 3783 * current els and allocated scsi sgl lists are 0s. 3784 * 3785 * Return codes 3786 * 0 - successful (for now, it always returns 0) 3787 **/ 3788 int 3789 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 3790 { 3791 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3792 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3793 uint16_t nvmet_xri_cnt; 3794 LIST_HEAD(nvmet_sgl_list); 3795 int rc; 3796 3797 /* 3798 * update on pci function's nvmet xri-sgl list 3799 */ 3800 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3801 3802 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 3803 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3804 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3805 /* els xri-sgl expanded */ 3806 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 3807 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3808 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 3809 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 3810 /* allocate the additional nvmet sgls */ 3811 for (i = 0; i < xri_cnt; i++) { 3812 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3813 GFP_KERNEL); 3814 if (sglq_entry == NULL) { 3815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3816 "6303 Failure to allocate an " 3817 "NVMET sgl entry:%d\n", i); 3818 rc = -ENOMEM; 3819 goto out_free_mem; 3820 } 3821 sglq_entry->buff_type = NVMET_BUFF_TYPE; 3822 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 3823 &sglq_entry->phys); 3824 if (sglq_entry->virt == NULL) { 3825 kfree(sglq_entry); 3826 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3827 "6304 Failure to allocate an " 3828 "NVMET buf:%d\n", i); 3829 rc = -ENOMEM; 3830 goto out_free_mem; 3831 } 3832 sglq_entry->sgl = sglq_entry->virt; 3833 memset(sglq_entry->sgl, 0, 3834 phba->cfg_sg_dma_buf_size); 3835 sglq_entry->state = SGL_FREED; 3836 
list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3837 } 3838 spin_lock_irq(&phba->hbalock); 3839 spin_lock(&phba->sli4_hba.sgl_list_lock); 3840 list_splice_init(&nvmet_sgl_list, 3841 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3842 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3843 spin_unlock_irq(&phba->hbalock); 3844 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3845 /* nvmet xri-sgl shrunk */ 3846 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3847 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3848 "6305 NVMET xri-sgl count decreased from " 3849 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3850 nvmet_xri_cnt); 3851 spin_lock_irq(&phba->hbalock); 3852 spin_lock(&phba->sli4_hba.sgl_list_lock); 3853 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3854 &nvmet_sgl_list); 3855 /* release extra nvmet sgls from list */ 3856 for (i = 0; i < xri_cnt; i++) { 3857 list_remove_head(&nvmet_sgl_list, 3858 sglq_entry, struct lpfc_sglq, list); 3859 if (sglq_entry) { 3860 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3861 sglq_entry->phys); 3862 kfree(sglq_entry); 3863 } 3864 } 3865 list_splice_init(&nvmet_sgl_list, 3866 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3867 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3868 spin_unlock_irq(&phba->hbalock); 3869 } else 3870 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3871 "6306 NVMET xri-sgl count unchanged: %d\n", 3872 nvmet_xri_cnt); 3873 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3874 3875 /* update xris to nvmet sgls on the list */ 3876 sglq_entry = NULL; 3877 sglq_entry_next = NULL; 3878 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3879 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3880 lxri = lpfc_sli4_next_xritag(phba); 3881 if (lxri == NO_XRI) { 3882 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3883 "6307 Failed to allocate xri for " 3884 "NVMET sgl\n"); 3885 rc = -ENOMEM; 3886 goto out_free_mem; 3887 } 3888 sglq_entry->sli4_lxritag = lxri; 3889 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3890 } 3891 return 0; 3892 3893 out_free_mem: 3894 lpfc_free_nvmet_sgl_list(phba); 3895 return rc; 3896 } 3897 3898 int 3899 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 3900 { 3901 LIST_HEAD(blist); 3902 struct lpfc_sli4_hdw_queue *qp; 3903 struct lpfc_io_buf *lpfc_cmd; 3904 struct lpfc_io_buf *iobufp, *prev_iobufp; 3905 int idx, cnt, xri, inserted; 3906 3907 cnt = 0; 3908 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3909 qp = &phba->sli4_hba.hdwq[idx]; 3910 spin_lock_irq(&qp->io_buf_list_get_lock); 3911 spin_lock(&qp->io_buf_list_put_lock); 3912 3913 /* Take everything off the get and put lists */ 3914 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 3915 list_splice(&qp->lpfc_io_buf_list_put, &blist); 3916 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 3917 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 3918 cnt += qp->get_io_bufs + qp->put_io_bufs; 3919 qp->get_io_bufs = 0; 3920 qp->put_io_bufs = 0; 3921 qp->total_io_bufs = 0; 3922 spin_unlock(&qp->io_buf_list_put_lock); 3923 spin_unlock_irq(&qp->io_buf_list_get_lock); 3924 } 3925 3926 /* 3927 * Take IO buffers off blist and put on cbuf sorted by XRI. 3928 * This is because POST_SGL takes a sequential range of XRIs 3929 * to post to the firmware. 
3930 */ 3931 for (idx = 0; idx < cnt; idx++) { 3932 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 3933 if (!lpfc_cmd) 3934 return cnt; 3935 if (idx == 0) { 3936 list_add_tail(&lpfc_cmd->list, cbuf); 3937 continue; 3938 } 3939 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 3940 inserted = 0; 3941 prev_iobufp = NULL; 3942 list_for_each_entry(iobufp, cbuf, list) { 3943 if (xri < iobufp->cur_iocbq.sli4_xritag) { 3944 if (prev_iobufp) 3945 list_add(&lpfc_cmd->list, 3946 &prev_iobufp->list); 3947 else 3948 list_add(&lpfc_cmd->list, cbuf); 3949 inserted = 1; 3950 break; 3951 } 3952 prev_iobufp = iobufp; 3953 } 3954 if (!inserted) 3955 list_add_tail(&lpfc_cmd->list, cbuf); 3956 } 3957 return cnt; 3958 } 3959 3960 int 3961 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 3962 { 3963 struct lpfc_sli4_hdw_queue *qp; 3964 struct lpfc_io_buf *lpfc_cmd; 3965 int idx, cnt; 3966 3967 qp = phba->sli4_hba.hdwq; 3968 cnt = 0; 3969 while (!list_empty(cbuf)) { 3970 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3971 list_remove_head(cbuf, lpfc_cmd, 3972 struct lpfc_io_buf, list); 3973 if (!lpfc_cmd) 3974 return cnt; 3975 cnt++; 3976 qp = &phba->sli4_hba.hdwq[idx]; 3977 lpfc_cmd->hdwq_no = idx; 3978 lpfc_cmd->hdwq = qp; 3979 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 3980 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 3981 spin_lock(&qp->io_buf_list_put_lock); 3982 list_add_tail(&lpfc_cmd->list, 3983 &qp->lpfc_io_buf_list_put); 3984 qp->put_io_bufs++; 3985 qp->total_io_bufs++; 3986 spin_unlock(&qp->io_buf_list_put_lock); 3987 } 3988 } 3989 return cnt; 3990 } 3991 3992 /** 3993 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 3994 * @phba: pointer to lpfc hba data structure. 3995 * 3996 * This routine first calculates the sizes of the current els and allocated 3997 * scsi sgl lists, and then goes through all sgls to updates the physical 3998 * XRIs assigned due to port function reset. During port initialization, the 3999 * current els and allocated scsi sgl lists are 0s. 
4000 *
4001 * Return codes
4002 *   0 - successful; -ENOMEM - failed to assign XRIs to the IO buffers
4003 **/
4004 int
4005 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4006 {
4007 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4008 uint16_t i, lxri, els_xri_cnt;
4009 uint16_t io_xri_cnt, io_xri_max;
4010 LIST_HEAD(io_sgl_list);
4011 int rc, cnt;
4012
4013 /*
4014 * update on pci function's allocated nvme xri-sgl list
4015 */
4016
4017 /* maximum number of xris available for nvme buffers */
4018 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4019 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4020 phba->sli4_hba.io_xri_max = io_xri_max;
4021
4022 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4023 "6074 Current allocated XRI sgl count:%d, "
4024 "maximum XRI count:%d\n",
4025 phba->sli4_hba.io_xri_cnt,
4026 phba->sli4_hba.io_xri_max);
4027
4028 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4029
4030 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4031 /* max nvme xri shrunk below the allocated nvme buffers */
4032 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4033 phba->sli4_hba.io_xri_max;
4034 /* release the extra allocated nvme buffers */
4035 for (i = 0; i < io_xri_cnt; i++) {
4036 list_remove_head(&io_sgl_list, lpfc_ncmd,
4037 struct lpfc_io_buf, list);
4038 if (lpfc_ncmd) {
4039 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4040 lpfc_ncmd->data,
4041 lpfc_ncmd->dma_handle);
4042 kfree(lpfc_ncmd);
4043 }
4044 }
4045 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4046 }
4047
4048 /* update xris associated with the remaining allocated nvme buffers */
4049 lpfc_ncmd = NULL;
4050 lpfc_ncmd_next = NULL;
4051 phba->sli4_hba.io_xri_cnt = cnt;
4052 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4053 &io_sgl_list, list) {
4054 lxri = lpfc_sli4_next_xritag(phba);
4055 if (lxri == NO_XRI) {
4056 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4057 "6075 Failed to allocate xri for "
4058 "nvme buffer\n");
4059 rc = -ENOMEM;
4060 goto out_free_mem;
4061 }
4062 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4063 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4064 }
4065 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4066 return 0;
4067
4068 out_free_mem:
4069 lpfc_io_free(phba);
4070 return rc;
4071 }
4072
4073 /**
4074 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4075 * @phba: pointer to lpfc hba data structure.
4076 * @num_to_alloc: The requested number of buffers to allocate.
4077 *
4078 * This routine allocates nvme buffers for a device with the SLI-4 interface
4079 * spec; each nvme buffer contains all the necessary information needed to
4080 * initiate an I/O. After allocating up to @num_to_alloc IO buffers and
4081 * putting them on a list, it posts them to the port by using SGL block post.
4082 *
4083 * Return codes:
4084 *   int - number of IO buffers that were allocated and posted.
4085 *   0 = failure, less than num_to_alloc is a partial failure.
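 *   Buffers that fail allocation or XRI/iotag assignment are not posted,
 *   so the returned count may be lower than num_to_alloc.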
4086 **/ 4087 int 4088 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4089 { 4090 struct lpfc_io_buf *lpfc_ncmd; 4091 struct lpfc_iocbq *pwqeq; 4092 uint16_t iotag, lxri = 0; 4093 int bcnt, num_posted; 4094 LIST_HEAD(prep_nblist); 4095 LIST_HEAD(post_nblist); 4096 LIST_HEAD(nvme_nblist); 4097 4098 /* Sanity check to ensure our sizing is right for both SCSI and NVME */ 4099 if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) { 4100 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4101 "6426 Common buffer size %zd exceeds %d\n", 4102 sizeof(struct lpfc_io_buf), 4103 LPFC_COMMON_IO_BUF_SZ); 4104 return 0; 4105 } 4106 4107 phba->sli4_hba.io_xri_cnt = 0; 4108 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4109 lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL); 4110 if (!lpfc_ncmd) 4111 break; 4112 /* 4113 * Get memory from the pci pool to map the virt space to 4114 * pci bus space for an I/O. The DMA buffer includes the 4115 * number of SGE's necessary to support the sg_tablesize. 4116 */ 4117 lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, 4118 GFP_KERNEL, 4119 &lpfc_ncmd->dma_handle); 4120 if (!lpfc_ncmd->data) { 4121 kfree(lpfc_ncmd); 4122 break; 4123 } 4124 memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size); 4125 4126 /* 4127 * 4K Page alignment is CRITICAL to BlockGuard, double check 4128 * to be sure. 4129 */ 4130 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4131 (((unsigned long)(lpfc_ncmd->data) & 4132 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4133 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4134 "3369 Memory alignment err: addr=%lx\n", 4135 (unsigned long)lpfc_ncmd->data); 4136 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4137 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4138 kfree(lpfc_ncmd); 4139 break; 4140 } 4141 4142 lxri = lpfc_sli4_next_xritag(phba); 4143 if (lxri == NO_XRI) { 4144 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4145 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4146 kfree(lpfc_ncmd); 4147 break; 4148 } 4149 pwqeq = &lpfc_ncmd->cur_iocbq; 4150 4151 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4152 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4153 if (iotag == 0) { 4154 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4155 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4156 kfree(lpfc_ncmd); 4157 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 4158 "6121 Failed to allocate IOTAG for" 4159 " XRI:0x%x\n", lxri); 4160 lpfc_sli4_free_xri(phba, lxri); 4161 break; 4162 } 4163 pwqeq->sli4_lxritag = lxri; 4164 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4165 pwqeq->context1 = lpfc_ncmd; 4166 4167 /* Initialize local short-hand pointers. 
*/ 4168 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4169 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4170 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4171 spin_lock_init(&lpfc_ncmd->buf_lock); 4172 4173 /* add the nvme buffer to a post list */ 4174 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4175 phba->sli4_hba.io_xri_cnt++; 4176 } 4177 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4178 "6114 Allocate %d out of %d requested new NVME " 4179 "buffers\n", bcnt, num_to_alloc); 4180 4181 /* post the list of nvme buffer sgls to port if available */ 4182 if (!list_empty(&post_nblist)) 4183 num_posted = lpfc_sli4_post_io_sgl_list( 4184 phba, &post_nblist, bcnt); 4185 else 4186 num_posted = 0; 4187 4188 return num_posted; 4189 } 4190 4191 static uint64_t 4192 lpfc_get_wwpn(struct lpfc_hba *phba) 4193 { 4194 uint64_t wwn; 4195 int rc; 4196 LPFC_MBOXQ_t *mboxq; 4197 MAILBOX_t *mb; 4198 4199 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4200 GFP_KERNEL); 4201 if (!mboxq) 4202 return (uint64_t)-1; 4203 4204 /* First get WWN of HBA instance */ 4205 lpfc_read_nv(phba, mboxq); 4206 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4207 if (rc != MBX_SUCCESS) { 4208 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4209 "6019 Mailbox failed , mbxCmd x%x " 4210 "READ_NV, mbxStatus x%x\n", 4211 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4212 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4213 mempool_free(mboxq, phba->mbox_mem_pool); 4214 return (uint64_t) -1; 4215 } 4216 mb = &mboxq->u.mb; 4217 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4218 /* wwn is WWPN of HBA instance */ 4219 mempool_free(mboxq, phba->mbox_mem_pool); 4220 if (phba->sli_rev == LPFC_SLI_REV4) 4221 return be64_to_cpu(wwn); 4222 else 4223 return rol64(wwn, 32); 4224 } 4225 4226 /** 4227 * lpfc_create_port - Create an FC port 4228 * @phba: pointer to lpfc hba data structure. 4229 * @instance: a unique integer ID to this FC port. 4230 * @dev: pointer to the device data structure. 4231 * 4232 * This routine creates a FC port for the upper layer protocol. The FC port 4233 * can be created on top of either a physical port or a virtual port provided 4234 * by the HBA. This routine also allocates a SCSI host data structure (shost) 4235 * and associates the FC port created before adding the shost into the SCSI 4236 * layer. 4237 * 4238 * Return codes 4239 * @vport - pointer to the virtual N_Port data structure. 4240 * NULL - port create failed. 
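 *
 * Note: when the adapter WWPN matches an entry in the lpfc_no_hba_reset
 * list, the physical port is created with the no-host-reset SCSI host
 * template (lpfc_template_no_hr).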
4241 **/ 4242 struct lpfc_vport * 4243 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4244 { 4245 struct lpfc_vport *vport; 4246 struct Scsi_Host *shost = NULL; 4247 int error = 0; 4248 int i; 4249 uint64_t wwn; 4250 bool use_no_reset_hba = false; 4251 int rc; 4252 4253 if (lpfc_no_hba_reset_cnt) { 4254 if (phba->sli_rev < LPFC_SLI_REV4 && 4255 dev == &phba->pcidev->dev) { 4256 /* Reset the port first */ 4257 lpfc_sli_brdrestart(phba); 4258 rc = lpfc_sli_chipset_init(phba); 4259 if (rc) 4260 return NULL; 4261 } 4262 wwn = lpfc_get_wwpn(phba); 4263 } 4264 4265 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4266 if (wwn == lpfc_no_hba_reset[i]) { 4267 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4268 "6020 Setting use_no_reset port=%llx\n", 4269 wwn); 4270 use_no_reset_hba = true; 4271 break; 4272 } 4273 } 4274 4275 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4276 if (dev != &phba->pcidev->dev) { 4277 shost = scsi_host_alloc(&lpfc_vport_template, 4278 sizeof(struct lpfc_vport)); 4279 } else { 4280 if (!use_no_reset_hba) 4281 shost = scsi_host_alloc(&lpfc_template, 4282 sizeof(struct lpfc_vport)); 4283 else 4284 shost = scsi_host_alloc(&lpfc_template_no_hr, 4285 sizeof(struct lpfc_vport)); 4286 } 4287 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 4288 shost = scsi_host_alloc(&lpfc_template_nvme, 4289 sizeof(struct lpfc_vport)); 4290 } 4291 if (!shost) 4292 goto out; 4293 4294 vport = (struct lpfc_vport *) shost->hostdata; 4295 vport->phba = phba; 4296 vport->load_flag |= FC_LOADING; 4297 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4298 vport->fc_rscn_flush = 0; 4299 lpfc_get_vport_cfgparam(vport); 4300 4301 /* Adjust value in vport */ 4302 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4303 4304 shost->unique_id = instance; 4305 shost->max_id = LPFC_MAX_TARGET; 4306 shost->max_lun = vport->cfg_max_luns; 4307 shost->this_id = -1; 4308 shost->max_cmd_len = 16; 4309 4310 if (phba->sli_rev == LPFC_SLI_REV4) { 4311 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) 4312 shost->nr_hw_queues = phba->cfg_hdw_queue; 4313 else 4314 shost->nr_hw_queues = phba->sli4_hba.num_present_cpu; 4315 4316 shost->dma_boundary = 4317 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4318 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4319 } else 4320 /* SLI-3 has a limited number of hardware queues (3), 4321 * thus there is only one for FCP processing. 4322 */ 4323 shost->nr_hw_queues = 1; 4324 4325 /* 4326 * Set initial can_queue value since 0 is no longer supported and 4327 * scsi_add_host will fail. This will be adjusted later based on the 4328 * max xri value determined in hba setup. 4329 */ 4330 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4331 if (dev != &phba->pcidev->dev) { 4332 shost->transportt = lpfc_vport_transport_template; 4333 vport->port_type = LPFC_NPIV_PORT; 4334 } else { 4335 shost->transportt = lpfc_transport_template; 4336 vport->port_type = LPFC_PHYSICAL_PORT; 4337 } 4338 4339 /* Initialize all internally managed lists. 
*/ 4340 INIT_LIST_HEAD(&vport->fc_nodes); 4341 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4342 spin_lock_init(&vport->work_port_lock); 4343 4344 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4345 4346 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4347 4348 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4349 4350 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4351 if (error) 4352 goto out_put_shost; 4353 4354 spin_lock_irq(&phba->port_list_lock); 4355 list_add_tail(&vport->listentry, &phba->port_list); 4356 spin_unlock_irq(&phba->port_list_lock); 4357 return vport; 4358 4359 out_put_shost: 4360 scsi_host_put(shost); 4361 out: 4362 return NULL; 4363 } 4364 4365 /** 4366 * destroy_port - destroy an FC port 4367 * @vport: pointer to an lpfc virtual N_Port data structure. 4368 * 4369 * This routine destroys a FC port from the upper layer protocol. All the 4370 * resources associated with the port are released. 4371 **/ 4372 void 4373 destroy_port(struct lpfc_vport *vport) 4374 { 4375 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4376 struct lpfc_hba *phba = vport->phba; 4377 4378 lpfc_debugfs_terminate(vport); 4379 fc_remove_host(shost); 4380 scsi_remove_host(shost); 4381 4382 spin_lock_irq(&phba->port_list_lock); 4383 list_del_init(&vport->listentry); 4384 spin_unlock_irq(&phba->port_list_lock); 4385 4386 lpfc_cleanup(vport); 4387 return; 4388 } 4389 4390 /** 4391 * lpfc_get_instance - Get a unique integer ID 4392 * 4393 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4394 * uses the kernel idr facility to perform the task. 4395 * 4396 * Return codes: 4397 * instance - a unique integer ID allocated as the new instance. 4398 * -1 - lpfc get instance failed. 4399 **/ 4400 int 4401 lpfc_get_instance(void) 4402 { 4403 int ret; 4404 4405 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4406 return ret < 0 ? -1 : ret; 4407 } 4408 4409 /** 4410 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4411 * @shost: pointer to SCSI host data structure. 4412 * @time: elapsed time of the scan in jiffies. 4413 * 4414 * This routine is called by the SCSI layer with a SCSI host to determine 4415 * whether the scan host is finished. 4416 * 4417 * Note: there is no scan_start function as adapter initialization will have 4418 * asynchronously kicked off the link initialization. 4419 * 4420 * Return codes 4421 * 0 - SCSI host scan is not over yet. 4422 * 1 - SCSI host scan is over. 4423 **/ 4424 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4425 { 4426 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4427 struct lpfc_hba *phba = vport->phba; 4428 int stat = 0; 4429 4430 spin_lock_irq(shost->host_lock); 4431 4432 if (vport->load_flag & FC_UNLOADING) { 4433 stat = 1; 4434 goto finished; 4435 } 4436 if (time >= msecs_to_jiffies(30 * 1000)) { 4437 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4438 "0461 Scanning longer than 30 " 4439 "seconds. Continuing initialization\n"); 4440 stat = 1; 4441 goto finished; 4442 } 4443 if (time >= msecs_to_jiffies(15 * 1000) && 4444 phba->link_state <= LPFC_LINK_DOWN) { 4445 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4446 "0465 Link down longer than 15 " 4447 "seconds. 
Continuing initialization\n"); 4448 stat = 1; 4449 goto finished; 4450 } 4451 4452 if (vport->port_state != LPFC_VPORT_READY) 4453 goto finished; 4454 if (vport->num_disc_nodes || vport->fc_prli_sent) 4455 goto finished; 4456 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4457 goto finished; 4458 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4459 goto finished; 4460 4461 stat = 1; 4462 4463 finished: 4464 spin_unlock_irq(shost->host_lock); 4465 return stat; 4466 } 4467 4468 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4469 { 4470 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4471 struct lpfc_hba *phba = vport->phba; 4472 4473 fc_host_supported_speeds(shost) = 0; 4474 if (phba->lmt & LMT_128Gb) 4475 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4476 if (phba->lmt & LMT_64Gb) 4477 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4478 if (phba->lmt & LMT_32Gb) 4479 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4480 if (phba->lmt & LMT_16Gb) 4481 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4482 if (phba->lmt & LMT_10Gb) 4483 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4484 if (phba->lmt & LMT_8Gb) 4485 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4486 if (phba->lmt & LMT_4Gb) 4487 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4488 if (phba->lmt & LMT_2Gb) 4489 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4490 if (phba->lmt & LMT_1Gb) 4491 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4492 } 4493 4494 /** 4495 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4496 * @shost: pointer to SCSI host data structure. 4497 * 4498 * This routine initializes a given SCSI host attributes on a FC port. The 4499 * SCSI host can be either on top of a physical port or a virtual port. 4500 **/ 4501 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4502 { 4503 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4504 struct lpfc_hba *phba = vport->phba; 4505 /* 4506 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 4507 */ 4508 4509 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4510 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4511 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4512 4513 memset(fc_host_supported_fc4s(shost), 0, 4514 sizeof(fc_host_supported_fc4s(shost))); 4515 fc_host_supported_fc4s(shost)[2] = 1; 4516 fc_host_supported_fc4s(shost)[7] = 1; 4517 4518 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4519 sizeof fc_host_symbolic_name(shost)); 4520 4521 lpfc_host_supported_speeds_set(shost); 4522 4523 fc_host_maxframe_size(shost) = 4524 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4525 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4526 4527 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4528 4529 /* This value is also unchanging */ 4530 memset(fc_host_active_fc4s(shost), 0, 4531 sizeof(fc_host_active_fc4s(shost))); 4532 fc_host_active_fc4s(shost)[2] = 1; 4533 fc_host_active_fc4s(shost)[7] = 1; 4534 4535 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4536 spin_lock_irq(shost->host_lock); 4537 vport->load_flag &= ~FC_LOADING; 4538 spin_unlock_irq(shost->host_lock); 4539 } 4540 4541 /** 4542 * lpfc_stop_port_s3 - Stop SLI3 device port 4543 * @phba: pointer to lpfc hba data structure. 
4544 *
4545 * This routine is invoked to stop an SLI3 device port; it stops the device
4546 * from generating interrupts and stops the device driver's timers for the
4547 * device.
4548 **/
4549 static void
4550 lpfc_stop_port_s3(struct lpfc_hba *phba)
4551 {
4552 /* Clear all interrupt enable conditions */
4553 writel(0, phba->HCregaddr);
4554 readl(phba->HCregaddr); /* flush */
4555 /* Clear all pending interrupts */
4556 writel(0xffffffff, phba->HAregaddr);
4557 readl(phba->HAregaddr); /* flush */
4558
4559 /* Reset some HBA SLI setup states */
4560 lpfc_stop_hba_timers(phba);
4561 phba->pport->work_port_events = 0;
4562 }
4563
4564 /**
4565 * lpfc_stop_port_s4 - Stop SLI4 device port
4566 * @phba: pointer to lpfc hba data structure.
4567 *
4568 * This routine is invoked to stop an SLI4 device port; it stops the device
4569 * from generating interrupts and stops the device driver's timers for the
4570 * device.
4571 **/
4572 static void
4573 lpfc_stop_port_s4(struct lpfc_hba *phba)
4574 {
4575 /* Reset some HBA SLI4 setup states */
4576 lpfc_stop_hba_timers(phba);
4577 if (phba->pport)
4578 phba->pport->work_port_events = 0;
4579 phba->sli4_hba.intr_enable = 0;
4580 }
4581
4582 /**
4583 * lpfc_stop_port - Wrapper function for stopping hba port
4584 * @phba: Pointer to HBA context object.
4585 *
4586 * This routine invokes the actual SLI3 or SLI4 hba stop port routine through
4587 * the API jump table function pointer in the lpfc_hba struct.
4588 **/
4589 void
4590 lpfc_stop_port(struct lpfc_hba *phba)
4591 {
4592 phba->lpfc_stop_port(phba);
4593
4594 if (phba->wq)
4595 flush_workqueue(phba->wq);
4596 }
4597
4598 /**
4599 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4600 * @phba: Pointer to hba for which this call is being executed.
4601 *
4602 * This routine starts the timer waiting for the FCF rediscovery to complete.
4603 **/
4604 void
4605 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4606 {
4607 unsigned long fcf_redisc_wait_tmo =
4608 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4609 /* Start fcf rediscovery wait period timer */
4610 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4611 spin_lock_irq(&phba->hbalock);
4612 /* Allow action to new fcf asynchronous event */
4613 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4614 /* Mark the FCF rediscovery pending state */
4615 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4616 spin_unlock_irq(&phba->hbalock);
4617 }
4618
4619 /**
4620 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4621 * @t: timer context used to obtain the pointer to the lpfc hba data structure.
4622 *
4623 * This routine is invoked when the wait for FCF table rediscovery has
4624 * timed out. If new FCF record(s) have been discovered during the
4625 * wait period, a new FCF event shall be added to the FCOE async event
4626 * list, and the worker thread shall then be woken up for processing from the
4627 * worker thread context.
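 * If the timer has already been cancelled (FCF_REDISC_PEND is no longer
 * set), the routine returns without posting the event or waking the
 * worker thread.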
4628 **/ 4629 static void 4630 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4631 { 4632 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4633 4634 /* Don't send FCF rediscovery event if timer cancelled */ 4635 spin_lock_irq(&phba->hbalock); 4636 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4637 spin_unlock_irq(&phba->hbalock); 4638 return; 4639 } 4640 /* Clear FCF rediscovery timer pending flag */ 4641 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4642 /* FCF rediscovery event to worker thread */ 4643 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4644 spin_unlock_irq(&phba->hbalock); 4645 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4646 "2776 FCF rediscover quiescent timer expired\n"); 4647 /* wake up worker thread */ 4648 lpfc_worker_wake_up(phba); 4649 } 4650 4651 /** 4652 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4653 * @phba: pointer to lpfc hba data structure. 4654 * @acqe_link: pointer to the async link completion queue entry. 4655 * 4656 * This routine is to parse the SLI4 link-attention link fault code. 4657 **/ 4658 static void 4659 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4660 struct lpfc_acqe_link *acqe_link) 4661 { 4662 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4663 case LPFC_ASYNC_LINK_FAULT_NONE: 4664 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4665 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4666 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 4667 break; 4668 default: 4669 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4670 "0398 Unknown link fault code: x%x\n", 4671 bf_get(lpfc_acqe_link_fault, acqe_link)); 4672 break; 4673 } 4674 } 4675 4676 /** 4677 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4678 * @phba: pointer to lpfc hba data structure. 4679 * @acqe_link: pointer to the async link completion queue entry. 4680 * 4681 * This routine is to parse the SLI4 link attention type and translate it 4682 * into the base driver's link attention type coding. 4683 * 4684 * Return: Link attention type in terms of base driver's coding. 4685 **/ 4686 static uint8_t 4687 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4688 struct lpfc_acqe_link *acqe_link) 4689 { 4690 uint8_t att_type; 4691 4692 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4693 case LPFC_ASYNC_LINK_STATUS_DOWN: 4694 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4695 att_type = LPFC_ATT_LINK_DOWN; 4696 break; 4697 case LPFC_ASYNC_LINK_STATUS_UP: 4698 /* Ignore physical link up events - wait for logical link up */ 4699 att_type = LPFC_ATT_RESERVED; 4700 break; 4701 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4702 att_type = LPFC_ATT_LINK_UP; 4703 break; 4704 default: 4705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4706 "0399 Invalid link attention type: x%x\n", 4707 bf_get(lpfc_acqe_link_status, acqe_link)); 4708 att_type = LPFC_ATT_RESERVED; 4709 break; 4710 } 4711 return att_type; 4712 } 4713 4714 /** 4715 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4716 * @phba: pointer to lpfc hba data structure. 4717 * 4718 * This routine is to get an SLI3 FC port's link speed in Mbps. 4719 * 4720 * Return: link speed in terms of Mbps. 
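 * A value of 0 is returned when the link is not up or, for SLI-3, when the
 * link speed code is not recognized.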
4721 **/ 4722 uint32_t 4723 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4724 { 4725 uint32_t link_speed; 4726 4727 if (!lpfc_is_link_up(phba)) 4728 return 0; 4729 4730 if (phba->sli_rev <= LPFC_SLI_REV3) { 4731 switch (phba->fc_linkspeed) { 4732 case LPFC_LINK_SPEED_1GHZ: 4733 link_speed = 1000; 4734 break; 4735 case LPFC_LINK_SPEED_2GHZ: 4736 link_speed = 2000; 4737 break; 4738 case LPFC_LINK_SPEED_4GHZ: 4739 link_speed = 4000; 4740 break; 4741 case LPFC_LINK_SPEED_8GHZ: 4742 link_speed = 8000; 4743 break; 4744 case LPFC_LINK_SPEED_10GHZ: 4745 link_speed = 10000; 4746 break; 4747 case LPFC_LINK_SPEED_16GHZ: 4748 link_speed = 16000; 4749 break; 4750 default: 4751 link_speed = 0; 4752 } 4753 } else { 4754 if (phba->sli4_hba.link_state.logical_speed) 4755 link_speed = 4756 phba->sli4_hba.link_state.logical_speed; 4757 else 4758 link_speed = phba->sli4_hba.link_state.speed; 4759 } 4760 return link_speed; 4761 } 4762 4763 /** 4764 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 4765 * @phba: pointer to lpfc hba data structure. 4766 * @evt_code: asynchronous event code. 4767 * @speed_code: asynchronous event link speed code. 4768 * 4769 * This routine is to parse the giving SLI4 async event link speed code into 4770 * value of Mbps for the link speed. 4771 * 4772 * Return: link speed in terms of Mbps. 4773 **/ 4774 static uint32_t 4775 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4776 uint8_t speed_code) 4777 { 4778 uint32_t port_speed; 4779 4780 switch (evt_code) { 4781 case LPFC_TRAILER_CODE_LINK: 4782 switch (speed_code) { 4783 case LPFC_ASYNC_LINK_SPEED_ZERO: 4784 port_speed = 0; 4785 break; 4786 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4787 port_speed = 10; 4788 break; 4789 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4790 port_speed = 100; 4791 break; 4792 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4793 port_speed = 1000; 4794 break; 4795 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4796 port_speed = 10000; 4797 break; 4798 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4799 port_speed = 20000; 4800 break; 4801 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4802 port_speed = 25000; 4803 break; 4804 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4805 port_speed = 40000; 4806 break; 4807 default: 4808 port_speed = 0; 4809 } 4810 break; 4811 case LPFC_TRAILER_CODE_FC: 4812 switch (speed_code) { 4813 case LPFC_FC_LA_SPEED_UNKNOWN: 4814 port_speed = 0; 4815 break; 4816 case LPFC_FC_LA_SPEED_1G: 4817 port_speed = 1000; 4818 break; 4819 case LPFC_FC_LA_SPEED_2G: 4820 port_speed = 2000; 4821 break; 4822 case LPFC_FC_LA_SPEED_4G: 4823 port_speed = 4000; 4824 break; 4825 case LPFC_FC_LA_SPEED_8G: 4826 port_speed = 8000; 4827 break; 4828 case LPFC_FC_LA_SPEED_10G: 4829 port_speed = 10000; 4830 break; 4831 case LPFC_FC_LA_SPEED_16G: 4832 port_speed = 16000; 4833 break; 4834 case LPFC_FC_LA_SPEED_32G: 4835 port_speed = 32000; 4836 break; 4837 case LPFC_FC_LA_SPEED_64G: 4838 port_speed = 64000; 4839 break; 4840 case LPFC_FC_LA_SPEED_128G: 4841 port_speed = 128000; 4842 break; 4843 default: 4844 port_speed = 0; 4845 } 4846 break; 4847 default: 4848 port_speed = 0; 4849 } 4850 return port_speed; 4851 } 4852 4853 /** 4854 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4855 * @phba: pointer to lpfc hba data structure. 4856 * @acqe_link: pointer to the async link completion queue entry. 4857 * 4858 * This routine is to handle the SLI4 asynchronous FCoE link event. 
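 * For FC mode the READ_TOPOLOGY mailbox command is issued to the port to
 * retrieve topology information; for FCoE mode the mailbox completion is
 * faked and the read_topology completion handler is invoked directly.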
4859 **/ 4860 static void 4861 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 4862 struct lpfc_acqe_link *acqe_link) 4863 { 4864 struct lpfc_dmabuf *mp; 4865 LPFC_MBOXQ_t *pmb; 4866 MAILBOX_t *mb; 4867 struct lpfc_mbx_read_top *la; 4868 uint8_t att_type; 4869 int rc; 4870 4871 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 4872 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 4873 return; 4874 phba->fcoe_eventtag = acqe_link->event_tag; 4875 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4876 if (!pmb) { 4877 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4878 "0395 The mboxq allocation failed\n"); 4879 return; 4880 } 4881 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4882 if (!mp) { 4883 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4884 "0396 The lpfc_dmabuf allocation failed\n"); 4885 goto out_free_pmb; 4886 } 4887 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4888 if (!mp->virt) { 4889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4890 "0397 The mbuf allocation failed\n"); 4891 goto out_free_dmabuf; 4892 } 4893 4894 /* Cleanup any outstanding ELS commands */ 4895 lpfc_els_flush_all_cmd(phba); 4896 4897 /* Block ELS IOCBs until we have done process link event */ 4898 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4899 4900 /* Update link event statistics */ 4901 phba->sli.slistat.link_event++; 4902 4903 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4904 lpfc_read_topology(phba, pmb, mp); 4905 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4906 pmb->vport = phba->pport; 4907 4908 /* Keep the link status for extra SLI4 state machine reference */ 4909 phba->sli4_hba.link_state.speed = 4910 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 4911 bf_get(lpfc_acqe_link_speed, acqe_link)); 4912 phba->sli4_hba.link_state.duplex = 4913 bf_get(lpfc_acqe_link_duplex, acqe_link); 4914 phba->sli4_hba.link_state.status = 4915 bf_get(lpfc_acqe_link_status, acqe_link); 4916 phba->sli4_hba.link_state.type = 4917 bf_get(lpfc_acqe_link_type, acqe_link); 4918 phba->sli4_hba.link_state.number = 4919 bf_get(lpfc_acqe_link_number, acqe_link); 4920 phba->sli4_hba.link_state.fault = 4921 bf_get(lpfc_acqe_link_fault, acqe_link); 4922 phba->sli4_hba.link_state.logical_speed = 4923 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 4924 4925 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4926 "2900 Async FC/FCoE Link event - Speed:%dGBit " 4927 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 4928 "Logical speed:%dMbps Fault:%d\n", 4929 phba->sli4_hba.link_state.speed, 4930 phba->sli4_hba.link_state.topology, 4931 phba->sli4_hba.link_state.status, 4932 phba->sli4_hba.link_state.type, 4933 phba->sli4_hba.link_state.number, 4934 phba->sli4_hba.link_state.logical_speed, 4935 phba->sli4_hba.link_state.fault); 4936 /* 4937 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 4938 * topology info. Note: Optional for non FC-AL ports. 4939 */ 4940 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4941 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4942 if (rc == MBX_NOT_FINISHED) 4943 goto out_free_dmabuf; 4944 return; 4945 } 4946 /* 4947 * For FCoE Mode: fill in all the topology information we need and call 4948 * the READ_TOPOLOGY completion routine to continue without actually 4949 * sending the READ_TOPOLOGY mailbox command to the port. 
4950 */
4951 /* Initialize completion status */
4952 mb = &pmb->u.mb;
4953 mb->mbxStatus = MBX_SUCCESS;
4954
4955 /* Parse port fault information field */
4956 lpfc_sli4_parse_latt_fault(phba, acqe_link);
4957
4958 /* Parse and translate link attention fields */
4959 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4960 la->eventTag = acqe_link->event_tag;
4961 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4962 bf_set(lpfc_mbx_read_top_link_spd, la,
4963 (bf_get(lpfc_acqe_link_speed, acqe_link)));
4964
4965 /* Fake the following irrelevant fields */
4966 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4967 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4968 bf_set(lpfc_mbx_read_top_il, la, 0);
4969 bf_set(lpfc_mbx_read_top_pb, la, 0);
4970 bf_set(lpfc_mbx_read_top_fa, la, 0);
4971 bf_set(lpfc_mbx_read_top_mm, la, 0);
4972
4973 /* Invoke the lpfc_handle_latt mailbox command callback function */
4974 lpfc_mbx_cmpl_read_topology(phba, pmb);
4975
4976 return;
4977
4978 out_free_dmabuf:
4979 kfree(mp);
4980 out_free_pmb:
4981 mempool_free(pmb, phba->mbox_mem_pool);
4982 }
4983
4984 /**
4985 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
4986 * topology.
4987 * @phba: pointer to lpfc hba data structure.
4989 * @speed_code: asynchronous event link speed code.
4990 *
4991 * This routine parses the given SLI4 async event link speed code into the
4992 * corresponding Read topology link speed value.
4993 *
4994 * Return: link speed in terms of Read topology.
4995 **/
4996 static uint8_t
4997 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
4998 {
4999 uint8_t port_speed;
5000
5001 switch (speed_code) {
5002 case LPFC_FC_LA_SPEED_1G:
5003 port_speed = LPFC_LINK_SPEED_1GHZ;
5004 break;
5005 case LPFC_FC_LA_SPEED_2G:
5006 port_speed = LPFC_LINK_SPEED_2GHZ;
5007 break;
5008 case LPFC_FC_LA_SPEED_4G:
5009 port_speed = LPFC_LINK_SPEED_4GHZ;
5010 break;
5011 case LPFC_FC_LA_SPEED_8G:
5012 port_speed = LPFC_LINK_SPEED_8GHZ;
5013 break;
5014 case LPFC_FC_LA_SPEED_16G:
5015 port_speed = LPFC_LINK_SPEED_16GHZ;
5016 break;
5017 case LPFC_FC_LA_SPEED_32G:
5018 port_speed = LPFC_LINK_SPEED_32GHZ;
5019 break;
5020 case LPFC_FC_LA_SPEED_64G:
5021 port_speed = LPFC_LINK_SPEED_64GHZ;
5022 break;
5023 case LPFC_FC_LA_SPEED_128G:
5024 port_speed = LPFC_LINK_SPEED_128GHZ;
5025 break;
5026 case LPFC_FC_LA_SPEED_256G:
5027 port_speed = LPFC_LINK_SPEED_256GHZ;
5028 break;
5029 default:
5030 port_speed = 0;
5031 break;
5032 }
5033
5034 return port_speed;
5035 }
5036
5037 #define trunk_link_status(__idx)\
5038 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5039 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5040 "Link up" : "Link down") : "NA"
5041 /* Did port __idx report an error? */
5042 #define trunk_port_fault(__idx)\
5043 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5044 (port_fault & (1 << __idx) ?
"YES" : "NO") : "NA" 5045 5046 static void 5047 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 5048 struct lpfc_acqe_fc_la *acqe_fc) 5049 { 5050 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 5051 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 5052 5053 phba->sli4_hba.link_state.speed = 5054 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5055 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5056 5057 phba->sli4_hba.link_state.logical_speed = 5058 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); 5059 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 5060 phba->fc_linkspeed = 5061 lpfc_async_link_speed_to_read_top( 5062 phba, 5063 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5064 5065 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 5066 phba->trunk_link.link0.state = 5067 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 5068 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5069 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 5070 } 5071 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 5072 phba->trunk_link.link1.state = 5073 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 5074 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5075 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 5076 } 5077 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 5078 phba->trunk_link.link2.state = 5079 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 5080 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5081 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 5082 } 5083 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 5084 phba->trunk_link.link3.state = 5085 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 5086 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5087 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 5088 } 5089 5090 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5091 "2910 Async FC Trunking Event - Speed:%d\n" 5092 "\tLogical speed:%d " 5093 "port0: %s port1: %s port2: %s port3: %s\n", 5094 phba->sli4_hba.link_state.speed, 5095 phba->sli4_hba.link_state.logical_speed, 5096 trunk_link_status(0), trunk_link_status(1), 5097 trunk_link_status(2), trunk_link_status(3)); 5098 5099 if (port_fault) 5100 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5101 "3202 trunk error:0x%x (%s) seen on port0:%s " 5102 /* 5103 * SLI-4: We have only 0xA error codes 5104 * defined as of now. print an appropriate 5105 * message in case driver needs to be updated. 5106 */ 5107 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 5108 "UNDEFINED. update driver." : trunk_errmsg[err], 5109 trunk_port_fault(0), trunk_port_fault(1), 5110 trunk_port_fault(2), trunk_port_fault(3)); 5111 } 5112 5113 5114 /** 5115 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 5116 * @phba: pointer to lpfc hba data structure. 5117 * @acqe_fc: pointer to the async fc completion queue entry. 5118 * 5119 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 5120 * that the event was received and then issue a read_topology mailbox command so 5121 * that the rest of the driver will treat it the same as SLI3. 
5122 **/ 5123 static void 5124 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 5125 { 5126 struct lpfc_dmabuf *mp; 5127 LPFC_MBOXQ_t *pmb; 5128 MAILBOX_t *mb; 5129 struct lpfc_mbx_read_top *la; 5130 int rc; 5131 5132 if (bf_get(lpfc_trailer_type, acqe_fc) != 5133 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 5134 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5135 "2895 Non FC link Event detected.(%d)\n", 5136 bf_get(lpfc_trailer_type, acqe_fc)); 5137 return; 5138 } 5139 5140 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5141 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 5142 lpfc_update_trunk_link_status(phba, acqe_fc); 5143 return; 5144 } 5145 5146 /* Keep the link status for extra SLI4 state machine reference */ 5147 phba->sli4_hba.link_state.speed = 5148 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5149 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5150 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 5151 phba->sli4_hba.link_state.topology = 5152 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 5153 phba->sli4_hba.link_state.status = 5154 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 5155 phba->sli4_hba.link_state.type = 5156 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 5157 phba->sli4_hba.link_state.number = 5158 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 5159 phba->sli4_hba.link_state.fault = 5160 bf_get(lpfc_acqe_link_fault, acqe_fc); 5161 phba->sli4_hba.link_state.logical_speed = 5162 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5163 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5164 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 5165 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 5166 "%dMbps Fault:%d\n", 5167 phba->sli4_hba.link_state.speed, 5168 phba->sli4_hba.link_state.topology, 5169 phba->sli4_hba.link_state.status, 5170 phba->sli4_hba.link_state.type, 5171 phba->sli4_hba.link_state.number, 5172 phba->sli4_hba.link_state.logical_speed, 5173 phba->sli4_hba.link_state.fault); 5174 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5175 if (!pmb) { 5176 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5177 "2897 The mboxq allocation failed\n"); 5178 return; 5179 } 5180 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5181 if (!mp) { 5182 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5183 "2898 The lpfc_dmabuf allocation failed\n"); 5184 goto out_free_pmb; 5185 } 5186 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5187 if (!mp->virt) { 5188 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5189 "2899 The mbuf allocation failed\n"); 5190 goto out_free_dmabuf; 5191 } 5192 5193 /* Cleanup any outstanding ELS commands */ 5194 lpfc_els_flush_all_cmd(phba); 5195 5196 /* Block ELS IOCBs until we have done process link event */ 5197 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5198 5199 /* Update link event statistics */ 5200 phba->sli.slistat.link_event++; 5201 5202 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5203 lpfc_read_topology(phba, pmb, mp); 5204 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5205 pmb->vport = phba->pport; 5206 5207 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 5208 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 5209 5210 switch (phba->sli4_hba.link_state.status) { 5211 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 5212 phba->link_flag |= LS_MDS_LINK_DOWN; 5213 break; 5214 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 5215 phba->link_flag |= LS_MDS_LOOPBACK; 5216 break; 5217 default: 5218 break; 5219 } 5220 5221 /* Initialize completion status */ 5222 mb = &pmb->u.mb; 5223 mb->mbxStatus 
= MBX_SUCCESS; 5224 5225 /* Parse port fault information field */ 5226 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 5227 5228 /* Parse and translate link attention fields */ 5229 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 5230 la->eventTag = acqe_fc->event_tag; 5231 5232 if (phba->sli4_hba.link_state.status == 5233 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 5234 bf_set(lpfc_mbx_read_top_att_type, la, 5235 LPFC_FC_LA_TYPE_UNEXP_WWPN); 5236 } else { 5237 bf_set(lpfc_mbx_read_top_att_type, la, 5238 LPFC_FC_LA_TYPE_LINK_DOWN); 5239 } 5240 /* Invoke the mailbox command callback function */ 5241 lpfc_mbx_cmpl_read_topology(phba, pmb); 5242 5243 return; 5244 } 5245 5246 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5247 if (rc == MBX_NOT_FINISHED) 5248 goto out_free_dmabuf; 5249 return; 5250 5251 out_free_dmabuf: 5252 kfree(mp); 5253 out_free_pmb: 5254 mempool_free(pmb, phba->mbox_mem_pool); 5255 } 5256 5257 /** 5258 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 5259 * @phba: pointer to lpfc hba data structure. 5260 * @acqe_fc: pointer to the async SLI completion queue entry. 5261 * 5262 * This routine is to handle the SLI4 asynchronous SLI events. 5263 **/ 5264 static void 5265 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 5266 { 5267 char port_name; 5268 char message[128]; 5269 uint8_t status; 5270 uint8_t evt_type; 5271 uint8_t operational = 0; 5272 struct temp_event temp_event_data; 5273 struct lpfc_acqe_misconfigured_event *misconfigured; 5274 struct Scsi_Host *shost; 5275 struct lpfc_vport **vports; 5276 int rc, i; 5277 5278 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 5279 5280 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5281 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 5282 "x%08x SLI Event Type:%d\n", 5283 acqe_sli->event_data1, acqe_sli->event_data2, 5284 evt_type); 5285 5286 port_name = phba->Port[0]; 5287 if (port_name == 0x00) 5288 port_name = '?'; /* get port name is empty */ 5289 5290 switch (evt_type) { 5291 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 5292 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5293 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 5294 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5295 5296 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5297 "3190 Over Temperature:%d Celsius- Port Name %c\n", 5298 acqe_sli->event_data1, port_name); 5299 5300 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 5301 shost = lpfc_shost_from_vport(phba->pport); 5302 fc_host_post_vendor_event(shost, fc_get_event_number(), 5303 sizeof(temp_event_data), 5304 (char *)&temp_event_data, 5305 SCSI_NL_VID_TYPE_PCI 5306 | PCI_VENDOR_ID_EMULEX); 5307 break; 5308 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 5309 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5310 temp_event_data.event_code = LPFC_NORMAL_TEMP; 5311 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5312 5313 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5314 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 5315 acqe_sli->event_data1, port_name); 5316 5317 shost = lpfc_shost_from_vport(phba->pport); 5318 fc_host_post_vendor_event(shost, fc_get_event_number(), 5319 sizeof(temp_event_data), 5320 (char *)&temp_event_data, 5321 SCSI_NL_VID_TYPE_PCI 5322 | PCI_VENDOR_ID_EMULEX); 5323 break; 5324 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 5325 misconfigured = (struct lpfc_acqe_misconfigured_event *) 5326 &acqe_sli->event_data1; 5327 5328 /* fetch the status for this port */ 5329 switch (phba->sli4_hba.lnk_info.lnk_no) { 5330 case 
LPFC_LINK_NUMBER_0: 5331 status = bf_get(lpfc_sli_misconfigured_port0_state, 5332 &misconfigured->theEvent); 5333 operational = bf_get(lpfc_sli_misconfigured_port0_op, 5334 &misconfigured->theEvent); 5335 break; 5336 case LPFC_LINK_NUMBER_1: 5337 status = bf_get(lpfc_sli_misconfigured_port1_state, 5338 &misconfigured->theEvent); 5339 operational = bf_get(lpfc_sli_misconfigured_port1_op, 5340 &misconfigured->theEvent); 5341 break; 5342 case LPFC_LINK_NUMBER_2: 5343 status = bf_get(lpfc_sli_misconfigured_port2_state, 5344 &misconfigured->theEvent); 5345 operational = bf_get(lpfc_sli_misconfigured_port2_op, 5346 &misconfigured->theEvent); 5347 break; 5348 case LPFC_LINK_NUMBER_3: 5349 status = bf_get(lpfc_sli_misconfigured_port3_state, 5350 &misconfigured->theEvent); 5351 operational = bf_get(lpfc_sli_misconfigured_port3_op, 5352 &misconfigured->theEvent); 5353 break; 5354 default: 5355 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5356 "3296 " 5357 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 5358 "event: Invalid link %d", 5359 phba->sli4_hba.lnk_info.lnk_no); 5360 return; 5361 } 5362 5363 /* Skip if optic state unchanged */ 5364 if (phba->sli4_hba.lnk_info.optic_state == status) 5365 return; 5366 5367 switch (status) { 5368 case LPFC_SLI_EVENT_STATUS_VALID: 5369 sprintf(message, "Physical Link is functional"); 5370 break; 5371 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 5372 sprintf(message, "Optics faulted/incorrectly " 5373 "installed/not installed - Reseat optics, " 5374 "if issue not resolved, replace."); 5375 break; 5376 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 5377 sprintf(message, 5378 "Optics of two types installed - Remove one " 5379 "optic or install matching pair of optics."); 5380 break; 5381 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 5382 sprintf(message, "Incompatible optics - Replace with " 5383 "compatible optics for card to function."); 5384 break; 5385 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 5386 sprintf(message, "Unqualified optics - Replace with " 5387 "Avago optics for Warranty and Technical " 5388 "Support - Link is%s operational", 5389 (operational) ? " not" : ""); 5390 break; 5391 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 5392 sprintf(message, "Uncertified optics - Replace with " 5393 "Avago-certified optics to enable link " 5394 "operation - Link is%s operational", 5395 (operational) ? 
" not" : ""); 5396 break; 5397 default: 5398 /* firmware is reporting a status we don't know about */ 5399 sprintf(message, "Unknown event status x%02x", status); 5400 break; 5401 } 5402 5403 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 5404 rc = lpfc_sli4_read_config(phba); 5405 if (rc) { 5406 phba->lmt = 0; 5407 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5408 "3194 Unable to retrieve supported " 5409 "speeds, rc = 0x%x\n", rc); 5410 } 5411 vports = lpfc_create_vport_work_array(phba); 5412 if (vports != NULL) { 5413 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5414 i++) { 5415 shost = lpfc_shost_from_vport(vports[i]); 5416 lpfc_host_supported_speeds_set(shost); 5417 } 5418 } 5419 lpfc_destroy_vport_work_array(phba, vports); 5420 5421 phba->sli4_hba.lnk_info.optic_state = status; 5422 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5423 "3176 Port Name %c %s\n", port_name, message); 5424 break; 5425 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 5426 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5427 "3192 Remote DPort Test Initiated - " 5428 "Event Data1:x%08x Event Data2: x%08x\n", 5429 acqe_sli->event_data1, acqe_sli->event_data2); 5430 break; 5431 default: 5432 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5433 "3193 Async SLI event - Event Data1:x%08x Event Data2:" 5434 "x%08x SLI Event Type:%d\n", 5435 acqe_sli->event_data1, acqe_sli->event_data2, 5436 evt_type); 5437 break; 5438 } 5439 } 5440 5441 /** 5442 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 5443 * @vport: pointer to vport data structure. 5444 * 5445 * This routine is to perform Clear Virtual Link (CVL) on a vport in 5446 * response to a CVL event. 5447 * 5448 * Return the pointer to the ndlp with the vport if successful, otherwise 5449 * return NULL. 5450 **/ 5451 static struct lpfc_nodelist * 5452 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 5453 { 5454 struct lpfc_nodelist *ndlp; 5455 struct Scsi_Host *shost; 5456 struct lpfc_hba *phba; 5457 5458 if (!vport) 5459 return NULL; 5460 phba = vport->phba; 5461 if (!phba) 5462 return NULL; 5463 ndlp = lpfc_findnode_did(vport, Fabric_DID); 5464 if (!ndlp) { 5465 /* Cannot find existing Fabric ndlp, so allocate a new one */ 5466 ndlp = lpfc_nlp_init(vport, Fabric_DID); 5467 if (!ndlp) 5468 return 0; 5469 /* Set the node type */ 5470 ndlp->nlp_type |= NLP_FABRIC; 5471 /* Put ndlp onto node list */ 5472 lpfc_enqueue_node(vport, ndlp); 5473 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 5474 /* re-setup ndlp without removing from node list */ 5475 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 5476 if (!ndlp) 5477 return 0; 5478 } 5479 if ((phba->pport->port_state < LPFC_FLOGI) && 5480 (phba->pport->port_state != LPFC_VPORT_FAILED)) 5481 return NULL; 5482 /* If virtual link is not yet instantiated ignore CVL */ 5483 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 5484 && (vport->port_state != LPFC_VPORT_FAILED)) 5485 return NULL; 5486 shost = lpfc_shost_from_vport(vport); 5487 if (!shost) 5488 return NULL; 5489 lpfc_linkdown_port(vport); 5490 lpfc_cleanup_pending_mbox(vport); 5491 spin_lock_irq(shost->host_lock); 5492 vport->fc_flag |= FC_VPORT_CVL_RCVD; 5493 spin_unlock_irq(shost->host_lock); 5494 5495 return ndlp; 5496 } 5497 5498 /** 5499 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 5500 * @vport: pointer to lpfc hba data structure. 5501 * 5502 * This routine is to perform Clear Virtual Link (CVL) on all vports in 5503 * response to a FCF dead event. 
5504 **/ 5505 static void 5506 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 5507 { 5508 struct lpfc_vport **vports; 5509 int i; 5510 5511 vports = lpfc_create_vport_work_array(phba); 5512 if (vports) 5513 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 5514 lpfc_sli4_perform_vport_cvl(vports[i]); 5515 lpfc_destroy_vport_work_array(phba, vports); 5516 } 5517 5518 /** 5519 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 5520 * @phba: pointer to lpfc hba data structure. 5521 * @acqe_link: pointer to the async fcoe completion queue entry. 5522 * 5523 * This routine is to handle the SLI4 asynchronous fcoe event. 5524 **/ 5525 static void 5526 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 5527 struct lpfc_acqe_fip *acqe_fip) 5528 { 5529 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 5530 int rc; 5531 struct lpfc_vport *vport; 5532 struct lpfc_nodelist *ndlp; 5533 struct Scsi_Host *shost; 5534 int active_vlink_present; 5535 struct lpfc_vport **vports; 5536 int i; 5537 5538 phba->fc_eventTag = acqe_fip->event_tag; 5539 phba->fcoe_eventtag = acqe_fip->event_tag; 5540 switch (event_type) { 5541 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5542 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5543 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5544 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5545 LOG_DISCOVERY, 5546 "2546 New FCF event, evt_tag:x%x, " 5547 "index:x%x\n", 5548 acqe_fip->event_tag, 5549 acqe_fip->index); 5550 else 5551 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 5552 LOG_DISCOVERY, 5553 "2788 FCF param modified event, " 5554 "evt_tag:x%x, index:x%x\n", 5555 acqe_fip->event_tag, 5556 acqe_fip->index); 5557 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5558 /* 5559 * During period of FCF discovery, read the FCF 5560 * table record indexed by the event to update 5561 * FCF roundrobin failover eligible FCF bmask. 5562 */ 5563 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5564 LOG_DISCOVERY, 5565 "2779 Read FCF (x%x) for updating " 5566 "roundrobin FCF failover bmask\n", 5567 acqe_fip->index); 5568 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5569 } 5570 5571 /* If the FCF discovery is in progress, do nothing. */ 5572 spin_lock_irq(&phba->hbalock); 5573 if (phba->hba_flag & FCF_TS_INPROG) { 5574 spin_unlock_irq(&phba->hbalock); 5575 break; 5576 } 5577 /* If fast FCF failover rescan event is pending, do nothing */ 5578 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 5579 spin_unlock_irq(&phba->hbalock); 5580 break; 5581 } 5582 5583 /* If the FCF has been in discovered state, do nothing. 
*/ 5584 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5585 spin_unlock_irq(&phba->hbalock); 5586 break; 5587 } 5588 spin_unlock_irq(&phba->hbalock); 5589 5590 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5591 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5592 "2770 Start FCF table scan per async FCF " 5593 "event, evt_tag:x%x, index:x%x\n", 5594 acqe_fip->event_tag, acqe_fip->index); 5595 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5596 LPFC_FCOE_FCF_GET_FIRST); 5597 if (rc) 5598 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5599 "2547 Issue FCF scan read FCF mailbox " 5600 "command failed (x%x)\n", rc); 5601 break; 5602 5603 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5604 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5605 "2548 FCF Table full count 0x%x tag 0x%x\n", 5606 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5607 acqe_fip->event_tag); 5608 break; 5609 5610 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5611 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5612 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5613 "2549 FCF (x%x) disconnected from network, " 5614 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 5615 /* 5616 * If we are in the middle of FCF failover process, clear 5617 * the corresponding FCF bit in the roundrobin bitmap. 5618 */ 5619 spin_lock_irq(&phba->hbalock); 5620 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 5621 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 5622 spin_unlock_irq(&phba->hbalock); 5623 /* Update FLOGI FCF failover eligible FCF bmask */ 5624 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 5625 break; 5626 } 5627 spin_unlock_irq(&phba->hbalock); 5628 5629 /* If the event is not for currently used fcf do nothing */ 5630 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 5631 break; 5632 5633 /* 5634 * Otherwise, request the port to rediscover the entire FCF 5635 * table for a fast recovery from case that the current FCF 5636 * is no longer valid as we are not in the middle of FCF 5637 * failover process already. 5638 */ 5639 spin_lock_irq(&phba->hbalock); 5640 /* Mark the fast failover process in progress */ 5641 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 5642 spin_unlock_irq(&phba->hbalock); 5643 5644 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5645 "2771 Start FCF fast failover process due to " 5646 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 5647 "\n", acqe_fip->event_tag, acqe_fip->index); 5648 rc = lpfc_sli4_redisc_fcf_table(phba); 5649 if (rc) { 5650 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5651 LOG_DISCOVERY, 5652 "2772 Issue FCF rediscover mailbox " 5653 "command failed, fail through to FCF " 5654 "dead event\n"); 5655 spin_lock_irq(&phba->hbalock); 5656 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 5657 spin_unlock_irq(&phba->hbalock); 5658 /* 5659 * Last resort will fail over by treating this 5660 * as a link down to FCF registration. 5661 */ 5662 lpfc_sli4_fcf_dead_failthrough(phba); 5663 } else { 5664 /* Reset FCF roundrobin bmask for new discovery */ 5665 lpfc_sli4_clear_fcf_rr_bmask(phba); 5666 /* 5667 * Handling fast FCF failover to a DEAD FCF event is 5668 * considered equalivant to receiving CVL to all vports. 
5669 */ 5670 lpfc_sli4_perform_all_vport_cvl(phba); 5671 } 5672 break; 5673 case LPFC_FIP_EVENT_TYPE_CVL: 5674 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5675 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5676 "2718 Clear Virtual Link Received for VPI 0x%x" 5677 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5678 5679 vport = lpfc_find_vport_by_vpid(phba, 5680 acqe_fip->index); 5681 ndlp = lpfc_sli4_perform_vport_cvl(vport); 5682 if (!ndlp) 5683 break; 5684 active_vlink_present = 0; 5685 5686 vports = lpfc_create_vport_work_array(phba); 5687 if (vports) { 5688 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5689 i++) { 5690 if ((!(vports[i]->fc_flag & 5691 FC_VPORT_CVL_RCVD)) && 5692 (vports[i]->port_state > LPFC_FDISC)) { 5693 active_vlink_present = 1; 5694 break; 5695 } 5696 } 5697 lpfc_destroy_vport_work_array(phba, vports); 5698 } 5699 5700 /* 5701 * Don't re-instantiate if vport is marked for deletion. 5702 * If we are here first then vport_delete is going to wait 5703 * for discovery to complete. 5704 */ 5705 if (!(vport->load_flag & FC_UNLOADING) && 5706 active_vlink_present) { 5707 /* 5708 * If there are other active VLinks present, 5709 * re-instantiate the Vlink using FDISC. 5710 */ 5711 mod_timer(&ndlp->nlp_delayfunc, 5712 jiffies + msecs_to_jiffies(1000)); 5713 shost = lpfc_shost_from_vport(vport); 5714 spin_lock_irq(shost->host_lock); 5715 ndlp->nlp_flag |= NLP_DELAY_TMO; 5716 spin_unlock_irq(shost->host_lock); 5717 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 5718 vport->port_state = LPFC_FDISC; 5719 } else { 5720 /* 5721 * Otherwise, we request port to rediscover 5722 * the entire FCF table for a fast recovery 5723 * from possible case that the current FCF 5724 * is no longer valid if we are not already 5725 * in the FCF failover process. 5726 */ 5727 spin_lock_irq(&phba->hbalock); 5728 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5729 spin_unlock_irq(&phba->hbalock); 5730 break; 5731 } 5732 /* Mark the fast failover process in progress */ 5733 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 5734 spin_unlock_irq(&phba->hbalock); 5735 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5736 LOG_DISCOVERY, 5737 "2773 Start FCF failover per CVL, " 5738 "evt_tag:x%x\n", acqe_fip->event_tag); 5739 rc = lpfc_sli4_redisc_fcf_table(phba); 5740 if (rc) { 5741 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5742 LOG_DISCOVERY, 5743 "2774 Issue FCF rediscover " 5744 "mailbox command failed, " 5745 "through to CVL event\n"); 5746 spin_lock_irq(&phba->hbalock); 5747 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 5748 spin_unlock_irq(&phba->hbalock); 5749 /* 5750 * Last resort will be re-try on the 5751 * the current registered FCF entry. 5752 */ 5753 lpfc_retry_pport_discovery(phba); 5754 } else 5755 /* 5756 * Reset FCF roundrobin bmask for new 5757 * discovery. 5758 */ 5759 lpfc_sli4_clear_fcf_rr_bmask(phba); 5760 } 5761 break; 5762 default: 5763 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5764 "0288 Unknown FCoE event type 0x%x event tag " 5765 "0x%x\n", event_type, acqe_fip->event_tag); 5766 break; 5767 } 5768 } 5769 5770 /** 5771 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 5772 * @phba: pointer to lpfc hba data structure. 5773 * @acqe_link: pointer to the async dcbx completion queue entry. 5774 * 5775 * This routine is to handle the SLI4 asynchronous dcbx event. 
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}

/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
5873 * 5874 * This routine is invoked by the worker thread to process FCF table 5875 * rediscovery pending completion event. 5876 **/ 5877 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5878 { 5879 int rc; 5880 5881 spin_lock_irq(&phba->hbalock); 5882 /* Clear FCF rediscovery timeout event */ 5883 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 5884 /* Clear driver fast failover FCF record flag */ 5885 phba->fcf.failover_rec.flag = 0; 5886 /* Set state for FCF fast failover */ 5887 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 5888 spin_unlock_irq(&phba->hbalock); 5889 5890 /* Scan FCF table from the first entry to re-discover SAN */ 5891 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5892 "2777 Start post-quiescent FCF table scan\n"); 5893 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5894 if (rc) 5895 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5896 "2747 Issue FCF scan read FCF mailbox " 5897 "command failed 0x%x\n", rc); 5898 } 5899 5900 /** 5901 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 5902 * @phba: pointer to lpfc hba data structure. 5903 * @dev_grp: The HBA PCI-Device group number. 5904 * 5905 * This routine is invoked to set up the per HBA PCI-Device group function 5906 * API jump table entries. 5907 * 5908 * Return: 0 if success, otherwise -ENODEV 5909 **/ 5910 int 5911 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5912 { 5913 int rc; 5914 5915 /* Set up lpfc PCI-device group */ 5916 phba->pci_dev_grp = dev_grp; 5917 5918 /* The LPFC_PCI_DEV_OC uses SLI4 */ 5919 if (dev_grp == LPFC_PCI_DEV_OC) 5920 phba->sli_rev = LPFC_SLI_REV4; 5921 5922 /* Set up device INIT API function jump table */ 5923 rc = lpfc_init_api_table_setup(phba, dev_grp); 5924 if (rc) 5925 return -ENODEV; 5926 /* Set up SCSI API function jump table */ 5927 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 5928 if (rc) 5929 return -ENODEV; 5930 /* Set up SLI API function jump table */ 5931 rc = lpfc_sli_api_table_setup(phba, dev_grp); 5932 if (rc) 5933 return -ENODEV; 5934 /* Set up MBOX API function jump table */ 5935 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 5936 if (rc) 5937 return -ENODEV; 5938 5939 return 0; 5940 } 5941 5942 /** 5943 * lpfc_log_intr_mode - Log the active interrupt mode 5944 * @phba: pointer to lpfc hba data structure. 5945 * @intr_mode: active interrupt mode adopted. 5946 * 5947 * This routine it invoked to log the currently used active interrupt mode 5948 * to the device. 5949 **/ 5950 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 5951 { 5952 switch (intr_mode) { 5953 case 0: 5954 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5955 "0470 Enable INTx interrupt mode.\n"); 5956 break; 5957 case 1: 5958 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5959 "0481 Enabled MSI interrupt mode.\n"); 5960 break; 5961 case 2: 5962 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5963 "0480 Enabled MSI-X interrupt mode.\n"); 5964 break; 5965 default: 5966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5967 "0482 Illegal interrupt mode.\n"); 5968 break; 5969 } 5970 return; 5971 } 5972 5973 /** 5974 * lpfc_enable_pci_dev - Enable a generic PCI device. 5975 * @phba: pointer to lpfc hba data structure. 5976 * 5977 * This routine is invoked to enable the PCI device that is common to all 5978 * PCI devices. 
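 *
 * A minimal usage sketch (an assumed caller shape, not lifted verbatim
 * from the probe path later in this file):
 *
 *	if (lpfc_enable_pci_dev(phba))
 *		return -ENODEV;
 *	...
 *	lpfc_disable_pci_dev(phba);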
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline call invokes lpfc_sli_hba_down, which cleans up
 * outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability of the physical
 * function and returns the total number of virtual functions the device
 * supports. If the device does not advertise the SR-IOV capability, 0 is
 * returned.
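 *
 * Illustrative use (this mirrors the check made by
 * lpfc_sli_probe_sriov_nr_virtfn() below; the variable names are only
 * for the example):
 *
 *	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
 *	if (nr_vfn > max_nr_vfn)
 *		return -EINVAL;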
6079 **/ 6080 uint16_t 6081 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 6082 { 6083 struct pci_dev *pdev = phba->pcidev; 6084 uint16_t nr_virtfn; 6085 int pos; 6086 6087 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 6088 if (pos == 0) 6089 return 0; 6090 6091 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 6092 return nr_virtfn; 6093 } 6094 6095 /** 6096 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 6097 * @phba: pointer to lpfc hba data structure. 6098 * @nr_vfn: number of virtual functions to be enabled. 6099 * 6100 * This function enables the PCI SR-IOV virtual functions to a physical 6101 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 6102 * enable the number of virtual functions to the physical function. As 6103 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 6104 * API call does not considered as an error condition for most of the device. 6105 **/ 6106 int 6107 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 6108 { 6109 struct pci_dev *pdev = phba->pcidev; 6110 uint16_t max_nr_vfn; 6111 int rc; 6112 6113 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 6114 if (nr_vfn > max_nr_vfn) { 6115 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6116 "3057 Requested vfs (%d) greater than " 6117 "supported vfs (%d)", nr_vfn, max_nr_vfn); 6118 return -EINVAL; 6119 } 6120 6121 rc = pci_enable_sriov(pdev, nr_vfn); 6122 if (rc) { 6123 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6124 "2806 Failed to enable sriov on this device " 6125 "with vfn number nr_vf:%d, rc:%d\n", 6126 nr_vfn, rc); 6127 } else 6128 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6129 "2807 Successful enable sriov on this device " 6130 "with vfn number nr_vf:%d\n", nr_vfn); 6131 return rc; 6132 } 6133 6134 /** 6135 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 6136 * @phba: pointer to lpfc hba data structure. 6137 * 6138 * This routine is invoked to set up the driver internal resources before the 6139 * device specific resource setup to support the HBA device it attached to. 6140 * 6141 * Return codes 6142 * 0 - successful 6143 * other values - error 6144 **/ 6145 static int 6146 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 6147 { 6148 struct lpfc_sli *psli = &phba->sli; 6149 6150 /* 6151 * Driver resources common to all SLI revisions 6152 */ 6153 atomic_set(&phba->fast_event_count, 0); 6154 spin_lock_init(&phba->hbalock); 6155 6156 /* Initialize ndlp management spinlock */ 6157 spin_lock_init(&phba->ndlp_lock); 6158 6159 /* Initialize port_list spinlock */ 6160 spin_lock_init(&phba->port_list_lock); 6161 INIT_LIST_HEAD(&phba->port_list); 6162 6163 INIT_LIST_HEAD(&phba->work_list); 6164 init_waitqueue_head(&phba->wait_4_mlo_m_q); 6165 6166 /* Initialize the wait queue head for the kernel thread */ 6167 init_waitqueue_head(&phba->work_waitq); 6168 6169 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6170 "1403 Protocols supported %s %s %s\n", 6171 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 6172 "SCSI" : " "), 6173 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 6174 "NVME" : " "), 6175 (phba->nvmet_support ? 
"NVMET" : " ")); 6176 6177 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 6178 spin_lock_init(&phba->scsi_buf_list_get_lock); 6179 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 6180 spin_lock_init(&phba->scsi_buf_list_put_lock); 6181 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 6182 6183 /* Initialize the fabric iocb list */ 6184 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6185 6186 /* Initialize list to save ELS buffers */ 6187 INIT_LIST_HEAD(&phba->elsbuf); 6188 6189 /* Initialize FCF connection rec list */ 6190 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 6191 6192 /* Initialize OAS configuration list */ 6193 spin_lock_init(&phba->devicelock); 6194 INIT_LIST_HEAD(&phba->luns); 6195 6196 /* MBOX heartbeat timer */ 6197 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 6198 /* Fabric block timer */ 6199 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 6200 /* EA polling mode timer */ 6201 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 6202 /* Heartbeat timer */ 6203 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 6204 6205 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 6206 6207 return 0; 6208 } 6209 6210 /** 6211 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 6212 * @phba: pointer to lpfc hba data structure. 6213 * 6214 * This routine is invoked to set up the driver internal resources specific to 6215 * support the SLI-3 HBA device it attached to. 6216 * 6217 * Return codes 6218 * 0 - successful 6219 * other values - error 6220 **/ 6221 static int 6222 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 6223 { 6224 int rc, entry_sz; 6225 6226 /* 6227 * Initialize timers used by driver 6228 */ 6229 6230 /* FCP polling mode timer */ 6231 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 6232 6233 /* Host attention work mask setup */ 6234 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6235 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6236 6237 /* Get all the module params for configuring this host */ 6238 lpfc_get_cfgparam(phba); 6239 /* Set up phase-1 common device driver resources */ 6240 6241 rc = lpfc_setup_driver_resource_phase1(phba); 6242 if (rc) 6243 return -ENODEV; 6244 6245 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 6246 phba->menlo_flag |= HBA_MENLO_SUPPORT; 6247 /* check for menlo minimum sg count */ 6248 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 6249 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 6250 } 6251 6252 if (!phba->sli.sli3_ring) 6253 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 6254 sizeof(struct lpfc_sli_ring), 6255 GFP_KERNEL); 6256 if (!phba->sli.sli3_ring) 6257 return -ENOMEM; 6258 6259 /* 6260 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 6261 * used to create the sg_dma_buf_pool must be dynamically calculated. 6262 */ 6263 6264 /* Initialize the host templates the configured values. */ 6265 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6266 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 6267 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6268 6269 if (phba->sli_rev == LPFC_SLI_REV4) 6270 entry_sz = sizeof(struct sli4_sge); 6271 else 6272 entry_sz = sizeof(struct ulp_bde64); 6273 6274 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 6275 if (phba->cfg_enable_bg) { 6276 /* 6277 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 6278 * the FCP rsp, and a BDE for each. 
Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
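 *
 * Rough sketch of how this routine is expected to pair with its
 * teardown counterpart during PCI probe/remove (illustrative only; the
 * label name is made up and the actual probe and remove handlers appear
 * later in this file):
 *
 *	rc = lpfc_sli4_driver_resource_setup(phba);
 *	if (rc)
 *		goto out_cleanup;
 *	...
 *	lpfc_sli4_driver_resource_unset(phba);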
6369 * 6370 * Return codes 6371 * 0 - successful 6372 * other values - error 6373 **/ 6374 static int 6375 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 6376 { 6377 LPFC_MBOXQ_t *mboxq; 6378 MAILBOX_t *mb; 6379 int rc, i, max_buf_size; 6380 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 6381 struct lpfc_mqe *mqe; 6382 int longs; 6383 int extra; 6384 uint64_t wwn; 6385 u32 if_type; 6386 u32 if_fam; 6387 6388 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 6389 phba->sli4_hba.num_possible_cpu = num_possible_cpus(); 6390 phba->sli4_hba.curr_disp_cpu = 0; 6391 6392 /* Get all the module params for configuring this host */ 6393 lpfc_get_cfgparam(phba); 6394 6395 /* Set up phase-1 common device driver resources */ 6396 rc = lpfc_setup_driver_resource_phase1(phba); 6397 if (rc) 6398 return -ENODEV; 6399 6400 /* Before proceed, wait for POST done and device ready */ 6401 rc = lpfc_sli4_post_status_check(phba); 6402 if (rc) 6403 return -ENODEV; 6404 6405 /* 6406 * Initialize timers used by driver 6407 */ 6408 6409 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 6410 6411 /* FCF rediscover timer */ 6412 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 6413 6414 /* 6415 * Control structure for handling external multi-buffer mailbox 6416 * command pass-through. 6417 */ 6418 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 6419 sizeof(struct lpfc_mbox_ext_buf_ctx)); 6420 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 6421 6422 phba->max_vpi = LPFC_MAX_VPI; 6423 6424 /* This will be set to correct value after the read_config mbox */ 6425 phba->max_vports = 0; 6426 6427 /* Program the default value of vlan_id and fc_map */ 6428 phba->valid_vlan = 0; 6429 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 6430 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 6431 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 6432 6433 /* 6434 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 6435 * we will associate a new ring, for each EQ/CQ/WQ tuple. 6436 * The WQ create will allocate the ring. 6437 */ 6438 6439 /* 6440 * 1 for cmd, 1 for rsp, NVME adds an extra one 6441 * for boundary conditions in its max_sgl_segment template. 6442 */ 6443 extra = 2; 6444 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 6445 extra++; 6446 6447 /* 6448 * It doesn't matter what family our adapter is in, we are 6449 * limited to 2 Pages, 512 SGEs, for our SGL. 6450 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 6451 */ 6452 max_buf_size = (2 * SLI4_PAGE_SIZE); 6453 6454 /* 6455 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 6456 * used to create the sg_dma_buf_pool must be calculated. 6457 */ 6458 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 6459 /* 6460 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 6461 * the FCP rsp, and a SGE. Sice we have no control 6462 * over how many protection segments the SCSI Layer 6463 * will hand us (ie: there could be one for every block 6464 * in the IO), just allocate enough SGEs to accomidate 6465 * our max amount and we need to limit lpfc_sg_seg_cnt 6466 * to minimize the risk of running out. 6467 */ 6468 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6469 sizeof(struct fcp_rsp) + max_buf_size; 6470 6471 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 6472 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 6473 6474 /* 6475 * If supporting DIF, reduce the seg count for scsi to 6476 * allow room for the DIF sges. 
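		 *
		 * Worked example (illustrative numbers, assuming
		 * SLI4_PAGE_SIZE is 4096 bytes): max_buf_size above is
		 * 2 * 4096 = 8192, so a DIF-capable buffer is sized as
		 * sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 8192,
		 * and cfg_scsi_seg_cnt is capped at
		 * LPFC_MAX_BG_SLI4_SEG_CNT_DIF when cfg_enable_bg is set.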
6477 */ 6478 if (phba->cfg_enable_bg && 6479 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 6480 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 6481 else 6482 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6483 6484 } else { 6485 /* 6486 * The scsi_buf for a regular I/O holds the FCP cmnd, 6487 * the FCP rsp, a SGE for each, and a SGE for up to 6488 * cfg_sg_seg_cnt data segments. 6489 */ 6490 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6491 sizeof(struct fcp_rsp) + 6492 ((phba->cfg_sg_seg_cnt + extra) * 6493 sizeof(struct sli4_sge)); 6494 6495 /* Total SGEs for scsi_sg_list */ 6496 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 6497 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6498 6499 /* 6500 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 6501 * need to post 1 page for the SGL. 6502 */ 6503 } 6504 6505 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 6506 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6507 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 6508 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 6509 "6300 Reducing NVME sg segment " 6510 "cnt to %d\n", 6511 LPFC_MAX_NVME_SEG_CNT); 6512 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 6513 } else 6514 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 6515 } 6516 6517 /* Initialize the host templates with the updated values. */ 6518 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6519 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6520 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt; 6521 6522 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 6523 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 6524 else 6525 phba->cfg_sg_dma_buf_size = 6526 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 6527 6528 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6529 "9087 sg_seg_cnt:%d dmabuf_size:%d " 6530 "total:%d scsi:%d nvme:%d\n", 6531 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6532 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 6533 phba->cfg_nvme_seg_cnt); 6534 6535 /* Initialize buffer queue management fields */ 6536 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6537 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 6538 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 6539 6540 /* 6541 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
6542 */ 6543 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 6544 /* Initialize the Abort scsi buffer list used by driver */ 6545 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 6546 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 6547 } 6548 6549 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6550 /* Initialize the Abort nvme buffer list used by driver */ 6551 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 6552 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6553 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 6554 } 6555 6556 /* This abort list used by worker thread */ 6557 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 6558 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 6559 6560 /* 6561 * Initialize driver internal slow-path work queues 6562 */ 6563 6564 /* Driver internel slow-path CQ Event pool */ 6565 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 6566 /* Response IOCB work queue list */ 6567 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 6568 /* Asynchronous event CQ Event work queue list */ 6569 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 6570 /* Fast-path XRI aborted CQ Event work queue list */ 6571 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 6572 /* Slow-path XRI aborted CQ Event work queue list */ 6573 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 6574 /* Receive queue CQ Event work queue list */ 6575 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 6576 6577 /* Initialize extent block lists. */ 6578 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 6579 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 6580 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6581 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6582 6583 /* Initialize mboxq lists. If the early init routines fail 6584 * these lists need to be correctly initialized. 6585 */ 6586 INIT_LIST_HEAD(&phba->sli.mboxq); 6587 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6588 6589 /* initialize optic_state to 0xFF */ 6590 phba->sli4_hba.lnk_info.optic_state = 0xff; 6591 6592 /* Allocate device driver memory */ 6593 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6594 if (rc) 6595 return -ENOMEM; 6596 6597 /* IF Type 2 ports get initialized now. */ 6598 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 6599 LPFC_SLI_INTF_IF_TYPE_2) { 6600 rc = lpfc_pci_function_reset(phba); 6601 if (unlikely(rc)) { 6602 rc = -ENODEV; 6603 goto out_free_mem; 6604 } 6605 phba->temp_sensor_support = 1; 6606 } 6607 6608 /* Create the bootstrap mailbox command */ 6609 rc = lpfc_create_bootstrap_mbox(phba); 6610 if (unlikely(rc)) 6611 goto out_free_mem; 6612 6613 /* Set up the host's endian order with the device. */ 6614 rc = lpfc_setup_endian_order(phba); 6615 if (unlikely(rc)) 6616 goto out_free_bsmbx; 6617 6618 /* Set up the hba's configuration parameters. */ 6619 rc = lpfc_sli4_read_config(phba); 6620 if (unlikely(rc)) 6621 goto out_free_bsmbx; 6622 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6623 if (unlikely(rc)) 6624 goto out_free_bsmbx; 6625 6626 /* IF Type 0 ports get initialized now. 
*/ 6627 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6628 LPFC_SLI_INTF_IF_TYPE_0) { 6629 rc = lpfc_pci_function_reset(phba); 6630 if (unlikely(rc)) 6631 goto out_free_bsmbx; 6632 } 6633 6634 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6635 GFP_KERNEL); 6636 if (!mboxq) { 6637 rc = -ENOMEM; 6638 goto out_free_bsmbx; 6639 } 6640 6641 /* Check for NVMET being configured */ 6642 phba->nvmet_support = 0; 6643 if (lpfc_enable_nvmet_cnt) { 6644 6645 /* First get WWN of HBA instance */ 6646 lpfc_read_nv(phba, mboxq); 6647 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6648 if (rc != MBX_SUCCESS) { 6649 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6650 "6016 Mailbox failed , mbxCmd x%x " 6651 "READ_NV, mbxStatus x%x\n", 6652 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6653 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6654 mempool_free(mboxq, phba->mbox_mem_pool); 6655 rc = -EIO; 6656 goto out_free_bsmbx; 6657 } 6658 mb = &mboxq->u.mb; 6659 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6660 sizeof(uint64_t)); 6661 wwn = cpu_to_be64(wwn); 6662 phba->sli4_hba.wwnn.u.name = wwn; 6663 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6664 sizeof(uint64_t)); 6665 /* wwn is WWPN of HBA instance */ 6666 wwn = cpu_to_be64(wwn); 6667 phba->sli4_hba.wwpn.u.name = wwn; 6668 6669 /* Check to see if it matches any module parameter */ 6670 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6671 if (wwn == lpfc_enable_nvmet[i]) { 6672 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6673 if (lpfc_nvmet_mem_alloc(phba)) 6674 break; 6675 6676 phba->nvmet_support = 1; /* a match */ 6677 6678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6679 "6017 NVME Target %016llx\n", 6680 wwn); 6681 #else 6682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6683 "6021 Can't enable NVME Target." 6684 " NVME_TARGET_FC infrastructure" 6685 " is not in kernel\n"); 6686 #endif 6687 /* Not supported for NVMET */ 6688 phba->cfg_xri_rebalancing = 0; 6689 break; 6690 } 6691 } 6692 } 6693 6694 lpfc_nvme_mod_param_dep(phba); 6695 6696 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 6697 lpfc_supported_pages(mboxq); 6698 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6699 if (!rc) { 6700 mqe = &mboxq->u.mqe; 6701 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 6702 LPFC_MAX_SUPPORTED_PAGES); 6703 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 6704 switch (pn_page[i]) { 6705 case LPFC_SLI4_PARAMETERS: 6706 phba->sli4_hba.pc_sli4_params.supported = 1; 6707 break; 6708 default: 6709 break; 6710 } 6711 } 6712 /* Read the port's SLI4 Parameters capabilities if supported. */ 6713 if (phba->sli4_hba.pc_sli4_params.supported) 6714 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6715 if (rc) { 6716 mempool_free(mboxq, phba->mbox_mem_pool); 6717 rc = -EIO; 6718 goto out_free_bsmbx; 6719 } 6720 } 6721 6722 /* 6723 * Get sli4 parameters that override parameters from Port capabilities. 6724 * If this call fails, it isn't critical unless the SLI4 parameters come 6725 * back in conflict. 
6726 */ 6727 rc = lpfc_get_sli4_parameters(phba, mboxq); 6728 if (rc) { 6729 if_type = bf_get(lpfc_sli_intf_if_type, 6730 &phba->sli4_hba.sli_intf); 6731 if_fam = bf_get(lpfc_sli_intf_sli_family, 6732 &phba->sli4_hba.sli_intf); 6733 if (phba->sli4_hba.extents_in_use && 6734 phba->sli4_hba.rpi_hdrs_in_use) { 6735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6736 "2999 Unsupported SLI4 Parameters " 6737 "Extents and RPI headers enabled.\n"); 6738 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6739 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6740 mempool_free(mboxq, phba->mbox_mem_pool); 6741 rc = -EIO; 6742 goto out_free_bsmbx; 6743 } 6744 } 6745 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6746 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 6747 mempool_free(mboxq, phba->mbox_mem_pool); 6748 rc = -EIO; 6749 goto out_free_bsmbx; 6750 } 6751 } 6752 6753 mempool_free(mboxq, phba->mbox_mem_pool); 6754 6755 /* Verify OAS is supported */ 6756 lpfc_sli4_oas_verify(phba); 6757 6758 /* Verify RAS support on adapter */ 6759 lpfc_sli4_ras_init(phba); 6760 6761 /* Verify all the SLI4 queues */ 6762 rc = lpfc_sli4_queue_verify(phba); 6763 if (rc) 6764 goto out_free_bsmbx; 6765 6766 /* Create driver internal CQE event pool */ 6767 rc = lpfc_sli4_cq_event_pool_create(phba); 6768 if (rc) 6769 goto out_free_bsmbx; 6770 6771 /* Initialize sgl lists per host */ 6772 lpfc_init_sgl_list(phba); 6773 6774 /* Allocate and initialize active sgl array */ 6775 rc = lpfc_init_active_sgl_array(phba); 6776 if (rc) { 6777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6778 "1430 Failed to initialize sgl list.\n"); 6779 goto out_destroy_cq_event_pool; 6780 } 6781 rc = lpfc_sli4_init_rpi_hdrs(phba); 6782 if (rc) { 6783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6784 "1432 Failed to initialize rpi headers.\n"); 6785 goto out_free_active_sgl; 6786 } 6787 6788 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6789 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6790 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 6791 GFP_KERNEL); 6792 if (!phba->fcf.fcf_rr_bmask) { 6793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6794 "2759 Failed allocate memory for FCF round " 6795 "robin failover bmask\n"); 6796 rc = -ENOMEM; 6797 goto out_remove_rpi_hdrs; 6798 } 6799 6800 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 6801 sizeof(struct lpfc_hba_eq_hdl), 6802 GFP_KERNEL); 6803 if (!phba->sli4_hba.hba_eq_hdl) { 6804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6805 "2572 Failed allocate memory for " 6806 "fast-path per-EQ handle array\n"); 6807 rc = -ENOMEM; 6808 goto out_free_fcf_rr_bmask; 6809 } 6810 6811 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 6812 sizeof(struct lpfc_vector_map_info), 6813 GFP_KERNEL); 6814 if (!phba->sli4_hba.cpu_map) { 6815 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6816 "3327 Failed allocate memory for msi-x " 6817 "interrupt vector mapping\n"); 6818 rc = -ENOMEM; 6819 goto out_free_hba_eq_hdl; 6820 } 6821 6822 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 6823 if (!phba->sli4_hba.eq_info) { 6824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6825 "3321 Failed allocation for per_cpu stats\n"); 6826 rc = -ENOMEM; 6827 goto out_free_hba_cpu_map; 6828 } 6829 /* 6830 * Enable sr-iov virtual functions if supported and configured 6831 * through the module parameter. 
6832 */ 6833 if (phba->cfg_sriov_nr_virtfn > 0) { 6834 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6835 phba->cfg_sriov_nr_virtfn); 6836 if (rc) { 6837 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6838 "3020 Requested number of SR-IOV " 6839 "virtual functions (%d) is not " 6840 "supported\n", 6841 phba->cfg_sriov_nr_virtfn); 6842 phba->cfg_sriov_nr_virtfn = 0; 6843 } 6844 } 6845 6846 return 0; 6847 6848 out_free_hba_cpu_map: 6849 kfree(phba->sli4_hba.cpu_map); 6850 out_free_hba_eq_hdl: 6851 kfree(phba->sli4_hba.hba_eq_hdl); 6852 out_free_fcf_rr_bmask: 6853 kfree(phba->fcf.fcf_rr_bmask); 6854 out_remove_rpi_hdrs: 6855 lpfc_sli4_remove_rpi_hdrs(phba); 6856 out_free_active_sgl: 6857 lpfc_free_active_sgl(phba); 6858 out_destroy_cq_event_pool: 6859 lpfc_sli4_cq_event_pool_destroy(phba); 6860 out_free_bsmbx: 6861 lpfc_destroy_bootstrap_mbox(phba); 6862 out_free_mem: 6863 lpfc_mem_free(phba); 6864 return rc; 6865 } 6866 6867 /** 6868 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 6869 * @phba: pointer to lpfc hba data structure. 6870 * 6871 * This routine is invoked to unset the driver internal resources set up 6872 * specific for supporting the SLI-4 HBA device it attached to. 6873 **/ 6874 static void 6875 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 6876 { 6877 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 6878 6879 free_percpu(phba->sli4_hba.eq_info); 6880 6881 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 6882 kfree(phba->sli4_hba.cpu_map); 6883 phba->sli4_hba.num_possible_cpu = 0; 6884 phba->sli4_hba.num_present_cpu = 0; 6885 phba->sli4_hba.curr_disp_cpu = 0; 6886 6887 /* Free memory allocated for fast-path work queue handles */ 6888 kfree(phba->sli4_hba.hba_eq_hdl); 6889 6890 /* Free the allocated rpi headers. */ 6891 lpfc_sli4_remove_rpi_hdrs(phba); 6892 lpfc_sli4_remove_rpis(phba); 6893 6894 /* Free eligible FCF index bmask */ 6895 kfree(phba->fcf.fcf_rr_bmask); 6896 6897 /* Free the ELS sgl list */ 6898 lpfc_free_active_sgl(phba); 6899 lpfc_free_els_sgl_list(phba); 6900 lpfc_free_nvmet_sgl_list(phba); 6901 6902 /* Free the completion queue EQ event pool */ 6903 lpfc_sli4_cq_event_release_all(phba); 6904 lpfc_sli4_cq_event_pool_destroy(phba); 6905 6906 /* Release resource identifiers. */ 6907 lpfc_sli4_dealloc_resource_identifiers(phba); 6908 6909 /* Free the bsmbx region. */ 6910 lpfc_destroy_bootstrap_mbox(phba); 6911 6912 /* Free the SLI Layer memory with SLI4 HBAs */ 6913 lpfc_mem_free_all(phba); 6914 6915 /* Free the current connect table */ 6916 list_for_each_entry_safe(conn_entry, next_conn_entry, 6917 &phba->fcf_conn_rec_list, list) { 6918 list_del_init(&conn_entry->list); 6919 kfree(conn_entry); 6920 } 6921 6922 return; 6923 } 6924 6925 /** 6926 * lpfc_init_api_table_setup - Set up init api function jump table 6927 * @phba: The hba struct for which this call is being executed. 6928 * @dev_grp: The HBA PCI-Device group number. 6929 * 6930 * This routine sets up the device INIT interface API function jump table 6931 * in @phba struct. 6932 * 6933 * Returns: 0 - success, -ENODEV - failure. 
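 *
 * Example of how the table is consumed once it is set up (illustrative
 * only; callers dispatch through the per-SLI-revision pointers instead
 * of calling the _s3/_s4 variants directly):
 *
 *	phba->lpfc_stop_port(phba);
 *	phba->lpfc_hba_down_post(phba);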
6934 **/ 6935 int 6936 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6937 { 6938 phba->lpfc_hba_init_link = lpfc_hba_init_link; 6939 phba->lpfc_hba_down_link = lpfc_hba_down_link; 6940 phba->lpfc_selective_reset = lpfc_selective_reset; 6941 switch (dev_grp) { 6942 case LPFC_PCI_DEV_LP: 6943 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 6944 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 6945 phba->lpfc_stop_port = lpfc_stop_port_s3; 6946 break; 6947 case LPFC_PCI_DEV_OC: 6948 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 6949 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 6950 phba->lpfc_stop_port = lpfc_stop_port_s4; 6951 break; 6952 default: 6953 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6954 "1431 Invalid HBA PCI-device group: 0x%x\n", 6955 dev_grp); 6956 return -ENODEV; 6957 break; 6958 } 6959 return 0; 6960 } 6961 6962 /** 6963 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 6964 * @phba: pointer to lpfc hba data structure. 6965 * 6966 * This routine is invoked to set up the driver internal resources after the 6967 * device specific resource setup to support the HBA device it attached to. 6968 * 6969 * Return codes 6970 * 0 - successful 6971 * other values - error 6972 **/ 6973 static int 6974 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 6975 { 6976 int error; 6977 6978 /* Startup the kernel thread for this host adapter. */ 6979 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6980 "lpfc_worker_%d", phba->brd_no); 6981 if (IS_ERR(phba->worker_thread)) { 6982 error = PTR_ERR(phba->worker_thread); 6983 return error; 6984 } 6985 6986 /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */ 6987 if (phba->sli_rev == LPFC_SLI_REV4) 6988 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6989 else 6990 phba->wq = NULL; 6991 6992 return 0; 6993 } 6994 6995 /** 6996 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 6997 * @phba: pointer to lpfc hba data structure. 6998 * 6999 * This routine is invoked to unset the driver internal resources set up after 7000 * the device specific resource setup for supporting the HBA device it 7001 * attached to. 7002 **/ 7003 static void 7004 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 7005 { 7006 if (phba->wq) { 7007 flush_workqueue(phba->wq); 7008 destroy_workqueue(phba->wq); 7009 phba->wq = NULL; 7010 } 7011 7012 /* Stop kernel worker thread */ 7013 if (phba->worker_thread) 7014 kthread_stop(phba->worker_thread); 7015 } 7016 7017 /** 7018 * lpfc_free_iocb_list - Free iocb list. 7019 * @phba: pointer to lpfc hba data structure. 7020 * 7021 * This routine is invoked to free the driver's IOCB list and memory. 7022 **/ 7023 void 7024 lpfc_free_iocb_list(struct lpfc_hba *phba) 7025 { 7026 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 7027 7028 spin_lock_irq(&phba->hbalock); 7029 list_for_each_entry_safe(iocbq_entry, iocbq_next, 7030 &phba->lpfc_iocb_list, list) { 7031 list_del(&iocbq_entry->list); 7032 kfree(iocbq_entry); 7033 phba->total_iocbq_bufs--; 7034 } 7035 spin_unlock_irq(&phba->hbalock); 7036 7037 return; 7038 } 7039 7040 /** 7041 * lpfc_init_iocb_list - Allocate and initialize iocb list. 7042 * @phba: pointer to lpfc hba data structure. 7043 * 7044 * This routine is invoked to allocate and initizlize the driver's IOCB 7045 * list and set up the IOCB tag array accordingly. 
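 *
 * Typical pairing with its teardown helper (a sketch; LPFC_IOCB_LIST_CNT
 * is assumed to be the count the SLI-3 setup path passes in):
 *
 *	if (lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT))
 *		return -ENOMEM;
 *	...
 *	lpfc_free_iocb_list(phba);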
7046 * 7047 * Return codes 7048 * 0 - successful 7049 * other values - error 7050 **/ 7051 int 7052 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 7053 { 7054 struct lpfc_iocbq *iocbq_entry = NULL; 7055 uint16_t iotag; 7056 int i; 7057 7058 /* Initialize and populate the iocb list per host. */ 7059 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 7060 for (i = 0; i < iocb_count; i++) { 7061 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 7062 if (iocbq_entry == NULL) { 7063 printk(KERN_ERR "%s: only allocated %d iocbs of " 7064 "expected %d count. Unloading driver.\n", 7065 __func__, i, LPFC_IOCB_LIST_CNT); 7066 goto out_free_iocbq; 7067 } 7068 7069 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 7070 if (iotag == 0) { 7071 kfree(iocbq_entry); 7072 printk(KERN_ERR "%s: failed to allocate IOTAG. " 7073 "Unloading driver.\n", __func__); 7074 goto out_free_iocbq; 7075 } 7076 iocbq_entry->sli4_lxritag = NO_XRI; 7077 iocbq_entry->sli4_xritag = NO_XRI; 7078 7079 spin_lock_irq(&phba->hbalock); 7080 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 7081 phba->total_iocbq_bufs++; 7082 spin_unlock_irq(&phba->hbalock); 7083 } 7084 7085 return 0; 7086 7087 out_free_iocbq: 7088 lpfc_free_iocb_list(phba); 7089 7090 return -ENOMEM; 7091 } 7092 7093 /** 7094 * lpfc_free_sgl_list - Free a given sgl list. 7095 * @phba: pointer to lpfc hba data structure. 7096 * @sglq_list: pointer to the head of sgl list. 7097 * 7098 * This routine is invoked to free a give sgl list and memory. 7099 **/ 7100 void 7101 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 7102 { 7103 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7104 7105 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 7106 list_del(&sglq_entry->list); 7107 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 7108 kfree(sglq_entry); 7109 } 7110 } 7111 7112 /** 7113 * lpfc_free_els_sgl_list - Free els sgl list. 7114 * @phba: pointer to lpfc hba data structure. 7115 * 7116 * This routine is invoked to free the driver's els sgl list and memory. 7117 **/ 7118 static void 7119 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 7120 { 7121 LIST_HEAD(sglq_list); 7122 7123 /* Retrieve all els sgls from driver list */ 7124 spin_lock_irq(&phba->hbalock); 7125 spin_lock(&phba->sli4_hba.sgl_list_lock); 7126 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 7127 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7128 spin_unlock_irq(&phba->hbalock); 7129 7130 /* Now free the sgl list */ 7131 lpfc_free_sgl_list(phba, &sglq_list); 7132 } 7133 7134 /** 7135 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 7136 * @phba: pointer to lpfc hba data structure. 7137 * 7138 * This routine is invoked to free the driver's nvmet sgl list and memory. 
7139 **/ 7140 static void 7141 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 7142 { 7143 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7144 LIST_HEAD(sglq_list); 7145 7146 /* Retrieve all nvmet sgls from driver list */ 7147 spin_lock_irq(&phba->hbalock); 7148 spin_lock(&phba->sli4_hba.sgl_list_lock); 7149 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 7150 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7151 spin_unlock_irq(&phba->hbalock); 7152 7153 /* Now free the sgl list */ 7154 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 7155 list_del(&sglq_entry->list); 7156 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 7157 kfree(sglq_entry); 7158 } 7159 7160 /* Update the nvmet_xri_cnt to reflect no current sgls. 7161 * The next initialization cycle sets the count and allocates 7162 * the sgls over again. 7163 */ 7164 phba->sli4_hba.nvmet_xri_cnt = 0; 7165 } 7166 7167 /** 7168 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 7169 * @phba: pointer to lpfc hba data structure. 7170 * 7171 * This routine is invoked to allocate the driver's active sgl memory. 7172 * This array will hold the sglq_entry's for active IOs. 7173 **/ 7174 static int 7175 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 7176 { 7177 int size; 7178 size = sizeof(struct lpfc_sglq *); 7179 size *= phba->sli4_hba.max_cfg_param.max_xri; 7180 7181 phba->sli4_hba.lpfc_sglq_active_list = 7182 kzalloc(size, GFP_KERNEL); 7183 if (!phba->sli4_hba.lpfc_sglq_active_list) 7184 return -ENOMEM; 7185 return 0; 7186 } 7187 7188 /** 7189 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 7190 * @phba: pointer to lpfc hba data structure. 7191 * 7192 * This routine is invoked to walk through the array of active sglq entries 7193 * and free all of the resources. 7194 * This is just a place holder for now. 7195 **/ 7196 static void 7197 lpfc_free_active_sgl(struct lpfc_hba *phba) 7198 { 7199 kfree(phba->sli4_hba.lpfc_sglq_active_list); 7200 } 7201 7202 /** 7203 * lpfc_init_sgl_list - Allocate and initialize sgl list. 7204 * @phba: pointer to lpfc hba data structure. 7205 * 7206 * This routine is invoked to allocate and initizlize the driver's sgl 7207 * list and set up the sgl xritag tag array accordingly. 7208 * 7209 **/ 7210 static void 7211 lpfc_init_sgl_list(struct lpfc_hba *phba) 7212 { 7213 /* Initialize and populate the sglq list per host/VF. */ 7214 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 7215 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7216 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 7217 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7218 7219 /* els xri-sgl book keeping */ 7220 phba->sli4_hba.els_xri_cnt = 0; 7221 7222 /* nvme xri-buffer book keeping */ 7223 phba->sli4_hba.io_xri_cnt = 0; 7224 } 7225 7226 /** 7227 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 7228 * @phba: pointer to lpfc hba data structure. 7229 * 7230 * This routine is invoked to post rpi header templates to the 7231 * port for those SLI4 ports that do not support extents. This routine 7232 * posts a PAGE_SIZE memory region to the port to hold up to 7233 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 7234 * and should be called only when interrupts are disabled. 7235 * 7236 * Return codes 7237 * 0 - successful 7238 * -ERROR - otherwise. 
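 *
 * Note (assuming LPFC_RPI_HDR_COUNT is 64, as used by
 * lpfc_sli4_create_rpi_hdr() below): each posted 4KB header region
 * covers 64 RPI contexts, so further regions are expected to be posted
 * later as logins consume the currently mapped RPI range.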
7239 **/ 7240 int 7241 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 7242 { 7243 int rc = 0; 7244 struct lpfc_rpi_hdr *rpi_hdr; 7245 7246 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 7247 if (!phba->sli4_hba.rpi_hdrs_in_use) 7248 return rc; 7249 if (phba->sli4_hba.extents_in_use) 7250 return -EIO; 7251 7252 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 7253 if (!rpi_hdr) { 7254 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7255 "0391 Error during rpi post operation\n"); 7256 lpfc_sli4_remove_rpis(phba); 7257 rc = -ENODEV; 7258 } 7259 7260 return rc; 7261 } 7262 7263 /** 7264 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 7265 * @phba: pointer to lpfc hba data structure. 7266 * 7267 * This routine is invoked to allocate a single 4KB memory region to 7268 * support rpis and stores them in the phba. This single region 7269 * provides support for up to 64 rpis. The region is used globally 7270 * by the device. 7271 * 7272 * Returns: 7273 * A valid rpi hdr on success. 7274 * A NULL pointer on any failure. 7275 **/ 7276 struct lpfc_rpi_hdr * 7277 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 7278 { 7279 uint16_t rpi_limit, curr_rpi_range; 7280 struct lpfc_dmabuf *dmabuf; 7281 struct lpfc_rpi_hdr *rpi_hdr; 7282 7283 /* 7284 * If the SLI4 port supports extents, posting the rpi header isn't 7285 * required. Set the expected maximum count and let the actual value 7286 * get set when extents are fully allocated. 7287 */ 7288 if (!phba->sli4_hba.rpi_hdrs_in_use) 7289 return NULL; 7290 if (phba->sli4_hba.extents_in_use) 7291 return NULL; 7292 7293 /* The limit on the logical index is just the max_rpi count. */ 7294 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 7295 7296 spin_lock_irq(&phba->hbalock); 7297 /* 7298 * Establish the starting RPI in this header block. The starting 7299 * rpi is normalized to a zero base because the physical rpi is 7300 * port based. 7301 */ 7302 curr_rpi_range = phba->sli4_hba.next_rpi; 7303 spin_unlock_irq(&phba->hbalock); 7304 7305 /* Reached full RPI range */ 7306 if (curr_rpi_range == rpi_limit) 7307 return NULL; 7308 7309 /* 7310 * First allocate the protocol header region for the port. The 7311 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 7312 */ 7313 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 7314 if (!dmabuf) 7315 return NULL; 7316 7317 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 7318 LPFC_HDR_TEMPLATE_SIZE, 7319 &dmabuf->phys, GFP_KERNEL); 7320 if (!dmabuf->virt) { 7321 rpi_hdr = NULL; 7322 goto err_free_dmabuf; 7323 } 7324 7325 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 7326 rpi_hdr = NULL; 7327 goto err_free_coherent; 7328 } 7329 7330 /* Save the rpi header data for cleanup later. */ 7331 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 7332 if (!rpi_hdr) 7333 goto err_free_coherent; 7334 7335 rpi_hdr->dmabuf = dmabuf; 7336 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 7337 rpi_hdr->page_count = 1; 7338 spin_lock_irq(&phba->hbalock); 7339 7340 /* The rpi_hdr stores the logical index only. 
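* start_rpi below is the zero-based logical index covered by this header
* page; next_rpi simply advances by LPFC_RPI_HDR_COUNT so that the next
* header page created picks up where this one ends.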
*/ 7341 rpi_hdr->start_rpi = curr_rpi_range; 7342 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 7343 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 7344 7345 spin_unlock_irq(&phba->hbalock); 7346 return rpi_hdr; 7347 7348 err_free_coherent: 7349 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 7350 dmabuf->virt, dmabuf->phys); 7351 err_free_dmabuf: 7352 kfree(dmabuf); 7353 return NULL; 7354 } 7355 7356 /** 7357 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 7358 * @phba: pointer to lpfc hba data structure. 7359 * 7360 * This routine is invoked to remove all memory resources allocated 7361 * to support rpis for SLI4 ports not supporting extents. This routine 7362 * presumes the caller has released all rpis consumed by fabric or port 7363 * logins and is prepared to have the header pages removed. 7364 **/ 7365 void 7366 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 7367 { 7368 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 7369 7370 if (!phba->sli4_hba.rpi_hdrs_in_use) 7371 goto exit; 7372 7373 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 7374 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 7375 list_del(&rpi_hdr->list); 7376 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 7377 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 7378 kfree(rpi_hdr->dmabuf); 7379 kfree(rpi_hdr); 7380 } 7381 exit: 7382 /* There are no rpis available to the port now. */ 7383 phba->sli4_hba.next_rpi = 0; 7384 } 7385 7386 /** 7387 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 7388 * @pdev: pointer to pci device data structure. 7389 * 7390 * This routine is invoked to allocate the driver hba data structure for an 7391 * HBA device. If the allocation is successful, the phba reference to the 7392 * PCI device data structure is set. 7393 * 7394 * Return codes 7395 * pointer to @phba - successful 7396 * NULL - error 7397 **/ 7398 static struct lpfc_hba * 7399 lpfc_hba_alloc(struct pci_dev *pdev) 7400 { 7401 struct lpfc_hba *phba; 7402 7403 /* Allocate memory for HBA structure */ 7404 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 7405 if (!phba) { 7406 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 7407 return NULL; 7408 } 7409 7410 /* Set reference to PCI device in HBA structure */ 7411 phba->pcidev = pdev; 7412 7413 /* Assign an unused board number */ 7414 phba->brd_no = lpfc_get_instance(); 7415 if (phba->brd_no < 0) { 7416 kfree(phba); 7417 return NULL; 7418 } 7419 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 7420 7421 spin_lock_init(&phba->ct_ev_lock); 7422 INIT_LIST_HEAD(&phba->ct_ev_waiters); 7423 7424 return phba; 7425 } 7426 7427 /** 7428 * lpfc_hba_free - Free driver hba data structure with a device. 7429 * @phba: pointer to lpfc hba data structure. 7430 * 7431 * This routine is invoked to free the driver hba data structure with an 7432 * HBA device. 7433 **/ 7434 static void 7435 lpfc_hba_free(struct lpfc_hba *phba) 7436 { 7437 if (phba->sli_rev == LPFC_SLI_REV4) 7438 kfree(phba->sli4_hba.hdwq); 7439 7440 /* Release the driver assigned board number */ 7441 idr_remove(&lpfc_hba_index, phba->brd_no); 7442 7443 /* Free memory allocated with sli3 rings */ 7444 kfree(phba->sli.sli3_ring); 7445 phba->sli.sli3_ring = NULL; 7446 7447 kfree(phba); 7448 return; 7449 } 7450 7451 /** 7452 * lpfc_create_shost - Create hba physical port with associated scsi host. 7453 * @phba: pointer to lpfc hba data structure. 
7454 * 7455 * This routine is invoked to create HBA physical port and associate a SCSI 7456 * host with it. 7457 * 7458 * Return codes 7459 * 0 - successful 7460 * other values - error 7461 **/ 7462 static int 7463 lpfc_create_shost(struct lpfc_hba *phba) 7464 { 7465 struct lpfc_vport *vport; 7466 struct Scsi_Host *shost; 7467 7468 /* Initialize HBA FC structure */ 7469 phba->fc_edtov = FF_DEF_EDTOV; 7470 phba->fc_ratov = FF_DEF_RATOV; 7471 phba->fc_altov = FF_DEF_ALTOV; 7472 phba->fc_arbtov = FF_DEF_ARBTOV; 7473 7474 atomic_set(&phba->sdev_cnt, 0); 7475 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 7476 if (!vport) 7477 return -ENODEV; 7478 7479 shost = lpfc_shost_from_vport(vport); 7480 phba->pport = vport; 7481 7482 if (phba->nvmet_support) { 7483 /* Only 1 vport (pport) will support NVME target */ 7484 if (phba->txrdy_payload_pool == NULL) { 7485 phba->txrdy_payload_pool = dma_pool_create( 7486 "txrdy_pool", &phba->pcidev->dev, 7487 TXRDY_PAYLOAD_LEN, 16, 0); 7488 if (phba->txrdy_payload_pool) { 7489 phba->targetport = NULL; 7490 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 7491 lpfc_printf_log(phba, KERN_INFO, 7492 LOG_INIT | LOG_NVME_DISC, 7493 "6076 NVME Target Found\n"); 7494 } 7495 } 7496 } 7497 7498 lpfc_debugfs_initialize(vport); 7499 /* Put reference to SCSI host to driver's device private data */ 7500 pci_set_drvdata(phba->pcidev, shost); 7501 7502 /* 7503 * At this point we are fully registered with PSA. In addition, 7504 * any initial discovery should be completed. 7505 */ 7506 vport->load_flag |= FC_ALLOW_FDMI; 7507 if (phba->cfg_enable_SmartSAN || 7508 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 7509 7510 /* Setup appropriate attribute masks */ 7511 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 7512 if (phba->cfg_enable_SmartSAN) 7513 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 7514 else 7515 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 7516 } 7517 return 0; 7518 } 7519 7520 /** 7521 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 7522 * @phba: pointer to lpfc hba data structure. 7523 * 7524 * This routine is invoked to destroy HBA physical port and the associated 7525 * SCSI host. 7526 **/ 7527 static void 7528 lpfc_destroy_shost(struct lpfc_hba *phba) 7529 { 7530 struct lpfc_vport *vport = phba->pport; 7531 7532 /* Destroy physical port that associated with the SCSI host */ 7533 destroy_port(vport); 7534 7535 return; 7536 } 7537 7538 /** 7539 * lpfc_setup_bg - Setup Block guard structures and debug areas. 7540 * @phba: pointer to lpfc hba data structure. 7541 * @shost: the shost to be used to detect Block guard settings. 7542 * 7543 * This routine sets up the local Block guard protocol settings for @shost. 7544 * This routine also allocates memory for debugging bg buffers. 
7545 **/ 7546 static void 7547 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 7548 { 7549 uint32_t old_mask; 7550 uint32_t old_guard; 7551 7552 int pagecnt = 10; 7553 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7554 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7555 "1478 Registering BlockGuard with the " 7556 "SCSI layer\n"); 7557 7558 old_mask = phba->cfg_prot_mask; 7559 old_guard = phba->cfg_prot_guard; 7560 7561 /* Only allow supported values */ 7562 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 7563 SHOST_DIX_TYPE0_PROTECTION | 7564 SHOST_DIX_TYPE1_PROTECTION); 7565 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 7566 SHOST_DIX_GUARD_CRC); 7567 7568 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 7569 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 7570 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 7571 7572 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7573 if ((old_mask != phba->cfg_prot_mask) || 7574 (old_guard != phba->cfg_prot_guard)) 7575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7576 "1475 Registering BlockGuard with the " 7577 "SCSI layer: mask %d guard %d\n", 7578 phba->cfg_prot_mask, 7579 phba->cfg_prot_guard); 7580 7581 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7582 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7583 } else 7584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7585 "1479 Not Registering BlockGuard with the SCSI " 7586 "layer, Bad protection parameters: %d %d\n", 7587 old_mask, old_guard); 7588 } 7589 7590 if (!_dump_buf_data) { 7591 while (pagecnt) { 7592 spin_lock_init(&_dump_buf_lock); 7593 _dump_buf_data = 7594 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 7595 if (_dump_buf_data) { 7596 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7597 "9043 BLKGRD: allocated %d pages for " 7598 "_dump_buf_data at 0x%p\n", 7599 (1 << pagecnt), _dump_buf_data); 7600 _dump_buf_data_order = pagecnt; 7601 memset(_dump_buf_data, 0, 7602 ((1 << PAGE_SHIFT) << pagecnt)); 7603 break; 7604 } else 7605 --pagecnt; 7606 } 7607 if (!_dump_buf_data_order) 7608 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7609 "9044 BLKGRD: ERROR unable to allocate " 7610 "memory for hexdump\n"); 7611 } else 7612 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7613 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 7614 "\n", _dump_buf_data); 7615 if (!_dump_buf_dif) { 7616 while (pagecnt) { 7617 _dump_buf_dif = 7618 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 7619 if (_dump_buf_dif) { 7620 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7621 "9046 BLKGRD: allocated %d pages for " 7622 "_dump_buf_dif at 0x%p\n", 7623 (1 << pagecnt), _dump_buf_dif); 7624 _dump_buf_dif_order = pagecnt; 7625 memset(_dump_buf_dif, 0, 7626 ((1 << PAGE_SHIFT) << pagecnt)); 7627 break; 7628 } else 7629 --pagecnt; 7630 } 7631 if (!_dump_buf_dif_order) 7632 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7633 "9047 BLKGRD: ERROR unable to allocate " 7634 "memory for hexdump\n"); 7635 } else 7636 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7637 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 7638 _dump_buf_dif); 7639 } 7640 7641 /** 7642 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7643 * @phba: pointer to lpfc hba data structure. 7644 * 7645 * This routine is invoked to perform all the necessary post initialization 7646 * setup for the device. 
7647 **/ 7648 static void 7649 lpfc_post_init_setup(struct lpfc_hba *phba) 7650 { 7651 struct Scsi_Host *shost; 7652 struct lpfc_adapter_event_header adapter_event; 7653 7654 /* Get the default values for Model Name and Description */ 7655 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7656 7657 /* 7658 * hba setup may have changed the hba_queue_depth so we need to 7659 * adjust the value of can_queue. 7660 */ 7661 shost = pci_get_drvdata(phba->pcidev); 7662 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7663 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 7664 lpfc_setup_bg(phba, shost); 7665 7666 lpfc_host_attrib_init(shost); 7667 7668 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7669 spin_lock_irq(shost->host_lock); 7670 lpfc_poll_start_timer(phba); 7671 spin_unlock_irq(shost->host_lock); 7672 } 7673 7674 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7675 "0428 Perform SCSI scan\n"); 7676 /* Send board arrival event to upper layer */ 7677 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7678 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7679 fc_host_post_vendor_event(shost, fc_get_event_number(), 7680 sizeof(adapter_event), 7681 (char *) &adapter_event, 7682 LPFC_NL_VENDOR_ID); 7683 return; 7684 } 7685 7686 /** 7687 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7688 * @phba: pointer to lpfc hba data structure. 7689 * 7690 * This routine is invoked to set up the PCI device memory space for device 7691 * with SLI-3 interface spec. 7692 * 7693 * Return codes 7694 * 0 - successful 7695 * other values - error 7696 **/ 7697 static int 7698 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7699 { 7700 struct pci_dev *pdev = phba->pcidev; 7701 unsigned long bar0map_len, bar2map_len; 7702 int i, hbq_count; 7703 void *ptr; 7704 int error; 7705 7706 if (!pdev) 7707 return -ENODEV; 7708 7709 /* Set the device DMA mask size */ 7710 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7711 if (error) 7712 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7713 if (error) 7714 return error; 7715 error = -ENODEV; 7716 7717 /* Get the bus address of Bar0 and Bar2 and the number of bytes 7718 * required by each mapping. 7719 */ 7720 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7721 bar0map_len = pci_resource_len(pdev, 0); 7722 7723 phba->pci_bar2_map = pci_resource_start(pdev, 2); 7724 bar2map_len = pci_resource_len(pdev, 2); 7725 7726 /* Map HBA SLIM to a kernel virtual address. */ 7727 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 7728 if (!phba->slim_memmap_p) { 7729 dev_printk(KERN_ERR, &pdev->dev, 7730 "ioremap failed for SLIM memory.\n"); 7731 goto out; 7732 } 7733 7734 /* Map HBA Control Registers to a kernel virtual address. 
*/ 7735 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 7736 if (!phba->ctrl_regs_memmap_p) { 7737 dev_printk(KERN_ERR, &pdev->dev, 7738 "ioremap failed for HBA control registers.\n"); 7739 goto out_iounmap_slim; 7740 } 7741 7742 /* Allocate memory for SLI-2 structures */ 7743 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7744 &phba->slim2p.phys, GFP_KERNEL); 7745 if (!phba->slim2p.virt) 7746 goto out_iounmap; 7747 7748 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7749 phba->mbox_ext = (phba->slim2p.virt + 7750 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7751 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7752 phba->IOCBs = (phba->slim2p.virt + 7753 offsetof(struct lpfc_sli2_slim, IOCBs)); 7754 7755 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7756 lpfc_sli_hbq_size(), 7757 &phba->hbqslimp.phys, 7758 GFP_KERNEL); 7759 if (!phba->hbqslimp.virt) 7760 goto out_free_slim; 7761 7762 hbq_count = lpfc_sli_hbq_count(); 7763 ptr = phba->hbqslimp.virt; 7764 for (i = 0; i < hbq_count; ++i) { 7765 phba->hbqs[i].hbq_virt = ptr; 7766 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7767 ptr += (lpfc_hbq_defs[i]->entry_count * 7768 sizeof(struct lpfc_hbq_entry)); 7769 } 7770 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7771 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7772 7773 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7774 7775 phba->MBslimaddr = phba->slim_memmap_p; 7776 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7777 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7778 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7779 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7780 7781 return 0; 7782 7783 out_free_slim: 7784 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7785 phba->slim2p.virt, phba->slim2p.phys); 7786 out_iounmap: 7787 iounmap(phba->ctrl_regs_memmap_p); 7788 out_iounmap_slim: 7789 iounmap(phba->slim_memmap_p); 7790 out: 7791 return error; 7792 } 7793 7794 /** 7795 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7796 * @phba: pointer to lpfc hba data structure. 7797 * 7798 * This routine is invoked to unset the PCI device memory space for device 7799 * with SLI-3 interface spec. 7800 **/ 7801 static void 7802 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 7803 { 7804 struct pci_dev *pdev; 7805 7806 /* Obtain PCI device reference */ 7807 if (!phba->pcidev) 7808 return; 7809 else 7810 pdev = phba->pcidev; 7811 7812 /* Free coherent DMA memory allocated */ 7813 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7814 phba->hbqslimp.virt, phba->hbqslimp.phys); 7815 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7816 phba->slim2p.virt, phba->slim2p.phys); 7817 7818 /* I/O memory unmap */ 7819 iounmap(phba->ctrl_regs_memmap_p); 7820 iounmap(phba->slim_memmap_p); 7821 7822 return; 7823 } 7824 7825 /** 7826 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 7827 * @phba: pointer to lpfc hba data structure. 7828 * 7829 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 7830 * done and check status. 7831 * 7832 * Return 0 if successful, otherwise -ENODEV. 
7833 **/
7834 int
7835 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
7836 {
7837 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7838 struct lpfc_register reg_data;
7839 int i, port_error = 0;
7840 uint32_t if_type;
7841
7842 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
7843 memset(&reg_data, 0, sizeof(reg_data));
7844 if (!phba->sli4_hba.PSMPHRregaddr)
7845 return -ENODEV;
7846
7847 /* Wait up to 30 seconds for the SLI Port POST done and ready */
7848 for (i = 0; i < 3000; i++) {
7849 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7850 &portsmphr_reg.word0) ||
7851 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
7852 /* Port has a fatal POST error, break out */
7853 port_error = -ENODEV;
7854 break;
7855 }
7856 if (LPFC_POST_STAGE_PORT_READY ==
7857 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
7858 break;
7859 msleep(10);
7860 }
7861
7862 /*
7863 * If there was a port error during POST, then don't proceed with
7864 * other register reads as the data may not be valid. Just exit.
7865 */
7866 if (port_error) {
7867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7868 "1408 Port Failed POST - portsmphr=0x%x, "
7869 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7870 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7871 portsmphr_reg.word0,
7872 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7873 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7874 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7875 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7876 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7877 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7878 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7879 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7880 } else {
7881 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7882 "2534 Device Info: SLIFamily=0x%x, "
7883 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7884 "SLIHint_2=0x%x, FT=0x%x\n",
7885 bf_get(lpfc_sli_intf_sli_family,
7886 &phba->sli4_hba.sli_intf),
7887 bf_get(lpfc_sli_intf_slirev,
7888 &phba->sli4_hba.sli_intf),
7889 bf_get(lpfc_sli_intf_if_type,
7890 &phba->sli4_hba.sli_intf),
7891 bf_get(lpfc_sli_intf_sli_hint1,
7892 &phba->sli4_hba.sli_intf),
7893 bf_get(lpfc_sli_intf_sli_hint2,
7894 &phba->sli4_hba.sli_intf),
7895 bf_get(lpfc_sli_intf_func_type,
7896 &phba->sli4_hba.sli_intf));
7897 /*
7898 * Check for other Port errors during the initialization
7899 * process. Fail the load if the port did not come up
7900 * correctly.
7901 */
7902 if_type = bf_get(lpfc_sli_intf_if_type,
7903 &phba->sli4_hba.sli_intf);
7904 switch (if_type) {
7905 case LPFC_SLI_INTF_IF_TYPE_0:
7906 phba->sli4_hba.ue_mask_lo =
7907 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7908 phba->sli4_hba.ue_mask_hi =
7909 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7910 uerrlo_reg.word0 =
7911 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7912 uerrhi_reg.word0 =
7913 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7914 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7915 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7916 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7917 "1422 Unrecoverable Error "
7918 "Detected during POST "
7919 "uerr_lo_reg=0x%x, "
7920 "uerr_hi_reg=0x%x, "
7921 "ue_mask_lo_reg=0x%x, "
7922 "ue_mask_hi_reg=0x%x\n",
7923 uerrlo_reg.word0,
7924 uerrhi_reg.word0,
7925 phba->sli4_hba.ue_mask_lo,
7926 phba->sli4_hba.ue_mask_hi);
7927 port_error = -ENODEV;
7928 }
7929 break;
7930 case LPFC_SLI_INTF_IF_TYPE_2:
7931 case LPFC_SLI_INTF_IF_TYPE_6:
7932 /* Final checks. The port status should be clean.
*/
7933 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7934 &reg_data.word0) ||
7935 (bf_get(lpfc_sliport_status_err, &reg_data) &&
7936 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
7937 phba->work_status[0] =
7938 readl(phba->sli4_hba.u.if_type2.
7939 ERR1regaddr);
7940 phba->work_status[1] =
7941 readl(phba->sli4_hba.u.if_type2.
7942 ERR2regaddr);
7943 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7944 "2888 Unrecoverable port error "
7945 "following POST: port status reg "
7946 "0x%x, port_smphr reg 0x%x, "
7947 "error 1=0x%x, error 2=0x%x\n",
7948 reg_data.word0,
7949 portsmphr_reg.word0,
7950 phba->work_status[0],
7951 phba->work_status[1]);
7952 port_error = -ENODEV;
7953 }
7954 break;
7955 case LPFC_SLI_INTF_IF_TYPE_1:
7956 default:
7957 break;
7958 }
7959 }
7960 return port_error;
7961 }
7962
7963 /**
7964 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
7965 * @phba: pointer to lpfc hba data structure.
7966 * @if_type: The SLI4 interface type getting configured.
7967 *
7968 * This routine is invoked to set up SLI4 BAR0 PCI config space register
7969 * memory map.
7970 **/
7971 static void
7972 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7973 {
7974 switch (if_type) {
7975 case LPFC_SLI_INTF_IF_TYPE_0:
7976 phba->sli4_hba.u.if_type0.UERRLOregaddr =
7977 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
7978 phba->sli4_hba.u.if_type0.UERRHIregaddr =
7979 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
7980 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
7981 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
7982 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
7983 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
7984 phba->sli4_hba.SLIINTFregaddr =
7985 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7986 break;
7987 case LPFC_SLI_INTF_IF_TYPE_2:
7988 phba->sli4_hba.u.if_type2.EQDregaddr =
7989 phba->sli4_hba.conf_regs_memmap_p +
7990 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
7991 phba->sli4_hba.u.if_type2.ERR1regaddr =
7992 phba->sli4_hba.conf_regs_memmap_p +
7993 LPFC_CTL_PORT_ER1_OFFSET;
7994 phba->sli4_hba.u.if_type2.ERR2regaddr =
7995 phba->sli4_hba.conf_regs_memmap_p +
7996 LPFC_CTL_PORT_ER2_OFFSET;
7997 phba->sli4_hba.u.if_type2.CTRLregaddr =
7998 phba->sli4_hba.conf_regs_memmap_p +
7999 LPFC_CTL_PORT_CTL_OFFSET;
8000 phba->sli4_hba.u.if_type2.STATUSregaddr =
8001 phba->sli4_hba.conf_regs_memmap_p +
8002 LPFC_CTL_PORT_STA_OFFSET;
8003 phba->sli4_hba.SLIINTFregaddr =
8004 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8005 phba->sli4_hba.PSMPHRregaddr =
8006 phba->sli4_hba.conf_regs_memmap_p +
8007 LPFC_CTL_PORT_SEM_OFFSET;
8008 phba->sli4_hba.RQDBregaddr =
8009 phba->sli4_hba.conf_regs_memmap_p +
8010 LPFC_ULP0_RQ_DOORBELL;
8011 phba->sli4_hba.WQDBregaddr =
8012 phba->sli4_hba.conf_regs_memmap_p +
8013 LPFC_ULP0_WQ_DOORBELL;
8014 phba->sli4_hba.CQDBregaddr =
8015 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8016 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8017 phba->sli4_hba.MQDBregaddr =
8018 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8019 phba->sli4_hba.BMBXregaddr =
8020 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8021 break;
8022 case LPFC_SLI_INTF_IF_TYPE_6:
8023 phba->sli4_hba.u.if_type2.EQDregaddr =
8024 phba->sli4_hba.conf_regs_memmap_p +
8025 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8026 phba->sli4_hba.u.if_type2.ERR1regaddr =
8027 phba->sli4_hba.conf_regs_memmap_p +
8028 LPFC_CTL_PORT_ER1_OFFSET;
8029 phba->sli4_hba.u.if_type2.ERR2regaddr =
8030 phba->sli4_hba.conf_regs_memmap_p
+
8031 LPFC_CTL_PORT_ER2_OFFSET;
8032 phba->sli4_hba.u.if_type2.CTRLregaddr =
8033 phba->sli4_hba.conf_regs_memmap_p +
8034 LPFC_CTL_PORT_CTL_OFFSET;
8035 phba->sli4_hba.u.if_type2.STATUSregaddr =
8036 phba->sli4_hba.conf_regs_memmap_p +
8037 LPFC_CTL_PORT_STA_OFFSET;
8038 phba->sli4_hba.PSMPHRregaddr =
8039 phba->sli4_hba.conf_regs_memmap_p +
8040 LPFC_CTL_PORT_SEM_OFFSET;
8041 phba->sli4_hba.BMBXregaddr =
8042 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8043 break;
8044 case LPFC_SLI_INTF_IF_TYPE_1:
8045 default:
8046 dev_printk(KERN_ERR, &phba->pcidev->dev,
8047 "FATAL - unsupported SLI4 interface type - %d\n",
8048 if_type);
8049 break;
8050 }
8051 }
8052
8053 /**
8054 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
8055 * @phba: pointer to lpfc hba data structure.
* @if_type: The SLI4 interface type getting configured.
8056 *
8057 * This routine is invoked to set up SLI4 BAR1 register memory map.
8058 **/
8059 static void
8060 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8061 {
8062 switch (if_type) {
8063 case LPFC_SLI_INTF_IF_TYPE_0:
8064 phba->sli4_hba.PSMPHRregaddr =
8065 phba->sli4_hba.ctrl_regs_memmap_p +
8066 LPFC_SLIPORT_IF0_SMPHR;
8067 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8068 LPFC_HST_ISR0;
8069 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8070 LPFC_HST_IMR0;
8071 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8072 LPFC_HST_ISCR0;
8073 break;
8074 case LPFC_SLI_INTF_IF_TYPE_6:
8075 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8076 LPFC_IF6_RQ_DOORBELL;
8077 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8078 LPFC_IF6_WQ_DOORBELL;
8079 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8080 LPFC_IF6_CQ_DOORBELL;
8081 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8082 LPFC_IF6_EQ_DOORBELL;
8083 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8084 LPFC_IF6_MQ_DOORBELL;
8085 break;
8086 case LPFC_SLI_INTF_IF_TYPE_2:
8087 case LPFC_SLI_INTF_IF_TYPE_1:
8088 default:
8089 dev_err(&phba->pcidev->dev,
8090 "FATAL - unsupported SLI4 interface type - %d\n",
8091 if_type);
8092 break;
8093 }
8094 }
8095
8096 /**
8097 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
8098 * @phba: pointer to lpfc hba data structure.
8099 * @vf: virtual function number
8100 *
8101 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
8102 * based on the given virtual function number, @vf.
8103 *
8104 * Return 0 if successful, otherwise -ENODEV.
8105 **/
8106 static int
8107 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8108 {
8109 if (vf > LPFC_VIR_FUNC_MAX)
8110 return -ENODEV;
8111
8112 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8113 vf * LPFC_VFR_PAGE_SIZE +
8114 LPFC_ULP0_RQ_DOORBELL);
8115 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8116 vf * LPFC_VFR_PAGE_SIZE +
8117 LPFC_ULP0_WQ_DOORBELL);
8118 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8119 vf * LPFC_VFR_PAGE_SIZE +
8120 LPFC_EQCQ_DOORBELL);
8121 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8122 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8123 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8124 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8125 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
8126 return 0;
8127 }
8128
8129 /**
8130 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
8131 * @phba: pointer to lpfc hba data structure.
8132 *
8133 * This routine is invoked to create the bootstrap mailbox
8134 * region consistent with the SLI-4 interface spec. This
8135 * routine allocates all memory necessary to communicate
8136 * mailbox commands to the port and sets up all alignment
8137 * needs. No locks are expected to be held when calling
8138 * this routine.
8139 *
8140 * Return codes
8141 * 0 - successful
8142 * -ENOMEM - could not allocate memory.
8143 **/
8144 static int
8145 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
8146 {
8147 uint32_t bmbx_size;
8148 struct lpfc_dmabuf *dmabuf;
8149 struct dma_address *dma_address;
8150 uint32_t pa_addr;
8151 uint64_t phys_addr;
8152
8153 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8154 if (!dmabuf)
8155 return -ENOMEM;
8156
8157 /*
8158 * The bootstrap mailbox region is comprised of 2 parts
8159 * plus an alignment restriction of 16 bytes.
8160 */
8161 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
8162 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8163 &dmabuf->phys, GFP_KERNEL);
8164 if (!dmabuf->virt) {
8165 kfree(dmabuf);
8166 return -ENOMEM;
8167 }
8168
8169 /*
8170 * Initialize the bootstrap mailbox pointers now so that the register
8171 * operations are simple later. The mailbox dma address is required
8172 * to be 16-byte aligned. Also align the virtual memory as each
8173 * mailbox is copied into the bmbx mailbox region before issuing the
8174 * command to the port.
8175 */
8176 phba->sli4_hba.bmbx.dmabuf = dmabuf;
8177 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8178
8179 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8180 LPFC_ALIGN_16_BYTE);
8181 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8182 LPFC_ALIGN_16_BYTE);
8183
8184 /*
8185 * Set the high and low physical addresses now. The SLI4 alignment
8186 * requirement is 16 bytes and the mailbox is posted to the port
8187 * as two 30-bit addresses. The other data is a bit marking whether
8188 * the 30-bit address is the high or low address.
8189 * Upcast bmbx aphys to 64bits so shift instruction compiles
8190 * clean on 32 bit machines.
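* For example, with a 16-byte aligned aphys the low window is
* (aphys >> 4) & 0x3fffffff (physical address bits 33:4) and the high
* window is (aphys >> 34) & 0x3fffffff (bits 63:34); each window is
* shifted left by 2 and tagged with the BIT1 high/low marker before
* being posted to the port's bootstrap mailbox register.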
8191 */
8192 dma_address = &phba->sli4_hba.bmbx.dma_address;
8193 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8194 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8195 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8196 LPFC_BMBX_BIT1_ADDR_HI);
8197
8198 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8199 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8200 LPFC_BMBX_BIT1_ADDR_LO);
8201 return 0;
8202 }
8203
8204 /**
8205 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
8206 * @phba: pointer to lpfc hba data structure.
8207 *
8208 * This routine is invoked to teardown the bootstrap mailbox
8209 * region and release all host resources. This routine requires
8210 * the caller to ensure all mailbox commands have been recovered, that no
8211 * additional mailbox commands are sent, and that interrupts are disabled
8212 * before calling this routine.
8213 *
8214 **/
8215 static void
8216 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8217 {
8218 dma_free_coherent(&phba->pcidev->dev,
8219 phba->sli4_hba.bmbx.bmbx_size,
8220 phba->sli4_hba.bmbx.dmabuf->virt,
8221 phba->sli4_hba.bmbx.dmabuf->phys);
8222
8223 kfree(phba->sli4_hba.bmbx.dmabuf);
8224 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8225 }
8226
8227 /**
8228 * lpfc_sli4_read_config - Get the config parameters.
8229 * @phba: pointer to lpfc hba data structure.
8230 *
8231 * This routine is invoked to read the configuration parameters from the HBA.
8232 * The configuration parameters are used to set the base and maximum values
8233 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
8234 * allocation for the port.
8235 *
8236 * Return codes
8237 * 0 - successful
8238 * -ENOMEM - No available memory
8239 * -EIO - The mailbox failed to complete successfully.
8240 **/ 8241 int 8242 lpfc_sli4_read_config(struct lpfc_hba *phba) 8243 { 8244 LPFC_MBOXQ_t *pmb; 8245 struct lpfc_mbx_read_config *rd_config; 8246 union lpfc_sli4_cfg_shdr *shdr; 8247 uint32_t shdr_status, shdr_add_status; 8248 struct lpfc_mbx_get_func_cfg *get_func_cfg; 8249 struct lpfc_rsrc_desc_fcfcoe *desc; 8250 char *pdesc_0; 8251 uint16_t forced_link_speed; 8252 uint32_t if_type, qmin; 8253 int length, i, rc = 0, rc2; 8254 8255 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8256 if (!pmb) { 8257 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8258 "2011 Unable to allocate memory for issuing " 8259 "SLI_CONFIG_SPECIAL mailbox command\n"); 8260 return -ENOMEM; 8261 } 8262 8263 lpfc_read_config(phba, pmb); 8264 8265 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8266 if (rc != MBX_SUCCESS) { 8267 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8268 "2012 Mailbox failed , mbxCmd x%x " 8269 "READ_CONFIG, mbxStatus x%x\n", 8270 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8271 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8272 rc = -EIO; 8273 } else { 8274 rd_config = &pmb->u.mqe.un.rd_config; 8275 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 8276 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 8277 phba->sli4_hba.lnk_info.lnk_tp = 8278 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 8279 phba->sli4_hba.lnk_info.lnk_no = 8280 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 8281 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8282 "3081 lnk_type:%d, lnk_numb:%d\n", 8283 phba->sli4_hba.lnk_info.lnk_tp, 8284 phba->sli4_hba.lnk_info.lnk_no); 8285 } else 8286 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8287 "3082 Mailbox (x%x) returned ldv:x0\n", 8288 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 8289 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 8290 phba->bbcredit_support = 1; 8291 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 8292 } 8293 8294 phba->sli4_hba.conf_trunk = 8295 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 8296 phba->sli4_hba.extents_in_use = 8297 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 8298 phba->sli4_hba.max_cfg_param.max_xri = 8299 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 8300 phba->sli4_hba.max_cfg_param.xri_base = 8301 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 8302 phba->sli4_hba.max_cfg_param.max_vpi = 8303 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 8304 /* Limit the max we support */ 8305 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 8306 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 8307 phba->sli4_hba.max_cfg_param.vpi_base = 8308 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 8309 phba->sli4_hba.max_cfg_param.max_rpi = 8310 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 8311 phba->sli4_hba.max_cfg_param.rpi_base = 8312 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 8313 phba->sli4_hba.max_cfg_param.max_vfi = 8314 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 8315 phba->sli4_hba.max_cfg_param.vfi_base = 8316 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 8317 phba->sli4_hba.max_cfg_param.max_fcfi = 8318 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 8319 phba->sli4_hba.max_cfg_param.max_eq = 8320 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 8321 phba->sli4_hba.max_cfg_param.max_rq = 8322 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 8323 phba->sli4_hba.max_cfg_param.max_wq = 8324 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 8325 phba->sli4_hba.max_cfg_param.max_cq = 8326 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 8327 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 8328 phba->sli4_hba.next_xri = 
phba->sli4_hba.max_cfg_param.xri_base; 8329 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 8330 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 8331 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 8332 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 8333 phba->max_vports = phba->max_vpi; 8334 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8335 "2003 cfg params Extents? %d " 8336 "XRI(B:%d M:%d), " 8337 "VPI(B:%d M:%d) " 8338 "VFI(B:%d M:%d) " 8339 "RPI(B:%d M:%d) " 8340 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", 8341 phba->sli4_hba.extents_in_use, 8342 phba->sli4_hba.max_cfg_param.xri_base, 8343 phba->sli4_hba.max_cfg_param.max_xri, 8344 phba->sli4_hba.max_cfg_param.vpi_base, 8345 phba->sli4_hba.max_cfg_param.max_vpi, 8346 phba->sli4_hba.max_cfg_param.vfi_base, 8347 phba->sli4_hba.max_cfg_param.max_vfi, 8348 phba->sli4_hba.max_cfg_param.rpi_base, 8349 phba->sli4_hba.max_cfg_param.max_rpi, 8350 phba->sli4_hba.max_cfg_param.max_fcfi, 8351 phba->sli4_hba.max_cfg_param.max_eq, 8352 phba->sli4_hba.max_cfg_param.max_cq, 8353 phba->sli4_hba.max_cfg_param.max_wq, 8354 phba->sli4_hba.max_cfg_param.max_rq); 8355 8356 /* 8357 * Calculate queue resources based on how 8358 * many WQ/CQ/EQs are available. 8359 */ 8360 qmin = phba->sli4_hba.max_cfg_param.max_wq; 8361 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 8362 qmin = phba->sli4_hba.max_cfg_param.max_cq; 8363 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 8364 qmin = phba->sli4_hba.max_cfg_param.max_eq; 8365 /* 8366 * Whats left after this can go toward NVME / FCP. 8367 * The minus 4 accounts for ELS, NVME LS, MBOX 8368 * plus one extra. When configured for 8369 * NVMET, FCP io channel WQs are not created. 8370 */ 8371 qmin -= 4; 8372 8373 /* If NVME is configured, double the number of CQ/WQs needed */ 8374 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 8375 !phba->nvmet_support) 8376 qmin /= 2; 8377 8378 /* Check to see if there is enough for NVME */ 8379 if ((phba->cfg_irq_chann > qmin) || 8380 (phba->cfg_hdw_queue > qmin)) { 8381 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8382 "2005 Reducing Queues: " 8383 "WQ %d CQ %d EQ %d: min %d: " 8384 "IRQ %d HDWQ %d\n", 8385 phba->sli4_hba.max_cfg_param.max_wq, 8386 phba->sli4_hba.max_cfg_param.max_cq, 8387 phba->sli4_hba.max_cfg_param.max_eq, 8388 qmin, phba->cfg_irq_chann, 8389 phba->cfg_hdw_queue); 8390 8391 if (phba->cfg_irq_chann > qmin) 8392 phba->cfg_irq_chann = qmin; 8393 if (phba->cfg_hdw_queue > qmin) 8394 phba->cfg_hdw_queue = qmin; 8395 } 8396 } 8397 8398 if (rc) 8399 goto read_cfg_out; 8400 8401 /* Update link speed if forced link speed is supported */ 8402 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8403 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 8404 forced_link_speed = 8405 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 8406 if (forced_link_speed) { 8407 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 8408 8409 switch (forced_link_speed) { 8410 case LINK_SPEED_1G: 8411 phba->cfg_link_speed = 8412 LPFC_USER_LINK_SPEED_1G; 8413 break; 8414 case LINK_SPEED_2G: 8415 phba->cfg_link_speed = 8416 LPFC_USER_LINK_SPEED_2G; 8417 break; 8418 case LINK_SPEED_4G: 8419 phba->cfg_link_speed = 8420 LPFC_USER_LINK_SPEED_4G; 8421 break; 8422 case LINK_SPEED_8G: 8423 phba->cfg_link_speed = 8424 LPFC_USER_LINK_SPEED_8G; 8425 break; 8426 case LINK_SPEED_10G: 8427 phba->cfg_link_speed = 8428 LPFC_USER_LINK_SPEED_10G; 8429 break; 8430 case LINK_SPEED_16G: 8431 phba->cfg_link_speed = 8432 LPFC_USER_LINK_SPEED_16G; 8433 break; 8434 case LINK_SPEED_32G: 8435 
phba->cfg_link_speed = 8436 LPFC_USER_LINK_SPEED_32G; 8437 break; 8438 case LINK_SPEED_64G: 8439 phba->cfg_link_speed = 8440 LPFC_USER_LINK_SPEED_64G; 8441 break; 8442 case 0xffff: 8443 phba->cfg_link_speed = 8444 LPFC_USER_LINK_SPEED_AUTO; 8445 break; 8446 default: 8447 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8448 "0047 Unrecognized link " 8449 "speed : %d\n", 8450 forced_link_speed); 8451 phba->cfg_link_speed = 8452 LPFC_USER_LINK_SPEED_AUTO; 8453 } 8454 } 8455 } 8456 8457 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 8458 length = phba->sli4_hba.max_cfg_param.max_xri - 8459 lpfc_sli4_get_els_iocb_cnt(phba); 8460 if (phba->cfg_hba_queue_depth > length) { 8461 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8462 "3361 HBA queue depth changed from %d to %d\n", 8463 phba->cfg_hba_queue_depth, length); 8464 phba->cfg_hba_queue_depth = length; 8465 } 8466 8467 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 8468 LPFC_SLI_INTF_IF_TYPE_2) 8469 goto read_cfg_out; 8470 8471 /* get the pf# and vf# for SLI4 if_type 2 port */ 8472 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 8473 sizeof(struct lpfc_sli4_cfg_mhdr)); 8474 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 8475 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 8476 length, LPFC_SLI4_MBX_EMBED); 8477 8478 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8479 shdr = (union lpfc_sli4_cfg_shdr *) 8480 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 8481 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8482 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8483 if (rc2 || shdr_status || shdr_add_status) { 8484 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8485 "3026 Mailbox failed , mbxCmd x%x " 8486 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 8487 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8488 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8489 goto read_cfg_out; 8490 } 8491 8492 /* search for fc_fcoe resrouce descriptor */ 8493 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 8494 8495 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 8496 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 8497 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 8498 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 8499 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 8500 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 8501 goto read_cfg_out; 8502 8503 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 8504 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 8505 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 8506 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 8507 phba->sli4_hba.iov.pf_number = 8508 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 8509 phba->sli4_hba.iov.vf_number = 8510 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 8511 break; 8512 } 8513 } 8514 8515 if (i < LPFC_RSRC_DESC_MAX_NUM) 8516 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8517 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 8518 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 8519 phba->sli4_hba.iov.vf_number); 8520 else 8521 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8522 "3028 GET_FUNCTION_CONFIG: failed to find " 8523 "Resource Descriptor:x%x\n", 8524 LPFC_RSRC_DESC_TYPE_FCFCOE); 8525 8526 read_cfg_out: 8527 mempool_free(pmb, phba->mbox_mem_pool); 8528 return rc; 8529 } 8530 8531 /** 8532 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 8533 * @phba: pointer to lpfc hba data structure. 8534 * 8535 * This routine is invoked to setup the port-side endian order when 8536 * the port if_type is 0. This routine has no function for other 8537 * if_types. 
8538 * 8539 * Return codes 8540 * 0 - successful 8541 * -ENOMEM - No available memory 8542 * -EIO - The mailbox failed to complete successfully. 8543 **/ 8544 static int 8545 lpfc_setup_endian_order(struct lpfc_hba *phba) 8546 { 8547 LPFC_MBOXQ_t *mboxq; 8548 uint32_t if_type, rc = 0; 8549 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 8550 HOST_ENDIAN_HIGH_WORD1}; 8551 8552 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8553 switch (if_type) { 8554 case LPFC_SLI_INTF_IF_TYPE_0: 8555 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8556 GFP_KERNEL); 8557 if (!mboxq) { 8558 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8559 "0492 Unable to allocate memory for " 8560 "issuing SLI_CONFIG_SPECIAL mailbox " 8561 "command\n"); 8562 return -ENOMEM; 8563 } 8564 8565 /* 8566 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 8567 * two words to contain special data values and no other data. 8568 */ 8569 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 8570 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8571 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8572 if (rc != MBX_SUCCESS) { 8573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8574 "0493 SLI_CONFIG_SPECIAL mailbox " 8575 "failed with status x%x\n", 8576 rc); 8577 rc = -EIO; 8578 } 8579 mempool_free(mboxq, phba->mbox_mem_pool); 8580 break; 8581 case LPFC_SLI_INTF_IF_TYPE_6: 8582 case LPFC_SLI_INTF_IF_TYPE_2: 8583 case LPFC_SLI_INTF_IF_TYPE_1: 8584 default: 8585 break; 8586 } 8587 return rc; 8588 } 8589 8590 /** 8591 * lpfc_sli4_queue_verify - Verify and update EQ counts 8592 * @phba: pointer to lpfc hba data structure. 8593 * 8594 * This routine is invoked to check the user settable queue counts for EQs. 8595 * After this routine is called the counts will be set to valid values that 8596 * adhere to the constraints of the system's interrupt vectors and the port's 8597 * queue resources. 
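* For example, when NVMET is enabled and cfg_nvmet_mrq exceeds the number
* of IRQ channels, it is clamped to cfg_irq_chann (and further capped at
* LPFC_NVMET_MRQ_MAX) by the checks below.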
8598 * 8599 * Return codes 8600 * 0 - successful 8601 * -ENOMEM - No available memory 8602 **/ 8603 static int 8604 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 8605 { 8606 /* 8607 * Sanity check for configured queue parameters against the run-time 8608 * device parameters 8609 */ 8610 8611 if (phba->nvmet_support) { 8612 if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq) 8613 phba->cfg_nvmet_mrq = phba->cfg_irq_chann; 8614 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 8615 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 8616 } 8617 8618 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8619 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 8620 phba->cfg_hdw_queue, phba->cfg_irq_chann, 8621 phba->cfg_nvmet_mrq); 8622 8623 /* Get EQ depth from module parameter, fake the default for now */ 8624 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8625 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8626 8627 /* Get CQ depth from module parameter, fake the default for now */ 8628 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8629 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8630 return 0; 8631 } 8632 8633 static int 8634 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) 8635 { 8636 struct lpfc_queue *qdesc; 8637 int cpu; 8638 8639 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); 8640 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8641 phba->sli4_hba.cq_esize, 8642 LPFC_CQE_EXP_COUNT, cpu); 8643 if (!qdesc) { 8644 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8645 "0508 Failed allocate fast-path NVME CQ (%d)\n", 8646 wqidx); 8647 return 1; 8648 } 8649 qdesc->qe_valid = 1; 8650 qdesc->hdwq = wqidx; 8651 qdesc->chann = cpu; 8652 phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc; 8653 8654 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8655 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT, 8656 cpu); 8657 if (!qdesc) { 8658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8659 "0509 Failed allocate fast-path NVME WQ (%d)\n", 8660 wqidx); 8661 return 1; 8662 } 8663 qdesc->hdwq = wqidx; 8664 qdesc->chann = wqidx; 8665 phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc; 8666 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8667 return 0; 8668 } 8669 8670 static int 8671 lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) 8672 { 8673 struct lpfc_queue *qdesc; 8674 uint32_t wqesize; 8675 int cpu; 8676 8677 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); 8678 /* Create Fast Path FCP CQs */ 8679 if (phba->enab_exp_wqcq_pages) 8680 /* Increase the CQ size when WQEs contain an embedded cdb */ 8681 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8682 phba->sli4_hba.cq_esize, 8683 LPFC_CQE_EXP_COUNT, cpu); 8684 8685 else 8686 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8687 phba->sli4_hba.cq_esize, 8688 phba->sli4_hba.cq_ecount, cpu); 8689 if (!qdesc) { 8690 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8691 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); 8692 return 1; 8693 } 8694 qdesc->qe_valid = 1; 8695 qdesc->hdwq = wqidx; 8696 qdesc->chann = cpu; 8697 phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc; 8698 8699 /* Create Fast Path FCP WQs */ 8700 if (phba->enab_exp_wqcq_pages) { 8701 /* Increase the WQ size when WQEs contain an embedded cdb */ 8702 wqesize = (phba->fcp_embed_io) ? 
8703 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 8704 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8705 wqesize, 8706 LPFC_WQE_EXP_COUNT, cpu); 8707 } else 8708 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8709 phba->sli4_hba.wq_esize, 8710 phba->sli4_hba.wq_ecount, cpu); 8711 8712 if (!qdesc) { 8713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8714 "0503 Failed allocate fast-path FCP WQ (%d)\n", 8715 wqidx); 8716 return 1; 8717 } 8718 qdesc->hdwq = wqidx; 8719 qdesc->chann = wqidx; 8720 phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc; 8721 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8722 return 0; 8723 } 8724 8725 /** 8726 * lpfc_sli4_queue_create - Create all the SLI4 queues 8727 * @phba: pointer to lpfc hba data structure. 8728 * 8729 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 8730 * operation. For each SLI4 queue type, the parameters such as queue entry 8731 * count (queue depth) shall be taken from the module parameter. For now, 8732 * we just use some constant number as place holder. 8733 * 8734 * Return codes 8735 * 0 - successful 8736 * -ENOMEM - No availble memory 8737 * -EIO - The mailbox failed to complete successfully. 8738 **/ 8739 int 8740 lpfc_sli4_queue_create(struct lpfc_hba *phba) 8741 { 8742 struct lpfc_queue *qdesc; 8743 int idx, eqidx, cpu; 8744 struct lpfc_sli4_hdw_queue *qp; 8745 struct lpfc_eq_intr_info *eqi; 8746 8747 /* 8748 * Create HBA Record arrays. 8749 * Both NVME and FCP will share that same vectors / EQs 8750 */ 8751 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 8752 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 8753 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 8754 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 8755 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 8756 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 8757 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8758 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8759 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8760 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8761 8762 if (!phba->sli4_hba.hdwq) { 8763 phba->sli4_hba.hdwq = kcalloc( 8764 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 8765 GFP_KERNEL); 8766 if (!phba->sli4_hba.hdwq) { 8767 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8768 "6427 Failed allocate memory for " 8769 "fast-path Hardware Queue array\n"); 8770 goto out_error; 8771 } 8772 /* Prepare hardware queues to take IO buffers */ 8773 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8774 qp = &phba->sli4_hba.hdwq[idx]; 8775 spin_lock_init(&qp->io_buf_list_get_lock); 8776 spin_lock_init(&qp->io_buf_list_put_lock); 8777 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 8778 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 8779 qp->get_io_bufs = 0; 8780 qp->put_io_bufs = 0; 8781 qp->total_io_bufs = 0; 8782 spin_lock_init(&qp->abts_scsi_buf_list_lock); 8783 INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list); 8784 qp->abts_scsi_io_bufs = 0; 8785 spin_lock_init(&qp->abts_nvme_buf_list_lock); 8786 INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list); 8787 qp->abts_nvme_io_bufs = 0; 8788 } 8789 } 8790 8791 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8792 if (phba->nvmet_support) { 8793 phba->sli4_hba.nvmet_cqset = kcalloc( 8794 phba->cfg_nvmet_mrq, 8795 sizeof(struct lpfc_queue *), 8796 GFP_KERNEL); 8797 if (!phba->sli4_hba.nvmet_cqset) { 8798 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8799 "3121 Fail allocate memory for " 8800 "fast-path CQ set array\n"); 8801 goto out_error; 8802 } 8803 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 8804 phba->cfg_nvmet_mrq, 
8805 sizeof(struct lpfc_queue *), 8806 GFP_KERNEL); 8807 if (!phba->sli4_hba.nvmet_mrq_hdr) { 8808 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8809 "3122 Fail allocate memory for " 8810 "fast-path RQ set hdr array\n"); 8811 goto out_error; 8812 } 8813 phba->sli4_hba.nvmet_mrq_data = kcalloc( 8814 phba->cfg_nvmet_mrq, 8815 sizeof(struct lpfc_queue *), 8816 GFP_KERNEL); 8817 if (!phba->sli4_hba.nvmet_mrq_data) { 8818 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8819 "3124 Fail allocate memory for " 8820 "fast-path RQ set data array\n"); 8821 goto out_error; 8822 } 8823 } 8824 } 8825 8826 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8827 8828 /* Create HBA Event Queues (EQs) */ 8829 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8830 /* determine EQ affinity */ 8831 eqidx = lpfc_find_eq_handle(phba, idx); 8832 cpu = lpfc_find_cpu_handle(phba, eqidx, LPFC_FIND_BY_EQ); 8833 /* 8834 * If there are more Hardware Queues than available 8835 * EQs, multiple Hardware Queues may share a common EQ. 8836 */ 8837 if (idx >= phba->cfg_irq_chann) { 8838 /* Share an existing EQ */ 8839 phba->sli4_hba.hdwq[idx].hba_eq = 8840 phba->sli4_hba.hdwq[eqidx].hba_eq; 8841 continue; 8842 } 8843 /* Create an EQ */ 8844 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8845 phba->sli4_hba.eq_esize, 8846 phba->sli4_hba.eq_ecount, cpu); 8847 if (!qdesc) { 8848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8849 "0497 Failed allocate EQ (%d)\n", idx); 8850 goto out_error; 8851 } 8852 qdesc->qe_valid = 1; 8853 qdesc->hdwq = idx; 8854 8855 /* Save the CPU this EQ is affinitised to */ 8856 qdesc->chann = cpu; 8857 phba->sli4_hba.hdwq[idx].hba_eq = qdesc; 8858 qdesc->last_cpu = qdesc->chann; 8859 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 8860 list_add(&qdesc->cpu_list, &eqi->list); 8861 } 8862 8863 8864 /* Allocate SCSI SLI4 CQ/WQs */ 8865 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8866 if (lpfc_alloc_fcp_wq_cq(phba, idx)) 8867 goto out_error; 8868 } 8869 8870 /* Allocate NVME SLI4 CQ/WQs */ 8871 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8872 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8873 if (lpfc_alloc_nvme_wq_cq(phba, idx)) 8874 goto out_error; 8875 } 8876 8877 if (phba->nvmet_support) { 8878 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8879 cpu = lpfc_find_cpu_handle(phba, idx, 8880 LPFC_FIND_BY_HDWQ); 8881 qdesc = lpfc_sli4_queue_alloc( 8882 phba, 8883 LPFC_DEFAULT_PAGE_SIZE, 8884 phba->sli4_hba.cq_esize, 8885 phba->sli4_hba.cq_ecount, 8886 cpu); 8887 if (!qdesc) { 8888 lpfc_printf_log( 8889 phba, KERN_ERR, LOG_INIT, 8890 "3142 Failed allocate NVME " 8891 "CQ Set (%d)\n", idx); 8892 goto out_error; 8893 } 8894 qdesc->qe_valid = 1; 8895 qdesc->hdwq = idx; 8896 qdesc->chann = cpu; 8897 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 8898 } 8899 } 8900 } 8901 8902 /* 8903 * Create Slow Path Completion Queues (CQs) 8904 */ 8905 8906 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 8907 /* Create slow-path Mailbox Command Complete Queue */ 8908 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8909 phba->sli4_hba.cq_esize, 8910 phba->sli4_hba.cq_ecount, cpu); 8911 if (!qdesc) { 8912 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8913 "0500 Failed allocate slow-path mailbox CQ\n"); 8914 goto out_error; 8915 } 8916 qdesc->qe_valid = 1; 8917 phba->sli4_hba.mbx_cq = qdesc; 8918 8919 /* Create slow-path ELS Complete Queue */ 8920 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8921 phba->sli4_hba.cq_esize, 8922 phba->sli4_hba.cq_ecount, cpu); 8923 if (!qdesc) { 8924 
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8925 "0501 Failed allocate slow-path ELS CQ\n"); 8926 goto out_error; 8927 } 8928 qdesc->qe_valid = 1; 8929 qdesc->chann = 0; 8930 phba->sli4_hba.els_cq = qdesc; 8931 8932 8933 /* 8934 * Create Slow Path Work Queues (WQs) 8935 */ 8936 8937 /* Create Mailbox Command Queue */ 8938 8939 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8940 phba->sli4_hba.mq_esize, 8941 phba->sli4_hba.mq_ecount, cpu); 8942 if (!qdesc) { 8943 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8944 "0505 Failed allocate slow-path MQ\n"); 8945 goto out_error; 8946 } 8947 qdesc->chann = 0; 8948 phba->sli4_hba.mbx_wq = qdesc; 8949 8950 /* 8951 * Create ELS Work Queues 8952 */ 8953 8954 /* Create slow-path ELS Work Queue */ 8955 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8956 phba->sli4_hba.wq_esize, 8957 phba->sli4_hba.wq_ecount, cpu); 8958 if (!qdesc) { 8959 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8960 "0504 Failed allocate slow-path ELS WQ\n"); 8961 goto out_error; 8962 } 8963 qdesc->chann = 0; 8964 phba->sli4_hba.els_wq = qdesc; 8965 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8966 8967 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8968 /* Create NVME LS Complete Queue */ 8969 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8970 phba->sli4_hba.cq_esize, 8971 phba->sli4_hba.cq_ecount, cpu); 8972 if (!qdesc) { 8973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8974 "6079 Failed allocate NVME LS CQ\n"); 8975 goto out_error; 8976 } 8977 qdesc->chann = 0; 8978 qdesc->qe_valid = 1; 8979 phba->sli4_hba.nvmels_cq = qdesc; 8980 8981 /* Create NVME LS Work Queue */ 8982 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8983 phba->sli4_hba.wq_esize, 8984 phba->sli4_hba.wq_ecount, cpu); 8985 if (!qdesc) { 8986 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8987 "6080 Failed allocate NVME LS WQ\n"); 8988 goto out_error; 8989 } 8990 qdesc->chann = 0; 8991 phba->sli4_hba.nvmels_wq = qdesc; 8992 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8993 } 8994 8995 /* 8996 * Create Receive Queue (RQ) 8997 */ 8998 8999 /* Create Receive Queue for header */ 9000 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9001 phba->sli4_hba.rq_esize, 9002 phba->sli4_hba.rq_ecount, cpu); 9003 if (!qdesc) { 9004 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9005 "0506 Failed allocate receive HRQ\n"); 9006 goto out_error; 9007 } 9008 phba->sli4_hba.hdr_rq = qdesc; 9009 9010 /* Create Receive Queue for data */ 9011 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9012 phba->sli4_hba.rq_esize, 9013 phba->sli4_hba.rq_ecount, cpu); 9014 if (!qdesc) { 9015 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9016 "0507 Failed allocate receive DRQ\n"); 9017 goto out_error; 9018 } 9019 phba->sli4_hba.dat_rq = qdesc; 9020 9021 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 9022 phba->nvmet_support) { 9023 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9024 cpu = lpfc_find_cpu_handle(phba, idx, 9025 LPFC_FIND_BY_HDWQ); 9026 /* Create NVMET Receive Queue for header */ 9027 qdesc = lpfc_sli4_queue_alloc(phba, 9028 LPFC_DEFAULT_PAGE_SIZE, 9029 phba->sli4_hba.rq_esize, 9030 LPFC_NVMET_RQE_DEF_COUNT, 9031 cpu); 9032 if (!qdesc) { 9033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9034 "3146 Failed allocate " 9035 "receive HRQ\n"); 9036 goto out_error; 9037 } 9038 qdesc->hdwq = idx; 9039 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 9040 9041 /* Only needed for header of RQ pair */ 9042 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 9043 
GFP_KERNEL, 9044 cpu_to_node(cpu)); 9045 if (qdesc->rqbp == NULL) { 9046 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9047 "6131 Failed allocate " 9048 "Header RQBP\n"); 9049 goto out_error; 9050 } 9051 9052 /* Put list in known state in case driver load fails. */ 9053 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 9054 9055 /* Create NVMET Receive Queue for data */ 9056 qdesc = lpfc_sli4_queue_alloc(phba, 9057 LPFC_DEFAULT_PAGE_SIZE, 9058 phba->sli4_hba.rq_esize, 9059 LPFC_NVMET_RQE_DEF_COUNT, 9060 cpu); 9061 if (!qdesc) { 9062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9063 "3156 Failed allocate " 9064 "receive DRQ\n"); 9065 goto out_error; 9066 } 9067 qdesc->hdwq = idx; 9068 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 9069 } 9070 } 9071 9072 #if defined(BUILD_NVME) 9073 /* Clear NVME stats */ 9074 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9075 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9076 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 9077 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 9078 } 9079 } 9080 #endif 9081 9082 /* Clear SCSI stats */ 9083 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 9084 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9085 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 9086 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 9087 } 9088 } 9089 9090 return 0; 9091 9092 out_error: 9093 lpfc_sli4_queue_destroy(phba); 9094 return -ENOMEM; 9095 } 9096 9097 static inline void 9098 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 9099 { 9100 if (*qp != NULL) { 9101 lpfc_sli4_queue_free(*qp); 9102 *qp = NULL; 9103 } 9104 } 9105 9106 static inline void 9107 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 9108 { 9109 int idx; 9110 9111 if (*qs == NULL) 9112 return; 9113 9114 for (idx = 0; idx < max; idx++) 9115 __lpfc_sli4_release_queue(&(*qs)[idx]); 9116 9117 kfree(*qs); 9118 *qs = NULL; 9119 } 9120 9121 static inline void 9122 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 9123 { 9124 struct lpfc_sli4_hdw_queue *hdwq; 9125 uint32_t idx; 9126 9127 hdwq = phba->sli4_hba.hdwq; 9128 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9129 if (idx < phba->cfg_irq_chann) 9130 lpfc_sli4_queue_free(hdwq[idx].hba_eq); 9131 hdwq[idx].hba_eq = NULL; 9132 9133 lpfc_sli4_queue_free(hdwq[idx].fcp_cq); 9134 lpfc_sli4_queue_free(hdwq[idx].nvme_cq); 9135 lpfc_sli4_queue_free(hdwq[idx].fcp_wq); 9136 lpfc_sli4_queue_free(hdwq[idx].nvme_wq); 9137 hdwq[idx].fcp_cq = NULL; 9138 hdwq[idx].nvme_cq = NULL; 9139 hdwq[idx].fcp_wq = NULL; 9140 hdwq[idx].nvme_wq = NULL; 9141 } 9142 } 9143 9144 /** 9145 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 9146 * @phba: pointer to lpfc hba data structure. 9147 * 9148 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 9149 * operation. 9150 * 9151 * Return codes 9152 * 0 - successful 9153 * -ENOMEM - No available memory 9154 * -EIO - The mailbox failed to complete successfully. 9155 **/ 9156 void 9157 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 9158 { 9159 /* 9160 * Set FREE_INIT before beginning to free the queues. 9161 * Wait until the users of queues to acknowledge to 9162 * release queues by clearing FREE_WAIT. 
 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
		spin_unlock_irq(&phba->hbalock);
		msleep(20);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Release HBA eqs */
	if (phba->sli4_hba.hdwq)
		lpfc_sli4_release_hdwq(phba);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);

		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on this list has been freed */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Done with freeing the queues */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
	spin_unlock_irq(&phba->hbalock);
}

/* Free all receive buffers still posted to the RQ's buffer list */
int
lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_dmabuf *h_buf;
	struct rqb_dmabuf *rqb_buffer;

	rqbp = rq->rqbp;
	while (!list_empty(&rqbp->rqb_buffer_list)) {
		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
				 struct lpfc_dmabuf, list);

		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
		rqbp->buffer_count--;
	}
	return 1;
}

static int
lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		  struct lpfc_queue *cq, struct lpfc_queue *wq,
		  uint16_t *cq_map, int qidx, uint32_t qtype)
{
	struct lpfc_sli_ring *pring;
	int rc;

	if (!eq || !cq || !wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6085 Fast-path %s (%d) not allocated\n",
				((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
		return -ENOMEM;
	}

	/* Create the CQ first */
	rc = lpfc_cq_create(phba, cq, eq,
			    (qtype == LPFC_MBOX) ?
LPFC_MCQ : LPFC_WCQ, qtype); 9255 if (rc) { 9256 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9257 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9258 qidx, (uint32_t)rc); 9259 return rc; 9260 } 9261 9262 if (qtype != LPFC_MBOX) { 9263 /* Setup cq_map for fast lookup */ 9264 if (cq_map) 9265 *cq_map = cq->queue_id; 9266 9267 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9268 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 9269 qidx, cq->queue_id, qidx, eq->queue_id); 9270 9271 /* create the wq */ 9272 rc = lpfc_wq_create(phba, wq, cq, qtype); 9273 if (rc) { 9274 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9275 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 9276 qidx, (uint32_t)rc); 9277 /* no need to tear down cq - caller will do so */ 9278 return rc; 9279 } 9280 9281 /* Bind this CQ/WQ to the NVME ring */ 9282 pring = wq->pring; 9283 pring->sli.sli4.wqp = (void *)wq; 9284 cq->pring = pring; 9285 9286 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9287 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 9288 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 9289 } else { 9290 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 9291 if (rc) { 9292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9293 "0539 Failed setup of slow-path MQ: " 9294 "rc = 0x%x\n", rc); 9295 /* no need to tear down cq - caller will do so */ 9296 return rc; 9297 } 9298 9299 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9300 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 9301 phba->sli4_hba.mbx_wq->queue_id, 9302 phba->sli4_hba.mbx_cq->queue_id); 9303 } 9304 9305 return 0; 9306 } 9307 9308 /** 9309 * lpfc_setup_cq_lookup - Setup the CQ lookup table 9310 * @phba: pointer to lpfc hba data structure. 9311 * 9312 * This routine will populate the cq_lookup table by all 9313 * available CQ queue_id's. 9314 **/ 9315 static void 9316 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 9317 { 9318 struct lpfc_queue *eq, *childq; 9319 struct lpfc_sli4_hdw_queue *qp; 9320 int qidx; 9321 9322 qp = phba->sli4_hba.hdwq; 9323 memset(phba->sli4_hba.cq_lookup, 0, 9324 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 9325 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9326 eq = qp[qidx].hba_eq; 9327 if (!eq) 9328 continue; 9329 list_for_each_entry(childq, &eq->child_list, list) { 9330 if (childq->queue_id > phba->sli4_hba.cq_max) 9331 continue; 9332 if ((childq->subtype == LPFC_FCP) || 9333 (childq->subtype == LPFC_NVME)) 9334 phba->sli4_hba.cq_lookup[childq->queue_id] = 9335 childq; 9336 } 9337 } 9338 } 9339 9340 /** 9341 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 9342 * @phba: pointer to lpfc hba data structure. 9343 * 9344 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 9345 * operation. 9346 * 9347 * Return codes 9348 * 0 - successful 9349 * -ENOMEM - No available memory 9350 * -EIO - The mailbox failed to complete successfully. 
9351 **/ 9352 int 9353 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 9354 { 9355 uint32_t shdr_status, shdr_add_status; 9356 union lpfc_sli4_cfg_shdr *shdr; 9357 struct lpfc_sli4_hdw_queue *qp; 9358 LPFC_MBOXQ_t *mboxq; 9359 int qidx; 9360 uint32_t length, usdelay; 9361 int rc = -ENOMEM; 9362 9363 /* Check for dual-ULP support */ 9364 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9365 if (!mboxq) { 9366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9367 "3249 Unable to allocate memory for " 9368 "QUERY_FW_CFG mailbox command\n"); 9369 return -ENOMEM; 9370 } 9371 length = (sizeof(struct lpfc_mbx_query_fw_config) - 9372 sizeof(struct lpfc_sli4_cfg_mhdr)); 9373 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9374 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 9375 length, LPFC_SLI4_MBX_EMBED); 9376 9377 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9378 9379 shdr = (union lpfc_sli4_cfg_shdr *) 9380 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9381 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9382 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9383 if (shdr_status || shdr_add_status || rc) { 9384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9385 "3250 QUERY_FW_CFG mailbox failed with status " 9386 "x%x add_status x%x, mbx status x%x\n", 9387 shdr_status, shdr_add_status, rc); 9388 if (rc != MBX_TIMEOUT) 9389 mempool_free(mboxq, phba->mbox_mem_pool); 9390 rc = -ENXIO; 9391 goto out_error; 9392 } 9393 9394 phba->sli4_hba.fw_func_mode = 9395 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 9396 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 9397 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 9398 phba->sli4_hba.physical_port = 9399 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 9400 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9401 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 9402 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 9403 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 9404 9405 if (rc != MBX_TIMEOUT) 9406 mempool_free(mboxq, phba->mbox_mem_pool); 9407 9408 /* 9409 * Set up HBA Event Queues (EQs) 9410 */ 9411 qp = phba->sli4_hba.hdwq; 9412 9413 /* Set up HBA event queue */ 9414 if (!qp) { 9415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9416 "3147 Fast-path EQs not allocated\n"); 9417 rc = -ENOMEM; 9418 goto out_error; 9419 } 9420 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9421 if (!qp[qidx].hba_eq) { 9422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9423 "0522 Fast-path EQ (%d) not " 9424 "allocated\n", qidx); 9425 rc = -ENOMEM; 9426 goto out_destroy; 9427 } 9428 rc = lpfc_eq_create(phba, qp[qidx].hba_eq, 9429 phba->cfg_fcp_imax); 9430 if (rc) { 9431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9432 "0523 Failed setup of fast-path EQ " 9433 "(%d), rc = 0x%x\n", qidx, 9434 (uint32_t)rc); 9435 goto out_destroy; 9436 } 9437 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9438 "2584 HBA EQ setup: queue[%d]-id=%d\n", qidx, 9439 qp[qidx].hba_eq->queue_id); 9440 } 9441 9442 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9443 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9444 rc = lpfc_create_wq_cq(phba, 9445 qp[qidx].hba_eq, 9446 qp[qidx].nvme_cq, 9447 qp[qidx].nvme_wq, 9448 &phba->sli4_hba.hdwq[qidx].nvme_cq_map, 9449 qidx, LPFC_NVME); 9450 if (rc) { 9451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9452 "6123 Failed to setup fastpath " 9453 "NVME WQ/CQ (%d), rc = 0x%x\n", 9454 qidx, (uint32_t)rc); 9455 goto out_destroy; 9456 } 9457 } 9458 } 9459 9460 for (qidx = 0; qidx < 
phba->cfg_hdw_queue; qidx++) { 9461 rc = lpfc_create_wq_cq(phba, 9462 qp[qidx].hba_eq, 9463 qp[qidx].fcp_cq, 9464 qp[qidx].fcp_wq, 9465 &phba->sli4_hba.hdwq[qidx].fcp_cq_map, 9466 qidx, LPFC_FCP); 9467 if (rc) { 9468 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9469 "0535 Failed to setup fastpath " 9470 "FCP WQ/CQ (%d), rc = 0x%x\n", 9471 qidx, (uint32_t)rc); 9472 goto out_destroy; 9473 } 9474 } 9475 9476 /* 9477 * Set up Slow Path Complete Queues (CQs) 9478 */ 9479 9480 /* Set up slow-path MBOX CQ/MQ */ 9481 9482 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 9483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9484 "0528 %s not allocated\n", 9485 phba->sli4_hba.mbx_cq ? 9486 "Mailbox WQ" : "Mailbox CQ"); 9487 rc = -ENOMEM; 9488 goto out_destroy; 9489 } 9490 9491 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9492 phba->sli4_hba.mbx_cq, 9493 phba->sli4_hba.mbx_wq, 9494 NULL, 0, LPFC_MBOX); 9495 if (rc) { 9496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9497 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 9498 (uint32_t)rc); 9499 goto out_destroy; 9500 } 9501 if (phba->nvmet_support) { 9502 if (!phba->sli4_hba.nvmet_cqset) { 9503 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9504 "3165 Fast-path NVME CQ Set " 9505 "array not allocated\n"); 9506 rc = -ENOMEM; 9507 goto out_destroy; 9508 } 9509 if (phba->cfg_nvmet_mrq > 1) { 9510 rc = lpfc_cq_create_set(phba, 9511 phba->sli4_hba.nvmet_cqset, 9512 qp, 9513 LPFC_WCQ, LPFC_NVMET); 9514 if (rc) { 9515 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9516 "3164 Failed setup of NVME CQ " 9517 "Set, rc = 0x%x\n", 9518 (uint32_t)rc); 9519 goto out_destroy; 9520 } 9521 } else { 9522 /* Set up NVMET Receive Complete Queue */ 9523 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 9524 qp[0].hba_eq, 9525 LPFC_WCQ, LPFC_NVMET); 9526 if (rc) { 9527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9528 "6089 Failed setup NVMET CQ: " 9529 "rc = 0x%x\n", (uint32_t)rc); 9530 goto out_destroy; 9531 } 9532 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 9533 9534 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9535 "6090 NVMET CQ setup: cq-id=%d, " 9536 "parent eq-id=%d\n", 9537 phba->sli4_hba.nvmet_cqset[0]->queue_id, 9538 qp[0].hba_eq->queue_id); 9539 } 9540 } 9541 9542 /* Set up slow-path ELS WQ/CQ */ 9543 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 9544 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9545 "0530 ELS %s not allocated\n", 9546 phba->sli4_hba.els_cq ? "WQ" : "CQ"); 9547 rc = -ENOMEM; 9548 goto out_destroy; 9549 } 9550 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9551 phba->sli4_hba.els_cq, 9552 phba->sli4_hba.els_wq, 9553 NULL, 0, LPFC_ELS); 9554 if (rc) { 9555 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9556 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 9557 (uint32_t)rc); 9558 goto out_destroy; 9559 } 9560 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9561 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 9562 phba->sli4_hba.els_wq->queue_id, 9563 phba->sli4_hba.els_cq->queue_id); 9564 9565 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9566 /* Set up NVME LS Complete Queue */ 9567 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9568 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9569 "6091 LS %s not allocated\n", 9570 phba->sli4_hba.nvmels_cq ? 
"WQ" : "CQ"); 9571 rc = -ENOMEM; 9572 goto out_destroy; 9573 } 9574 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9575 phba->sli4_hba.nvmels_cq, 9576 phba->sli4_hba.nvmels_wq, 9577 NULL, 0, LPFC_NVME_LS); 9578 if (rc) { 9579 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9580 "0526 Failed setup of NVVME LS WQ/CQ: " 9581 "rc = 0x%x\n", (uint32_t)rc); 9582 goto out_destroy; 9583 } 9584 9585 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9586 "6096 ELS WQ setup: wq-id=%d, " 9587 "parent cq-id=%d\n", 9588 phba->sli4_hba.nvmels_wq->queue_id, 9589 phba->sli4_hba.nvmels_cq->queue_id); 9590 } 9591 9592 /* 9593 * Create NVMET Receive Queue (RQ) 9594 */ 9595 if (phba->nvmet_support) { 9596 if ((!phba->sli4_hba.nvmet_cqset) || 9597 (!phba->sli4_hba.nvmet_mrq_hdr) || 9598 (!phba->sli4_hba.nvmet_mrq_data)) { 9599 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9600 "6130 MRQ CQ Queues not " 9601 "allocated\n"); 9602 rc = -ENOMEM; 9603 goto out_destroy; 9604 } 9605 if (phba->cfg_nvmet_mrq > 1) { 9606 rc = lpfc_mrq_create(phba, 9607 phba->sli4_hba.nvmet_mrq_hdr, 9608 phba->sli4_hba.nvmet_mrq_data, 9609 phba->sli4_hba.nvmet_cqset, 9610 LPFC_NVMET); 9611 if (rc) { 9612 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9613 "6098 Failed setup of NVMET " 9614 "MRQ: rc = 0x%x\n", 9615 (uint32_t)rc); 9616 goto out_destroy; 9617 } 9618 9619 } else { 9620 rc = lpfc_rq_create(phba, 9621 phba->sli4_hba.nvmet_mrq_hdr[0], 9622 phba->sli4_hba.nvmet_mrq_data[0], 9623 phba->sli4_hba.nvmet_cqset[0], 9624 LPFC_NVMET); 9625 if (rc) { 9626 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9627 "6057 Failed setup of NVMET " 9628 "Receive Queue: rc = 0x%x\n", 9629 (uint32_t)rc); 9630 goto out_destroy; 9631 } 9632 9633 lpfc_printf_log( 9634 phba, KERN_INFO, LOG_INIT, 9635 "6099 NVMET RQ setup: hdr-rq-id=%d, " 9636 "dat-rq-id=%d parent cq-id=%d\n", 9637 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 9638 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 9639 phba->sli4_hba.nvmet_cqset[0]->queue_id); 9640 9641 } 9642 } 9643 9644 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 9645 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9646 "0540 Receive Queue not allocated\n"); 9647 rc = -ENOMEM; 9648 goto out_destroy; 9649 } 9650 9651 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 9652 phba->sli4_hba.els_cq, LPFC_USOL); 9653 if (rc) { 9654 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9655 "0541 Failed setup of Receive Queue: " 9656 "rc = 0x%x\n", (uint32_t)rc); 9657 goto out_destroy; 9658 } 9659 9660 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9661 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 9662 "parent cq-id=%d\n", 9663 phba->sli4_hba.hdr_rq->queue_id, 9664 phba->sli4_hba.dat_rq->queue_id, 9665 phba->sli4_hba.els_cq->queue_id); 9666 9667 if (phba->cfg_fcp_imax) 9668 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 9669 else 9670 usdelay = 0; 9671 9672 for (qidx = 0; qidx < phba->cfg_irq_chann; 9673 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 9674 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 9675 usdelay); 9676 9677 if (phba->sli4_hba.cq_max) { 9678 kfree(phba->sli4_hba.cq_lookup); 9679 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 9680 sizeof(struct lpfc_queue *), GFP_KERNEL); 9681 if (!phba->sli4_hba.cq_lookup) { 9682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9683 "0549 Failed setup of CQ Lookup table: " 9684 "size 0x%x\n", phba->sli4_hba.cq_max); 9685 rc = -ENOMEM; 9686 goto out_destroy; 9687 } 9688 lpfc_setup_cq_lookup(phba); 9689 } 9690 return 0; 9691 9692 out_destroy: 9693 lpfc_sli4_queue_unset(phba); 9694 
out_error: 9695 return rc; 9696 } 9697 9698 /** 9699 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 9700 * @phba: pointer to lpfc hba data structure. 9701 * 9702 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 9703 * operation. 9704 * 9705 * Return codes 9706 * 0 - successful 9707 * -ENOMEM - No available memory 9708 * -EIO - The mailbox failed to complete successfully. 9709 **/ 9710 void 9711 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 9712 { 9713 struct lpfc_sli4_hdw_queue *qp; 9714 int qidx; 9715 9716 /* Unset mailbox command work queue */ 9717 if (phba->sli4_hba.mbx_wq) 9718 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 9719 9720 /* Unset NVME LS work queue */ 9721 if (phba->sli4_hba.nvmels_wq) 9722 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 9723 9724 /* Unset ELS work queue */ 9725 if (phba->sli4_hba.els_wq) 9726 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 9727 9728 /* Unset unsolicited receive queue */ 9729 if (phba->sli4_hba.hdr_rq) 9730 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 9731 phba->sli4_hba.dat_rq); 9732 9733 /* Unset mailbox command complete queue */ 9734 if (phba->sli4_hba.mbx_cq) 9735 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 9736 9737 /* Unset ELS complete queue */ 9738 if (phba->sli4_hba.els_cq) 9739 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 9740 9741 /* Unset NVME LS complete queue */ 9742 if (phba->sli4_hba.nvmels_cq) 9743 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 9744 9745 if (phba->nvmet_support) { 9746 /* Unset NVMET MRQ queue */ 9747 if (phba->sli4_hba.nvmet_mrq_hdr) { 9748 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9749 lpfc_rq_destroy( 9750 phba, 9751 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9752 phba->sli4_hba.nvmet_mrq_data[qidx]); 9753 } 9754 9755 /* Unset NVMET CQ Set complete queue */ 9756 if (phba->sli4_hba.nvmet_cqset) { 9757 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9758 lpfc_cq_destroy( 9759 phba, phba->sli4_hba.nvmet_cqset[qidx]); 9760 } 9761 } 9762 9763 /* Unset fast-path SLI4 queues */ 9764 if (phba->sli4_hba.hdwq) { 9765 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9766 qp = &phba->sli4_hba.hdwq[qidx]; 9767 lpfc_wq_destroy(phba, qp->fcp_wq); 9768 lpfc_wq_destroy(phba, qp->nvme_wq); 9769 lpfc_cq_destroy(phba, qp->fcp_cq); 9770 lpfc_cq_destroy(phba, qp->nvme_cq); 9771 if (qidx < phba->cfg_irq_chann) 9772 lpfc_eq_destroy(phba, qp->hba_eq); 9773 } 9774 } 9775 9776 kfree(phba->sli4_hba.cq_lookup); 9777 phba->sli4_hba.cq_lookup = NULL; 9778 phba->sli4_hba.cq_max = 0; 9779 } 9780 9781 /** 9782 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 9783 * @phba: pointer to lpfc hba data structure. 9784 * 9785 * This routine is invoked to allocate and set up a pool of completion queue 9786 * events. The body of the completion queue event is a completion queue entry 9787 * CQE. For now, this pool is used for the interrupt service routine to queue 9788 * the following HBA completion queue events for the worker thread to process: 9789 * - Mailbox asynchronous events 9790 * - Receive queue completion unsolicited events 9791 * Later, this can be used for all the slow-path events. 
9792 * 9793 * Return codes 9794 * 0 - successful 9795 * -ENOMEM - No available memory 9796 **/ 9797 static int 9798 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 9799 { 9800 struct lpfc_cq_event *cq_event; 9801 int i; 9802 9803 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 9804 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 9805 if (!cq_event) 9806 goto out_pool_create_fail; 9807 list_add_tail(&cq_event->list, 9808 &phba->sli4_hba.sp_cqe_event_pool); 9809 } 9810 return 0; 9811 9812 out_pool_create_fail: 9813 lpfc_sli4_cq_event_pool_destroy(phba); 9814 return -ENOMEM; 9815 } 9816 9817 /** 9818 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 9819 * @phba: pointer to lpfc hba data structure. 9820 * 9821 * This routine is invoked to free the pool of completion queue events at 9822 * driver unload time. Note that, it is the responsibility of the driver 9823 * cleanup routine to free all the outstanding completion-queue events 9824 * allocated from this pool back into the pool before invoking this routine 9825 * to destroy the pool. 9826 **/ 9827 static void 9828 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 9829 { 9830 struct lpfc_cq_event *cq_event, *next_cq_event; 9831 9832 list_for_each_entry_safe(cq_event, next_cq_event, 9833 &phba->sli4_hba.sp_cqe_event_pool, list) { 9834 list_del(&cq_event->list); 9835 kfree(cq_event); 9836 } 9837 } 9838 9839 /** 9840 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9841 * @phba: pointer to lpfc hba data structure. 9842 * 9843 * This routine is the lock free version of the API invoked to allocate a 9844 * completion-queue event from the free pool. 9845 * 9846 * Return: Pointer to the newly allocated completion-queue event if successful 9847 * NULL otherwise. 9848 **/ 9849 struct lpfc_cq_event * 9850 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9851 { 9852 struct lpfc_cq_event *cq_event = NULL; 9853 9854 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 9855 struct lpfc_cq_event, list); 9856 return cq_event; 9857 } 9858 9859 /** 9860 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9861 * @phba: pointer to lpfc hba data structure. 9862 * 9863 * This routine is the lock version of the API invoked to allocate a 9864 * completion-queue event from the free pool. 9865 * 9866 * Return: Pointer to the newly allocated completion-queue event if successful 9867 * NULL otherwise. 9868 **/ 9869 struct lpfc_cq_event * 9870 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9871 { 9872 struct lpfc_cq_event *cq_event; 9873 unsigned long iflags; 9874 9875 spin_lock_irqsave(&phba->hbalock, iflags); 9876 cq_event = __lpfc_sli4_cq_event_alloc(phba); 9877 spin_unlock_irqrestore(&phba->hbalock, iflags); 9878 return cq_event; 9879 } 9880 9881 /** 9882 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 9883 * @phba: pointer to lpfc hba data structure. 9884 * @cq_event: pointer to the completion queue event to be freed. 9885 * 9886 * This routine is the lock free version of the API invoked to release a 9887 * completion-queue event back into the free pool. 9888 **/ 9889 void 9890 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 9891 struct lpfc_cq_event *cq_event) 9892 { 9893 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 9894 } 9895 9896 /** 9897 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 9898 * @phba: pointer to lpfc hba data structure. 
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all pending completion-queue events back into the
 * free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}

/**
 * lpfc_pci_function_reset - Reset PCI function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 * 0 - successful
 * -ENOMEM - No available memory
 * -EIO - The mailbox failed to complete successfully.
9958 **/ 9959 int 9960 lpfc_pci_function_reset(struct lpfc_hba *phba) 9961 { 9962 LPFC_MBOXQ_t *mboxq; 9963 uint32_t rc = 0, if_type; 9964 uint32_t shdr_status, shdr_add_status; 9965 uint32_t rdy_chk; 9966 uint32_t port_reset = 0; 9967 union lpfc_sli4_cfg_shdr *shdr; 9968 struct lpfc_register reg_data; 9969 uint16_t devid; 9970 9971 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9972 switch (if_type) { 9973 case LPFC_SLI_INTF_IF_TYPE_0: 9974 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 9975 GFP_KERNEL); 9976 if (!mboxq) { 9977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9978 "0494 Unable to allocate memory for " 9979 "issuing SLI_FUNCTION_RESET mailbox " 9980 "command\n"); 9981 return -ENOMEM; 9982 } 9983 9984 /* Setup PCI function reset mailbox-ioctl command */ 9985 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9986 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 9987 LPFC_SLI4_MBX_EMBED); 9988 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9989 shdr = (union lpfc_sli4_cfg_shdr *) 9990 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9991 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9992 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 9993 &shdr->response); 9994 if (rc != MBX_TIMEOUT) 9995 mempool_free(mboxq, phba->mbox_mem_pool); 9996 if (shdr_status || shdr_add_status || rc) { 9997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9998 "0495 SLI_FUNCTION_RESET mailbox " 9999 "failed with status x%x add_status x%x," 10000 " mbx status x%x\n", 10001 shdr_status, shdr_add_status, rc); 10002 rc = -ENXIO; 10003 } 10004 break; 10005 case LPFC_SLI_INTF_IF_TYPE_2: 10006 case LPFC_SLI_INTF_IF_TYPE_6: 10007 wait: 10008 /* 10009 * Poll the Port Status Register and wait for RDY for 10010 * up to 30 seconds. If the port doesn't respond, treat 10011 * it as an error. 10012 */ 10013 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 10014 if (lpfc_readl(phba->sli4_hba.u.if_type2. 10015 STATUSregaddr, ®_data.word0)) { 10016 rc = -ENODEV; 10017 goto out; 10018 } 10019 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 10020 break; 10021 msleep(20); 10022 } 10023 10024 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 10025 phba->work_status[0] = readl( 10026 phba->sli4_hba.u.if_type2.ERR1regaddr); 10027 phba->work_status[1] = readl( 10028 phba->sli4_hba.u.if_type2.ERR2regaddr); 10029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10030 "2890 Port not ready, port status reg " 10031 "0x%x error 1=0x%x, error 2=0x%x\n", 10032 reg_data.word0, 10033 phba->work_status[0], 10034 phba->work_status[1]); 10035 rc = -ENODEV; 10036 goto out; 10037 } 10038 10039 if (!port_reset) { 10040 /* 10041 * Reset the port now 10042 */ 10043 reg_data.word0 = 0; 10044 bf_set(lpfc_sliport_ctrl_end, ®_data, 10045 LPFC_SLIPORT_LITTLE_ENDIAN); 10046 bf_set(lpfc_sliport_ctrl_ip, ®_data, 10047 LPFC_SLIPORT_INIT_PORT); 10048 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 10049 CTRLregaddr); 10050 /* flush */ 10051 pci_read_config_word(phba->pcidev, 10052 PCI_DEVICE_ID, &devid); 10053 10054 port_reset = 1; 10055 msleep(20); 10056 goto wait; 10057 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 10058 rc = -ENODEV; 10059 goto out; 10060 } 10061 break; 10062 10063 case LPFC_SLI_INTF_IF_TYPE_1: 10064 default: 10065 break; 10066 } 10067 10068 out: 10069 /* Catch the not-ready port failure after a port reset. 
*/ 10070 if (rc) { 10071 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10072 "3317 HBA not functional: IP Reset Failed " 10073 "try: echo fw_reset > board_mode\n"); 10074 rc = -ENODEV; 10075 } 10076 10077 return rc; 10078 } 10079 10080 /** 10081 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 10082 * @phba: pointer to lpfc hba data structure. 10083 * 10084 * This routine is invoked to set up the PCI device memory space for device 10085 * with SLI-4 interface spec. 10086 * 10087 * Return codes 10088 * 0 - successful 10089 * other values - error 10090 **/ 10091 static int 10092 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 10093 { 10094 struct pci_dev *pdev = phba->pcidev; 10095 unsigned long bar0map_len, bar1map_len, bar2map_len; 10096 int error; 10097 uint32_t if_type; 10098 10099 if (!pdev) 10100 return -ENODEV; 10101 10102 /* Set the device DMA mask size */ 10103 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10104 if (error) 10105 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10106 if (error) 10107 return error; 10108 10109 /* 10110 * The BARs and register set definitions and offset locations are 10111 * dependent on the if_type. 10112 */ 10113 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 10114 &phba->sli4_hba.sli_intf.word0)) { 10115 return -ENODEV; 10116 } 10117 10118 /* There is no SLI3 failback for SLI4 devices. */ 10119 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 10120 LPFC_SLI_INTF_VALID) { 10121 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10122 "2894 SLI_INTF reg contents invalid " 10123 "sli_intf reg 0x%x\n", 10124 phba->sli4_hba.sli_intf.word0); 10125 return -ENODEV; 10126 } 10127 10128 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10129 /* 10130 * Get the bus address of SLI4 device Bar regions and the 10131 * number of bytes required by each mapping. The mapping of the 10132 * particular PCI BARs regions is dependent on the type of 10133 * SLI4 device. 
10134 */ 10135 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 10136 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 10137 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 10138 10139 /* 10140 * Map SLI4 PCI Config Space Register base to a kernel virtual 10141 * addr 10142 */ 10143 phba->sli4_hba.conf_regs_memmap_p = 10144 ioremap(phba->pci_bar0_map, bar0map_len); 10145 if (!phba->sli4_hba.conf_regs_memmap_p) { 10146 dev_printk(KERN_ERR, &pdev->dev, 10147 "ioremap failed for SLI4 PCI config " 10148 "registers.\n"); 10149 return -ENODEV; 10150 } 10151 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 10152 /* Set up BAR0 PCI config space register memory map */ 10153 lpfc_sli4_bar0_register_memmap(phba, if_type); 10154 } else { 10155 phba->pci_bar0_map = pci_resource_start(pdev, 1); 10156 bar0map_len = pci_resource_len(pdev, 1); 10157 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10158 dev_printk(KERN_ERR, &pdev->dev, 10159 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 10160 return -ENODEV; 10161 } 10162 phba->sli4_hba.conf_regs_memmap_p = 10163 ioremap(phba->pci_bar0_map, bar0map_len); 10164 if (!phba->sli4_hba.conf_regs_memmap_p) { 10165 dev_printk(KERN_ERR, &pdev->dev, 10166 "ioremap failed for SLI4 PCI config " 10167 "registers.\n"); 10168 return -ENODEV; 10169 } 10170 lpfc_sli4_bar0_register_memmap(phba, if_type); 10171 } 10172 10173 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10174 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 10175 /* 10176 * Map SLI4 if type 0 HBA Control Register base to a 10177 * kernel virtual address and setup the registers. 10178 */ 10179 phba->pci_bar1_map = pci_resource_start(pdev, 10180 PCI_64BIT_BAR2); 10181 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10182 phba->sli4_hba.ctrl_regs_memmap_p = 10183 ioremap(phba->pci_bar1_map, 10184 bar1map_len); 10185 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 10186 dev_err(&pdev->dev, 10187 "ioremap failed for SLI4 HBA " 10188 "control registers.\n"); 10189 error = -ENOMEM; 10190 goto out_iounmap_conf; 10191 } 10192 phba->pci_bar2_memmap_p = 10193 phba->sli4_hba.ctrl_regs_memmap_p; 10194 lpfc_sli4_bar1_register_memmap(phba, if_type); 10195 } else { 10196 error = -ENOMEM; 10197 goto out_iounmap_conf; 10198 } 10199 } 10200 10201 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 10202 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 10203 /* 10204 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 10205 * virtual address and setup the registers. 10206 */ 10207 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 10208 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10209 phba->sli4_hba.drbl_regs_memmap_p = 10210 ioremap(phba->pci_bar1_map, bar1map_len); 10211 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10212 dev_err(&pdev->dev, 10213 "ioremap failed for SLI4 HBA doorbell registers.\n"); 10214 error = -ENOMEM; 10215 goto out_iounmap_conf; 10216 } 10217 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 10218 lpfc_sli4_bar1_register_memmap(phba, if_type); 10219 } 10220 10221 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10222 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10223 /* 10224 * Map SLI4 if type 0 HBA Doorbell Register base to 10225 * a kernel virtual address and setup the registers. 
 */
			phba->pci_bar2_map = pci_resource_start(pdev,
								PCI_64BIT_BAR4);
			bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
			phba->sli4_hba.drbl_regs_memmap_p =
				ioremap(phba->pci_bar2_map,
					bar2map_len);
			if (!phba->sli4_hba.drbl_regs_memmap_p) {
				dev_err(&pdev->dev,
					"ioremap failed for SLI4 HBA"
					" doorbell registers.\n");
				error = -ENOMEM;
				goto out_iounmap_ctrl;
			}
			phba->pci_bar4_memmap_p =
				phba->sli4_hba.drbl_regs_memmap_p;
			error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
			if (error)
				goto out_iounmap_all;
		} else {
			error = -ENOMEM;
			goto out_iounmap_all;
		}
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
	    pci_resource_start(pdev, PCI_64BIT_BAR4)) {
		/*
		 * Map SLI4 if type 6 HBA DPP Register base to a kernel
		 * virtual address and set up the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
		phba->sli4_hba.dpp_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.dpp_regs_memmap_p) {
			dev_err(&pdev->dev,
				"ioremap failed for SLI4 HBA dpp registers.\n");
			error = -ENOMEM;
			goto out_iounmap_ctrl;
		}
		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
	}

	/* Set up the EQ/CQ register handling functions now */
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
		break;
	default:
		break;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);

	return error;
}

/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
10305 **/ 10306 static void 10307 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 10308 { 10309 uint32_t if_type; 10310 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10311 10312 switch (if_type) { 10313 case LPFC_SLI_INTF_IF_TYPE_0: 10314 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10315 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10316 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10317 break; 10318 case LPFC_SLI_INTF_IF_TYPE_2: 10319 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10320 break; 10321 case LPFC_SLI_INTF_IF_TYPE_6: 10322 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10323 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10324 break; 10325 case LPFC_SLI_INTF_IF_TYPE_1: 10326 default: 10327 dev_printk(KERN_ERR, &phba->pcidev->dev, 10328 "FATAL - unsupported SLI4 interface type - %d\n", 10329 if_type); 10330 break; 10331 } 10332 } 10333 10334 /** 10335 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 10336 * @phba: pointer to lpfc hba data structure. 10337 * 10338 * This routine is invoked to enable the MSI-X interrupt vectors to device 10339 * with SLI-3 interface specs. 10340 * 10341 * Return codes 10342 * 0 - successful 10343 * other values - error 10344 **/ 10345 static int 10346 lpfc_sli_enable_msix(struct lpfc_hba *phba) 10347 { 10348 int rc; 10349 LPFC_MBOXQ_t *pmb; 10350 10351 /* Set up MSI-X multi-message vectors */ 10352 rc = pci_alloc_irq_vectors(phba->pcidev, 10353 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 10354 if (rc < 0) { 10355 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10356 "0420 PCI enable MSI-X failed (%d)\n", rc); 10357 goto vec_fail_out; 10358 } 10359 10360 /* 10361 * Assign MSI-X vectors to interrupt handlers 10362 */ 10363 10364 /* vector-0 is associated to slow-path handler */ 10365 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 10366 &lpfc_sli_sp_intr_handler, 0, 10367 LPFC_SP_DRIVER_HANDLER_NAME, phba); 10368 if (rc) { 10369 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10370 "0421 MSI-X slow-path request_irq failed " 10371 "(%d)\n", rc); 10372 goto msi_fail_out; 10373 } 10374 10375 /* vector-1 is associated to fast-path handler */ 10376 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 10377 &lpfc_sli_fp_intr_handler, 0, 10378 LPFC_FP_DRIVER_HANDLER_NAME, phba); 10379 10380 if (rc) { 10381 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10382 "0429 MSI-X fast-path request_irq failed " 10383 "(%d)\n", rc); 10384 goto irq_fail_out; 10385 } 10386 10387 /* 10388 * Configure HBA MSI-X attention conditions to messages 10389 */ 10390 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10391 10392 if (!pmb) { 10393 rc = -ENOMEM; 10394 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10395 "0474 Unable to allocate memory for issuing " 10396 "MBOX_CONFIG_MSI command\n"); 10397 goto mem_fail_out; 10398 } 10399 rc = lpfc_config_msi(phba, pmb); 10400 if (rc) 10401 goto mbx_fail_out; 10402 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10403 if (rc != MBX_SUCCESS) { 10404 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 10405 "0351 Config MSI mailbox command failed, " 10406 "mbxCmd x%x, mbxStatus x%x\n", 10407 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 10408 goto mbx_fail_out; 10409 } 10410 10411 /* Free memory allocated for mailbox command */ 10412 mempool_free(pmb, phba->mbox_mem_pool); 10413 return rc; 10414 10415 mbx_fail_out: 10416 /* Free memory allocated for mailbox command */ 10417 mempool_free(pmb, phba->mbox_mem_pool); 10418 10419 mem_fail_out: 10420 /* free the irq already requested */ 10421 
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode on a device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler, which
 * is done in this function.
 *
 * Return codes
 * 0 - successful
 * other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable device interrupt and associate the
 * driver's interrupt handler(s) with interrupt vector(s) on a device with
 * SLI-3 interface spec. Depending on the interrupt mode configured in the
 * driver, it will try to fall back from the configured interrupt mode to an
 * interrupt mode that is supported by the platform, kernel, and device, in
 * the order:
 * MSI-X -> MSI -> IRQ.
10485 * 10486 * Return codes 10487 * 0 - successful 10488 * other values - error 10489 **/ 10490 static uint32_t 10491 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10492 { 10493 uint32_t intr_mode = LPFC_INTR_ERROR; 10494 int retval; 10495 10496 if (cfg_mode == 2) { 10497 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 10498 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 10499 if (!retval) { 10500 /* Now, try to enable MSI-X interrupt mode */ 10501 retval = lpfc_sli_enable_msix(phba); 10502 if (!retval) { 10503 /* Indicate initialization to MSI-X mode */ 10504 phba->intr_type = MSIX; 10505 intr_mode = 2; 10506 } 10507 } 10508 } 10509 10510 /* Fallback to MSI if MSI-X initialization failed */ 10511 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10512 retval = lpfc_sli_enable_msi(phba); 10513 if (!retval) { 10514 /* Indicate initialization to MSI mode */ 10515 phba->intr_type = MSI; 10516 intr_mode = 1; 10517 } 10518 } 10519 10520 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 10521 if (phba->intr_type == NONE) { 10522 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10523 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10524 if (!retval) { 10525 /* Indicate initialization to INTx mode */ 10526 phba->intr_type = INTx; 10527 intr_mode = 0; 10528 } 10529 } 10530 return intr_mode; 10531 } 10532 10533 /** 10534 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 10535 * @phba: pointer to lpfc hba data structure. 10536 * 10537 * This routine is invoked to disable device interrupt and disassociate the 10538 * driver's interrupt handler(s) from interrupt vector(s) to device with 10539 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 10540 * release the interrupt vector(s) for the message signaled interrupt. 10541 **/ 10542 static void 10543 lpfc_sli_disable_intr(struct lpfc_hba *phba) 10544 { 10545 int nr_irqs, i; 10546 10547 if (phba->intr_type == MSIX) 10548 nr_irqs = LPFC_MSIX_VECTORS; 10549 else 10550 nr_irqs = 1; 10551 10552 for (i = 0; i < nr_irqs; i++) 10553 free_irq(pci_irq_vector(phba->pcidev, i), phba); 10554 pci_free_irq_vectors(phba->pcidev); 10555 10556 /* Reset interrupt management states */ 10557 phba->intr_type = NONE; 10558 phba->sli.slistat.sli_intr = 0; 10559 } 10560 10561 /** 10562 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified EQ 10563 * @phba: pointer to lpfc hba data structure. 10564 * @id: EQ vector index or Hardware Queue index 10565 * @match: LPFC_FIND_BY_EQ = match by EQ 10566 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 10567 */ 10568 static uint16_t 10569 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 10570 { 10571 struct lpfc_vector_map_info *cpup; 10572 int cpu; 10573 10574 /* Find the desired phys_id for the specified EQ */ 10575 for_each_present_cpu(cpu) { 10576 cpup = &phba->sli4_hba.cpu_map[cpu]; 10577 if ((match == LPFC_FIND_BY_EQ) && 10578 (cpup->irq != LPFC_VECTOR_MAP_EMPTY) && 10579 (cpup->eq == id)) 10580 return cpu; 10581 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 10582 return cpu; 10583 } 10584 return 0; 10585 } 10586 10587 /** 10588 * lpfc_find_eq_handle - Find the EQ that corresponds to the specified 10589 * Hardware Queue 10590 * @phba: pointer to lpfc hba data structure. 
10591 * @hdwq: Hardware Queue index 10592 */ 10593 static uint16_t 10594 lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq) 10595 { 10596 struct lpfc_vector_map_info *cpup; 10597 int cpu; 10598 10599 /* Find the desired phys_id for the specified EQ */ 10600 for_each_present_cpu(cpu) { 10601 cpup = &phba->sli4_hba.cpu_map[cpu]; 10602 if (cpup->hdwq == hdwq) 10603 return cpup->eq; 10604 } 10605 return 0; 10606 } 10607 10608 #ifdef CONFIG_X86 10609 /** 10610 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 10611 * @phba: pointer to lpfc hba data structure. 10612 * @cpu: CPU map index 10613 * @phys_id: CPU package physical id 10614 * @core_id: CPU core id 10615 */ 10616 static int 10617 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 10618 uint16_t phys_id, uint16_t core_id) 10619 { 10620 struct lpfc_vector_map_info *cpup; 10621 int idx; 10622 10623 for_each_present_cpu(idx) { 10624 cpup = &phba->sli4_hba.cpu_map[idx]; 10625 /* Does the cpup match the one we are looking for */ 10626 if ((cpup->phys_id == phys_id) && 10627 (cpup->core_id == core_id) && 10628 (cpu != idx)) 10629 return 1; 10630 } 10631 return 0; 10632 } 10633 #endif 10634 10635 /** 10636 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 10637 * @phba: pointer to lpfc hba data structure. 10638 * @vectors: number of msix vectors allocated. 10639 * 10640 * The routine will figure out the CPU affinity assignment for every 10641 * MSI-X vector allocated for the HBA. 10642 * In addition, the CPU to IO channel mapping will be calculated 10643 * and the phba->sli4_hba.cpu_map array will reflect this. 10644 */ 10645 static void 10646 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 10647 { 10648 int i, cpu, idx; 10649 int max_phys_id, min_phys_id; 10650 int max_core_id, min_core_id; 10651 struct lpfc_vector_map_info *cpup; 10652 const struct cpumask *maskp; 10653 #ifdef CONFIG_X86 10654 struct cpuinfo_x86 *cpuinfo; 10655 #endif 10656 10657 /* Init cpu_map array */ 10658 memset(phba->sli4_hba.cpu_map, 0xff, 10659 (sizeof(struct lpfc_vector_map_info) * 10660 phba->sli4_hba.num_possible_cpu)); 10661 10662 max_phys_id = 0; 10663 min_phys_id = 0xffff; 10664 max_core_id = 0; 10665 min_core_id = 0xffff; 10666 10667 /* Update CPU map with physical id and core id of each CPU */ 10668 for_each_present_cpu(cpu) { 10669 cpup = &phba->sli4_hba.cpu_map[cpu]; 10670 #ifdef CONFIG_X86 10671 cpuinfo = &cpu_data(cpu); 10672 cpup->phys_id = cpuinfo->phys_proc_id; 10673 cpup->core_id = cpuinfo->cpu_core_id; 10674 cpup->hyper = lpfc_find_hyper(phba, cpu, 10675 cpup->phys_id, cpup->core_id); 10676 #else 10677 /* No distinction between CPUs for other platforms */ 10678 cpup->phys_id = 0; 10679 cpup->core_id = cpu; 10680 cpup->hyper = 0; 10681 #endif 10682 10683 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10684 "3328 CPU physid %d coreid %d\n", 10685 cpup->phys_id, cpup->core_id); 10686 10687 if (cpup->phys_id > max_phys_id) 10688 max_phys_id = cpup->phys_id; 10689 if (cpup->phys_id < min_phys_id) 10690 min_phys_id = cpup->phys_id; 10691 10692 if (cpup->core_id > max_core_id) 10693 max_core_id = cpup->core_id; 10694 if (cpup->core_id < min_core_id) 10695 min_core_id = cpup->core_id; 10696 } 10697 10698 for_each_possible_cpu(i) { 10699 struct lpfc_eq_intr_info *eqi = 10700 per_cpu_ptr(phba->sli4_hba.eq_info, i); 10701 10702 INIT_LIST_HEAD(&eqi->list); 10703 eqi->icnt = 0; 10704 } 10705 10706 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10707 maskp = pci_irq_get_affinity(phba->pcidev, idx); 10708 if (!maskp) 
10709 continue; 10710 10711 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 10712 cpup = &phba->sli4_hba.cpu_map[cpu]; 10713 cpup->eq = idx; 10714 cpup->hdwq = idx; 10715 cpup->irq = pci_irq_vector(phba->pcidev, idx); 10716 10717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10718 "3336 Set Affinity: CPU %d " 10719 "hdwq %d irq %d\n", 10720 cpu, cpup->hdwq, cpup->irq); 10721 } 10722 } 10723 return; 10724 } 10725 10726 /** 10727 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 10728 * @phba: pointer to lpfc hba data structure. 10729 * 10730 * This routine is invoked to enable the MSI-X interrupt vectors to device 10731 * with SLI-4 interface spec. 10732 * 10733 * Return codes 10734 * 0 - successful 10735 * other values - error 10736 **/ 10737 static int 10738 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 10739 { 10740 int vectors, rc, index; 10741 char *name; 10742 10743 /* Set up MSI-X multi-message vectors */ 10744 vectors = phba->cfg_irq_chann; 10745 10746 rc = pci_alloc_irq_vectors(phba->pcidev, 10747 1, 10748 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 10749 if (rc < 0) { 10750 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10751 "0484 PCI enable MSI-X failed (%d)\n", rc); 10752 goto vec_fail_out; 10753 } 10754 vectors = rc; 10755 10756 /* Assign MSI-X vectors to interrupt handlers */ 10757 for (index = 0; index < vectors; index++) { 10758 name = phba->sli4_hba.hba_eq_hdl[index].handler_name; 10759 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 10760 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 10761 LPFC_DRIVER_HANDLER_NAME"%d", index); 10762 10763 phba->sli4_hba.hba_eq_hdl[index].idx = index; 10764 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 10765 rc = request_irq(pci_irq_vector(phba->pcidev, index), 10766 &lpfc_sli4_hba_intr_handler, 0, 10767 name, 10768 &phba->sli4_hba.hba_eq_hdl[index]); 10769 if (rc) { 10770 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10771 "0486 MSI-X fast-path (%d) " 10772 "request_irq failed (%d)\n", index, rc); 10773 goto cfg_fail_out; 10774 } 10775 } 10776 10777 if (vectors != phba->cfg_irq_chann) { 10778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10779 "3238 Reducing IO channels to match number of " 10780 "MSI-X vectors, requested %d got %d\n", 10781 phba->cfg_irq_chann, vectors); 10782 if (phba->cfg_irq_chann > vectors) 10783 phba->cfg_irq_chann = vectors; 10784 if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors)) 10785 phba->cfg_nvmet_mrq = vectors; 10786 } 10787 10788 return rc; 10789 10790 cfg_fail_out: 10791 /* free the irq already requested */ 10792 for (--index; index >= 0; index--) 10793 free_irq(pci_irq_vector(phba->pcidev, index), 10794 &phba->sli4_hba.hba_eq_hdl[index]); 10795 10796 /* Unconfigure MSI-X capability structure */ 10797 pci_free_irq_vectors(phba->pcidev); 10798 10799 vec_fail_out: 10800 return rc; 10801 } 10802 10803 /** 10804 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 10805 * @phba: pointer to lpfc hba data structure. 10806 * 10807 * This routine is invoked to enable the MSI interrupt mode to device with 10808 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 10809 * to enable the MSI vector. The device driver is responsible for calling 10810 * the request_irq() to register MSI vector with a interrupt the handler, 10811 * which is done in this function. 
10812 * 10813 * Return codes 10814 * 0 - successful 10815 * other values - error 10816 **/ 10817 static int 10818 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 10819 { 10820 int rc, index; 10821 10822 rc = pci_enable_msi(phba->pcidev); 10823 if (!rc) 10824 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10825 "0487 PCI enable MSI mode success.\n"); 10826 else { 10827 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10828 "0488 PCI enable MSI mode failed (%d)\n", rc); 10829 return rc; 10830 } 10831 10832 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 10833 0, LPFC_DRIVER_NAME, phba); 10834 if (rc) { 10835 pci_disable_msi(phba->pcidev); 10836 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10837 "0490 MSI request_irq failed (%d)\n", rc); 10838 return rc; 10839 } 10840 10841 for (index = 0; index < phba->cfg_irq_chann; index++) { 10842 phba->sli4_hba.hba_eq_hdl[index].idx = index; 10843 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 10844 } 10845 10846 return 0; 10847 } 10848 10849 /** 10850 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 10851 * @phba: pointer to lpfc hba data structure. 10852 * 10853 * This routine is invoked to enable device interrupt and associate driver's 10854 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 10855 * interface spec. Depends on the interrupt mode configured to the driver, 10856 * the driver will try to fallback from the configured interrupt mode to an 10857 * interrupt mode which is supported by the platform, kernel, and device in 10858 * the order of: 10859 * MSI-X -> MSI -> IRQ. 10860 * 10861 * Return codes 10862 * 0 - successful 10863 * other values - error 10864 **/ 10865 static uint32_t 10866 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10867 { 10868 uint32_t intr_mode = LPFC_INTR_ERROR; 10869 int retval, idx; 10870 10871 if (cfg_mode == 2) { 10872 /* Preparation before conf_msi mbox cmd */ 10873 retval = 0; 10874 if (!retval) { 10875 /* Now, try to enable MSI-X interrupt mode */ 10876 retval = lpfc_sli4_enable_msix(phba); 10877 if (!retval) { 10878 /* Indicate initialization to MSI-X mode */ 10879 phba->intr_type = MSIX; 10880 intr_mode = 2; 10881 } 10882 } 10883 } 10884 10885 /* Fallback to MSI if MSI-X initialization failed */ 10886 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10887 retval = lpfc_sli4_enable_msi(phba); 10888 if (!retval) { 10889 /* Indicate initialization to MSI mode */ 10890 phba->intr_type = MSI; 10891 intr_mode = 1; 10892 } 10893 } 10894 10895 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 10896 if (phba->intr_type == NONE) { 10897 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 10898 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10899 if (!retval) { 10900 struct lpfc_hba_eq_hdl *eqhdl; 10901 10902 /* Indicate initialization to INTx mode */ 10903 phba->intr_type = INTx; 10904 intr_mode = 0; 10905 10906 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10907 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; 10908 eqhdl->idx = idx; 10909 eqhdl->phba = phba; 10910 } 10911 } 10912 } 10913 return intr_mode; 10914 } 10915 10916 /** 10917 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 10918 * @phba: pointer to lpfc hba data structure. 10919 * 10920 * This routine is invoked to disable device interrupt and disassociate 10921 * the driver's interrupt handler(s) from interrupt vector(s) to device 10922 * with SLI-4 interface spec. 
Depending on the interrupt mode, the driver 10923 * will release the interrupt vector(s) for the message signaled interrupt. 10924 **/ 10925 static void 10926 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 10927 { 10928 /* Disable the currently initialized interrupt mode */ 10929 if (phba->intr_type == MSIX) { 10930 int index; 10931 10932 /* Free up MSI-X multi-message vectors */ 10933 for (index = 0; index < phba->cfg_irq_chann; index++) { 10934 irq_set_affinity_hint( 10935 pci_irq_vector(phba->pcidev, index), 10936 NULL); 10937 free_irq(pci_irq_vector(phba->pcidev, index), 10938 &phba->sli4_hba.hba_eq_hdl[index]); 10939 } 10940 } else { 10941 free_irq(phba->pcidev->irq, phba); 10942 } 10943 10944 pci_free_irq_vectors(phba->pcidev); 10945 10946 /* Reset interrupt management states */ 10947 phba->intr_type = NONE; 10948 phba->sli.slistat.sli_intr = 0; 10949 } 10950 10951 /** 10952 * lpfc_unset_hba - Unset SLI3 hba device initialization 10953 * @phba: pointer to lpfc hba data structure. 10954 * 10955 * This routine is invoked to unset the HBA device initialization steps to 10956 * a device with SLI-3 interface spec. 10957 **/ 10958 static void 10959 lpfc_unset_hba(struct lpfc_hba *phba) 10960 { 10961 struct lpfc_vport *vport = phba->pport; 10962 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10963 10964 spin_lock_irq(shost->host_lock); 10965 vport->load_flag |= FC_UNLOADING; 10966 spin_unlock_irq(shost->host_lock); 10967 10968 kfree(phba->vpi_bmask); 10969 kfree(phba->vpi_ids); 10970 10971 lpfc_stop_hba_timers(phba); 10972 10973 phba->pport->work_port_events = 0; 10974 10975 lpfc_sli_hba_down(phba); 10976 10977 lpfc_sli_brdrestart(phba); 10978 10979 lpfc_sli_disable_intr(phba); 10980 10981 return; 10982 } 10983 10984 /** 10985 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 10986 * @phba: Pointer to HBA context object. 10987 * 10988 * This function is called in the SLI4 code path to wait for completion 10989 * of device's XRIs exchange busy. It will check the XRI exchange busy 10990 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 10991 * that, it will check the XRI exchange busy on outstanding FCP and ELS 10992 * I/Os every 30 seconds, log error message, and wait forever. Only when 10993 * all XRI exchange busy complete, the driver unload shall proceed with 10994 * invoking the function reset ioctl mailbox command to the CNA and the 10995 * the rest of the driver unload resource release. 10996 **/ 10997 static void 10998 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 10999 { 11000 struct lpfc_sli4_hdw_queue *qp; 11001 int idx, ccnt, fcnt; 11002 int wait_time = 0; 11003 int io_xri_cmpl = 1; 11004 int nvmet_xri_cmpl = 1; 11005 int fcp_xri_cmpl = 1; 11006 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11007 11008 /* Driver just aborted IOs during the hba_unset process. Pause 11009 * here to give the HBA time to complete the IO and get entries 11010 * into the abts lists. 11011 */ 11012 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 11013 11014 /* Wait for NVME pending IO to flush back to transport. 
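 * This step only applies when the NVME FC4 type is enabled.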
*/ 11015 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 11016 lpfc_nvme_wait_for_io_drain(phba); 11017 11018 ccnt = 0; 11019 fcnt = 0; 11020 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11021 qp = &phba->sli4_hba.hdwq[idx]; 11022 fcp_xri_cmpl = list_empty( 11023 &qp->lpfc_abts_scsi_buf_list); 11024 if (!fcp_xri_cmpl) /* if list is NOT empty */ 11025 fcnt++; 11026 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11027 io_xri_cmpl = list_empty( 11028 &qp->lpfc_abts_nvme_buf_list); 11029 if (!io_xri_cmpl) /* if list is NOT empty */ 11030 ccnt++; 11031 } 11032 } 11033 if (ccnt) 11034 io_xri_cmpl = 0; 11035 if (fcnt) 11036 fcp_xri_cmpl = 0; 11037 11038 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11039 nvmet_xri_cmpl = 11040 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11041 } 11042 11043 while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl || 11044 !nvmet_xri_cmpl) { 11045 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 11046 if (!nvmet_xri_cmpl) 11047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11048 "6424 NVMET XRI exchange busy " 11049 "wait time: %d seconds.\n", 11050 wait_time/1000); 11051 if (!io_xri_cmpl) 11052 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11053 "6100 NVME XRI exchange busy " 11054 "wait time: %d seconds.\n", 11055 wait_time/1000); 11056 if (!fcp_xri_cmpl) 11057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11058 "2877 FCP XRI exchange busy " 11059 "wait time: %d seconds.\n", 11060 wait_time/1000); 11061 if (!els_xri_cmpl) 11062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11063 "2878 ELS XRI exchange busy " 11064 "wait time: %d seconds.\n", 11065 wait_time/1000); 11066 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 11067 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 11068 } else { 11069 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 11070 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 11071 } 11072 11073 ccnt = 0; 11074 fcnt = 0; 11075 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11076 qp = &phba->sli4_hba.hdwq[idx]; 11077 fcp_xri_cmpl = list_empty( 11078 &qp->lpfc_abts_scsi_buf_list); 11079 if (!fcp_xri_cmpl) /* if list is NOT empty */ 11080 fcnt++; 11081 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11082 io_xri_cmpl = list_empty( 11083 &qp->lpfc_abts_nvme_buf_list); 11084 if (!io_xri_cmpl) /* if list is NOT empty */ 11085 ccnt++; 11086 } 11087 } 11088 if (ccnt) 11089 io_xri_cmpl = 0; 11090 if (fcnt) 11091 fcp_xri_cmpl = 0; 11092 11093 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11094 nvmet_xri_cmpl = list_empty( 11095 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11096 } 11097 els_xri_cmpl = 11098 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11099 11100 } 11101 } 11102 11103 /** 11104 * lpfc_sli4_hba_unset - Unset the fcoe hba 11105 * @phba: Pointer to HBA context object. 11106 * 11107 * This function is called in the SLI4 code path to reset the HBA's FCoE 11108 * function. The caller is not required to hold any lock. This routine 11109 * issues PCI function reset mailbox command to reset the FCoE function. 11110 * At the end of the function, it calls lpfc_hba_down_post function to 11111 * free any pending commands. 11112 **/ 11113 static void 11114 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 11115 { 11116 int wait_cnt = 0; 11117 LPFC_MBOXQ_t *mboxq; 11118 struct pci_dev *pdev = phba->pcidev; 11119 11120 lpfc_stop_hba_timers(phba); 11121 if (phba->pport) 11122 phba->sli4_hba.intr_enable = 0; 11123 11124 /* 11125 * Gracefully wait out the potential current outstanding asynchronous 11126 * mailbox command. 
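 * If the command does not complete within LPFC_ACTIVE_MBOX_WAIT_CNT polls
 * below, it is forcefully completed with MBX_NOT_FINISHED status.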
11127 */
11128
11129 /* First, block any pending async mailbox command from posted */
11130 spin_lock_irq(&phba->hbalock);
11131 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11132 spin_unlock_irq(&phba->hbalock);
11133 /* Now, try to wait it out if we can */
11134 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11135 msleep(10);
11136 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
11137 break;
11138 }
11139 /* Forcefully release the outstanding mailbox command if timed out */
11140 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11141 spin_lock_irq(&phba->hbalock);
11142 mboxq = phba->sli.mbox_active;
11143 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
11144 __lpfc_mbox_cmpl_put(phba, mboxq);
11145 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11146 phba->sli.mbox_active = NULL;
11147 spin_unlock_irq(&phba->hbalock);
11148 }
11149
11150 /* Abort all iocbs associated with the hba */
11151 lpfc_sli_hba_iocb_abort(phba);
11152
11153 /* Wait for completion of device XRI exchange busy */
11154 lpfc_sli4_xri_exchange_busy_wait(phba);
11155
11156 /* Disable PCI subsystem interrupt */
11157 lpfc_sli4_disable_intr(phba);
11158
11159 /* Disable SR-IOV if enabled */
11160 if (phba->cfg_sriov_nr_virtfn)
11161 pci_disable_sriov(pdev);
11162
11163 /* Stopping the kthread will trigger work_done one more time */
11164 kthread_stop(phba->worker_thread);
11165
11166 /* Disable FW logging to host memory */
11167 lpfc_ras_stop_fwlog(phba);
11168
11169 /* Unset the queues shared with the hardware then release all
11170 * allocated resources.
11171 */
11172 lpfc_sli4_queue_unset(phba);
11173 lpfc_sli4_queue_destroy(phba);
11174
11175 /* Reset SLI4 HBA FCoE function */
11176 lpfc_pci_function_reset(phba);
11177
11178 /* Free RAS DMA memory */
11179 if (phba->ras_fwlog.ras_enabled)
11180 lpfc_sli4_ras_dma_free(phba);
11181
11182 /* Stop the SLI4 device port */
11183 if (phba->pport)
11184 phba->pport->work_port_events = 0;
11185 }
11186
11187 /**
11188 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
11189 * @phba: Pointer to HBA context object.
11190 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
11191 *
11192 * This function is called in the SLI4 code path to read the port's
11193 * sli4 capabilities.
11194 *
11195 * This function may be called from any context that can block-wait
11196 * for the completion. The expectation is that this routine is called
11197 * typically from probe_one or from the online routine.
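 *
 * Return codes
 * 0 - successful
 * 1 - failed to issue or complete the SLI4_PARAMS mailbox command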
11198 **/ 11199 int 11200 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11201 { 11202 int rc; 11203 struct lpfc_mqe *mqe; 11204 struct lpfc_pc_sli4_params *sli4_params; 11205 uint32_t mbox_tmo; 11206 11207 rc = 0; 11208 mqe = &mboxq->u.mqe; 11209 11210 /* Read the port's SLI4 Parameters port capabilities */ 11211 lpfc_pc_sli4_params(mboxq); 11212 if (!phba->sli4_hba.intr_enable) 11213 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11214 else { 11215 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11216 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11217 } 11218 11219 if (unlikely(rc)) 11220 return 1; 11221 11222 sli4_params = &phba->sli4_hba.pc_sli4_params; 11223 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 11224 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 11225 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 11226 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 11227 &mqe->un.sli4_params); 11228 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 11229 &mqe->un.sli4_params); 11230 sli4_params->proto_types = mqe->un.sli4_params.word3; 11231 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 11232 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 11233 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 11234 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 11235 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 11236 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 11237 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 11238 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 11239 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 11240 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 11241 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 11242 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 11243 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 11244 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 11245 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 11246 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 11247 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 11248 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 11249 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 11250 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 11251 11252 /* Make sure that sge_supp_len can be handled by the driver */ 11253 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11254 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11255 11256 return rc; 11257 } 11258 11259 /** 11260 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 11261 * @phba: Pointer to HBA context object. 11262 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11263 * 11264 * This function is called in the SLI4 code path to read the port's 11265 * sli4 capabilities. 11266 * 11267 * This function may be be called from any context that can block-wait 11268 * for the completion. The expectation is that this routine is called 11269 * typically from probe_one or from the online routine. 
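 *
 * Return codes
 * 0 - successful
 * non-zero - mailbox command failed, or -ENODEV when neither NVME nor
 * FCP can be supported on the port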
11270 **/ 11271 int 11272 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11273 { 11274 int rc; 11275 struct lpfc_mqe *mqe = &mboxq->u.mqe; 11276 struct lpfc_pc_sli4_params *sli4_params; 11277 uint32_t mbox_tmo; 11278 int length; 11279 bool exp_wqcq_pages = true; 11280 struct lpfc_sli4_parameters *mbx_sli4_parameters; 11281 11282 /* 11283 * By default, the driver assumes the SLI4 port requires RPI 11284 * header postings. The SLI4_PARAM response will correct this 11285 * assumption. 11286 */ 11287 phba->sli4_hba.rpi_hdrs_in_use = 1; 11288 11289 /* Read the port's SLI4 Config Parameters */ 11290 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 11291 sizeof(struct lpfc_sli4_cfg_mhdr)); 11292 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11293 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 11294 length, LPFC_SLI4_MBX_EMBED); 11295 if (!phba->sli4_hba.intr_enable) 11296 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11297 else { 11298 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11299 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11300 } 11301 if (unlikely(rc)) 11302 return rc; 11303 sli4_params = &phba->sli4_hba.pc_sli4_params; 11304 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 11305 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 11306 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 11307 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 11308 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 11309 mbx_sli4_parameters); 11310 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 11311 mbx_sli4_parameters); 11312 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 11313 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 11314 else 11315 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 11316 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 11317 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 11318 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 11319 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 11320 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 11321 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 11322 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 11323 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 11324 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 11325 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 11326 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 11327 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 11328 mbx_sli4_parameters); 11329 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 11330 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 11331 mbx_sli4_parameters); 11332 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 11333 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 11334 phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) && 11335 bf_get(cfg_xib, mbx_sli4_parameters)); 11336 11337 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) || 11338 !phba->nvme_support) { 11339 phba->nvme_support = 0; 11340 phba->nvmet_support = 0; 11341 phba->cfg_nvmet_mrq = 0; 11342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 11343 "6101 Disabling NVME support: " 11344 "Not supported by firmware: %d %d\n", 11345 bf_get(cfg_nvme, mbx_sli4_parameters), 11346 bf_get(cfg_xib, mbx_sli4_parameters)); 11347 11348 /* If firmware doesn't support NVME, just use SCSI support */ 
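/* If FCP is not enabled either, there is no supported FC4 type left */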
11349 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 11350 return -ENODEV; 11351 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 11352 } 11353 11354 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ 11355 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 11356 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) 11357 phba->cfg_enable_pbde = 0; 11358 11359 /* 11360 * To support Suppress Response feature we must satisfy 3 conditions. 11361 * lpfc_suppress_rsp module parameter must be set (default). 11362 * In SLI4-Parameters Descriptor: 11363 * Extended Inline Buffers (XIB) must be supported. 11364 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 11365 * (double negative). 11366 */ 11367 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 11368 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 11369 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 11370 else 11371 phba->cfg_suppress_rsp = 0; 11372 11373 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 11374 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 11375 11376 /* Make sure that sge_supp_len can be handled by the driver */ 11377 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11378 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11379 11380 /* 11381 * Check whether the adapter supports an embedded copy of the 11382 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 11383 * to use this option, 128-byte WQEs must be used. 11384 */ 11385 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 11386 phba->fcp_embed_io = 1; 11387 else 11388 phba->fcp_embed_io = 0; 11389 11390 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 11391 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 11392 bf_get(cfg_xib, mbx_sli4_parameters), 11393 phba->cfg_enable_pbde, 11394 phba->fcp_embed_io, phba->nvme_support, 11395 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 11396 11397 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 11398 LPFC_SLI_INTF_IF_TYPE_2) && 11399 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 11400 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 11401 exp_wqcq_pages = false; 11402 11403 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 11404 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 11405 exp_wqcq_pages && 11406 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 11407 phba->enab_exp_wqcq_pages = 1; 11408 else 11409 phba->enab_exp_wqcq_pages = 0; 11410 /* 11411 * Check if the SLI port supports MDS Diagnostics 11412 */ 11413 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 11414 phba->mds_diags_support = 1; 11415 else 11416 phba->mds_diags_support = 0; 11417 11418 return 0; 11419 } 11420 11421 /** 11422 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 11423 * @pdev: pointer to PCI device 11424 * @pid: pointer to PCI device identifier 11425 * 11426 * This routine is to be called to attach a device with SLI-3 interface spec 11427 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 11428 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 11429 * information of the device and driver to see if the driver state that it can 11430 * support this kind of device. If the match is successful, the driver core 11431 * invokes this routine. If this routine determines it can claim the HBA, it 11432 * does all the initialization that it needs to do to handle the HBA properly. 
11433 * 11434 * Return code 11435 * 0 - driver can claim the device 11436 * negative value - driver can not claim the device 11437 **/ 11438 static int 11439 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 11440 { 11441 struct lpfc_hba *phba; 11442 struct lpfc_vport *vport = NULL; 11443 struct Scsi_Host *shost = NULL; 11444 int error; 11445 uint32_t cfg_mode, intr_mode; 11446 11447 /* Allocate memory for HBA structure */ 11448 phba = lpfc_hba_alloc(pdev); 11449 if (!phba) 11450 return -ENOMEM; 11451 11452 /* Perform generic PCI device enabling operation */ 11453 error = lpfc_enable_pci_dev(phba); 11454 if (error) 11455 goto out_free_phba; 11456 11457 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 11458 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 11459 if (error) 11460 goto out_disable_pci_dev; 11461 11462 /* Set up SLI-3 specific device PCI memory space */ 11463 error = lpfc_sli_pci_mem_setup(phba); 11464 if (error) { 11465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11466 "1402 Failed to set up pci memory space.\n"); 11467 goto out_disable_pci_dev; 11468 } 11469 11470 /* Set up SLI-3 specific device driver resources */ 11471 error = lpfc_sli_driver_resource_setup(phba); 11472 if (error) { 11473 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11474 "1404 Failed to set up driver resource.\n"); 11475 goto out_unset_pci_mem_s3; 11476 } 11477 11478 /* Initialize and populate the iocb list per host */ 11479 11480 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 11481 if (error) { 11482 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11483 "1405 Failed to initialize iocb list.\n"); 11484 goto out_unset_driver_resource_s3; 11485 } 11486 11487 /* Set up common device driver resources */ 11488 error = lpfc_setup_driver_resource_phase2(phba); 11489 if (error) { 11490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11491 "1406 Failed to set up driver resource.\n"); 11492 goto out_free_iocb_list; 11493 } 11494 11495 /* Get the default values for Model Name and Description */ 11496 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 11497 11498 /* Create SCSI host to the physical port */ 11499 error = lpfc_create_shost(phba); 11500 if (error) { 11501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11502 "1407 Failed to create scsi host.\n"); 11503 goto out_unset_driver_resource; 11504 } 11505 11506 /* Configure sysfs attributes */ 11507 vport = phba->pport; 11508 error = lpfc_alloc_sysfs_attr(vport); 11509 if (error) { 11510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11511 "1476 Failed to allocate sysfs attr\n"); 11512 goto out_destroy_shost; 11513 } 11514 11515 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 11516 /* Now, trying to enable interrupt and bring up the device */ 11517 cfg_mode = phba->cfg_use_msi; 11518 while (true) { 11519 /* Put device to a known state before enabling interrupt */ 11520 lpfc_stop_port(phba); 11521 /* Configure and enable interrupt */ 11522 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 11523 if (intr_mode == LPFC_INTR_ERROR) { 11524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11525 "0431 Failed to enable interrupt.\n"); 11526 error = -ENODEV; 11527 goto out_free_sysfs_attr; 11528 } 11529 /* SLI-3 HBA setup */ 11530 if (lpfc_sli_hba_setup(phba)) { 11531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11532 "1477 Failed to set up hba\n"); 11533 error = -ENODEV; 11534 goto out_remove_device; 11535 } 11536 11537 /* Wait 50ms for the interrupts of previous mailbox commands */ 11538 msleep(50); 11539 /* 
Check active interrupts on message signaled interrupts */ 11540 if (intr_mode == 0 || 11541 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 11542 /* Log the current active interrupt mode */ 11543 phba->intr_mode = intr_mode; 11544 lpfc_log_intr_mode(phba, intr_mode); 11545 break; 11546 } else { 11547 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11548 "0447 Configure interrupt mode (%d) " 11549 "failed active interrupt test.\n", 11550 intr_mode); 11551 /* Disable the current interrupt mode */ 11552 lpfc_sli_disable_intr(phba); 11553 /* Try next level of interrupt mode */ 11554 cfg_mode = --intr_mode; 11555 } 11556 } 11557 11558 /* Perform post initialization setup */ 11559 lpfc_post_init_setup(phba); 11560 11561 /* Check if there are static vports to be created. */ 11562 lpfc_create_static_vport(phba); 11563 11564 return 0; 11565 11566 out_remove_device: 11567 lpfc_unset_hba(phba); 11568 out_free_sysfs_attr: 11569 lpfc_free_sysfs_attr(vport); 11570 out_destroy_shost: 11571 lpfc_destroy_shost(phba); 11572 out_unset_driver_resource: 11573 lpfc_unset_driver_resource_phase2(phba); 11574 out_free_iocb_list: 11575 lpfc_free_iocb_list(phba); 11576 out_unset_driver_resource_s3: 11577 lpfc_sli_driver_resource_unset(phba); 11578 out_unset_pci_mem_s3: 11579 lpfc_sli_pci_mem_unset(phba); 11580 out_disable_pci_dev: 11581 lpfc_disable_pci_dev(phba); 11582 if (shost) 11583 scsi_host_put(shost); 11584 out_free_phba: 11585 lpfc_hba_free(phba); 11586 return error; 11587 } 11588 11589 /** 11590 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 11591 * @pdev: pointer to PCI device 11592 * 11593 * This routine is to be called to disattach a device with SLI-3 interface 11594 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 11595 * removed from PCI bus, it performs all the necessary cleanup for the HBA 11596 * device to be removed from the PCI subsystem properly. 11597 **/ 11598 static void 11599 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 11600 { 11601 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11602 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 11603 struct lpfc_vport **vports; 11604 struct lpfc_hba *phba = vport->phba; 11605 int i; 11606 11607 spin_lock_irq(&phba->hbalock); 11608 vport->load_flag |= FC_UNLOADING; 11609 spin_unlock_irq(&phba->hbalock); 11610 11611 lpfc_free_sysfs_attr(vport); 11612 11613 /* Release all the vports against this physical port */ 11614 vports = lpfc_create_vport_work_array(phba); 11615 if (vports != NULL) 11616 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11617 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 11618 continue; 11619 fc_vport_terminate(vports[i]->fc_vport); 11620 } 11621 lpfc_destroy_vport_work_array(phba, vports); 11622 11623 /* Remove FC host and then SCSI host with the physical port */ 11624 fc_remove_host(shost); 11625 scsi_remove_host(shost); 11626 11627 lpfc_cleanup(vport); 11628 11629 /* 11630 * Bring down the SLI Layer. This step disable all interrupts, 11631 * clears the rings, discards all mailbox commands, and resets 11632 * the HBA. 
11633 */ 11634 11635 /* HBA interrupt will be disabled after this call */ 11636 lpfc_sli_hba_down(phba); 11637 /* Stop kthread signal shall trigger work_done one more time */ 11638 kthread_stop(phba->worker_thread); 11639 /* Final cleanup of txcmplq and reset the HBA */ 11640 lpfc_sli_brdrestart(phba); 11641 11642 kfree(phba->vpi_bmask); 11643 kfree(phba->vpi_ids); 11644 11645 lpfc_stop_hba_timers(phba); 11646 spin_lock_irq(&phba->port_list_lock); 11647 list_del_init(&vport->listentry); 11648 spin_unlock_irq(&phba->port_list_lock); 11649 11650 lpfc_debugfs_terminate(vport); 11651 11652 /* Disable SR-IOV if enabled */ 11653 if (phba->cfg_sriov_nr_virtfn) 11654 pci_disable_sriov(pdev); 11655 11656 /* Disable interrupt */ 11657 lpfc_sli_disable_intr(phba); 11658 11659 scsi_host_put(shost); 11660 11661 /* 11662 * Call scsi_free before mem_free since scsi bufs are released to their 11663 * corresponding pools here. 11664 */ 11665 lpfc_scsi_free(phba); 11666 lpfc_free_iocb_list(phba); 11667 11668 lpfc_mem_free_all(phba); 11669 11670 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 11671 phba->hbqslimp.virt, phba->hbqslimp.phys); 11672 11673 /* Free resources associated with SLI2 interface */ 11674 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 11675 phba->slim2p.virt, phba->slim2p.phys); 11676 11677 /* unmap adapter SLIM and Control Registers */ 11678 iounmap(phba->ctrl_regs_memmap_p); 11679 iounmap(phba->slim_memmap_p); 11680 11681 lpfc_hba_free(phba); 11682 11683 pci_release_mem_regions(pdev); 11684 pci_disable_device(pdev); 11685 } 11686 11687 /** 11688 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 11689 * @pdev: pointer to PCI device 11690 * @msg: power management message 11691 * 11692 * This routine is to be called from the kernel's PCI subsystem to support 11693 * system Power Management (PM) to device with SLI-3 interface spec. When 11694 * PM invokes this method, it quiesces the device by stopping the driver's 11695 * worker thread for the device, turning off device's interrupt and DMA, 11696 * and bring the device offline. Note that as the driver implements the 11697 * minimum PM requirements to a power-aware driver's PM support for the 11698 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 11699 * to the suspend() method call will be treated as SUSPEND and the driver will 11700 * fully reinitialize its device during resume() method call, the driver will 11701 * set device to PCI_D3hot state in PCI config space instead of setting it 11702 * according to the @msg provided by the PM. 
11703 * 11704 * Return code 11705 * 0 - driver suspended the device 11706 * Error otherwise 11707 **/ 11708 static int 11709 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 11710 { 11711 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11712 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11713 11714 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11715 "0473 PCI device Power Management suspend.\n"); 11716 11717 /* Bring down the device */ 11718 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11719 lpfc_offline(phba); 11720 kthread_stop(phba->worker_thread); 11721 11722 /* Disable interrupt from device */ 11723 lpfc_sli_disable_intr(phba); 11724 11725 /* Save device state to PCI config space */ 11726 pci_save_state(pdev); 11727 pci_set_power_state(pdev, PCI_D3hot); 11728 11729 return 0; 11730 } 11731 11732 /** 11733 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 11734 * @pdev: pointer to PCI device 11735 * 11736 * This routine is to be called from the kernel's PCI subsystem to support 11737 * system Power Management (PM) to device with SLI-3 interface spec. When PM 11738 * invokes this method, it restores the device's PCI config space state and 11739 * fully reinitializes the device and brings it online. Note that as the 11740 * driver implements the minimum PM requirements to a power-aware driver's 11741 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 11742 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 11743 * driver will fully reinitialize its device during resume() method call, 11744 * the device will be set to PCI_D0 directly in PCI config space before 11745 * restoring the state. 11746 * 11747 * Return code 11748 * 0 - driver suspended the device 11749 * Error otherwise 11750 **/ 11751 static int 11752 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 11753 { 11754 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11755 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11756 uint32_t intr_mode; 11757 int error; 11758 11759 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11760 "0452 PCI device Power Management resume.\n"); 11761 11762 /* Restore device state from PCI config space */ 11763 pci_set_power_state(pdev, PCI_D0); 11764 pci_restore_state(pdev); 11765 11766 /* 11767 * As the new kernel behavior of pci_restore_state() API call clears 11768 * device saved_state flag, need to save the restored state again. 11769 */ 11770 pci_save_state(pdev); 11771 11772 if (pdev->is_busmaster) 11773 pci_set_master(pdev); 11774 11775 /* Startup the kernel thread for this host adapter. 
*/ 11776 phba->worker_thread = kthread_run(lpfc_do_work, phba, 11777 "lpfc_worker_%d", phba->brd_no); 11778 if (IS_ERR(phba->worker_thread)) { 11779 error = PTR_ERR(phba->worker_thread); 11780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11781 "0434 PM resume failed to start worker " 11782 "thread: error=x%x.\n", error); 11783 return error; 11784 } 11785 11786 /* Configure and enable interrupt */ 11787 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 11788 if (intr_mode == LPFC_INTR_ERROR) { 11789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11790 "0430 PM resume Failed to enable interrupt\n"); 11791 return -EIO; 11792 } else 11793 phba->intr_mode = intr_mode; 11794 11795 /* Restart HBA and bring it online */ 11796 lpfc_sli_brdrestart(phba); 11797 lpfc_online(phba); 11798 11799 /* Log the current active interrupt mode */ 11800 lpfc_log_intr_mode(phba, phba->intr_mode); 11801 11802 return 0; 11803 } 11804 11805 /** 11806 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 11807 * @phba: pointer to lpfc hba data structure. 11808 * 11809 * This routine is called to prepare the SLI3 device for PCI slot recover. It 11810 * aborts all the outstanding SCSI I/Os to the pci device. 11811 **/ 11812 static void 11813 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 11814 { 11815 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11816 "2723 PCI channel I/O abort preparing for recovery\n"); 11817 11818 /* 11819 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 11820 * and let the SCSI mid-layer to retry them to recover. 11821 */ 11822 lpfc_sli_abort_fcp_rings(phba); 11823 } 11824 11825 /** 11826 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 11827 * @phba: pointer to lpfc hba data structure. 11828 * 11829 * This routine is called to prepare the SLI3 device for PCI slot reset. It 11830 * disables the device interrupt and pci device, and aborts the internal FCP 11831 * pending I/Os. 11832 **/ 11833 static void 11834 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 11835 { 11836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11837 "2710 PCI channel disable preparing for reset\n"); 11838 11839 /* Block any management I/Os to the device */ 11840 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 11841 11842 /* Block all SCSI devices' I/Os on the host */ 11843 lpfc_scsi_dev_block(phba); 11844 11845 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 11846 lpfc_sli_flush_fcp_rings(phba); 11847 11848 /* stop all timers */ 11849 lpfc_stop_hba_timers(phba); 11850 11851 /* Disable interrupt and pci device */ 11852 lpfc_sli_disable_intr(phba); 11853 pci_disable_device(phba->pcidev); 11854 } 11855 11856 /** 11857 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 11858 * @phba: pointer to lpfc hba data structure. 11859 * 11860 * This routine is called to prepare the SLI3 device for PCI slot permanently 11861 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 11862 * pending I/Os. 
11863 **/ 11864 static void 11865 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 11866 { 11867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11868 "2711 PCI channel permanent disable for failure\n"); 11869 /* Block all SCSI devices' I/Os on the host */ 11870 lpfc_scsi_dev_block(phba); 11871 11872 /* stop all timers */ 11873 lpfc_stop_hba_timers(phba); 11874 11875 /* Clean up all driver's outstanding SCSI I/Os */ 11876 lpfc_sli_flush_fcp_rings(phba); 11877 } 11878 11879 /** 11880 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 11881 * @pdev: pointer to PCI device. 11882 * @state: the current PCI connection state. 11883 * 11884 * This routine is called from the PCI subsystem for I/O error handling to 11885 * device with SLI-3 interface spec. This function is called by the PCI 11886 * subsystem after a PCI bus error affecting this device has been detected. 11887 * When this function is invoked, it will need to stop all the I/Os and 11888 * interrupt(s) to the device. Once that is done, it will return 11889 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 11890 * as desired. 11891 * 11892 * Return codes 11893 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 11894 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 11895 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11896 **/ 11897 static pci_ers_result_t 11898 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 11899 { 11900 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11901 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11902 11903 switch (state) { 11904 case pci_channel_io_normal: 11905 /* Non-fatal error, prepare for recovery */ 11906 lpfc_sli_prep_dev_for_recover(phba); 11907 return PCI_ERS_RESULT_CAN_RECOVER; 11908 case pci_channel_io_frozen: 11909 /* Fatal error, prepare for slot reset */ 11910 lpfc_sli_prep_dev_for_reset(phba); 11911 return PCI_ERS_RESULT_NEED_RESET; 11912 case pci_channel_io_perm_failure: 11913 /* Permanent failure, prepare for device down */ 11914 lpfc_sli_prep_dev_for_perm_failure(phba); 11915 return PCI_ERS_RESULT_DISCONNECT; 11916 default: 11917 /* Unknown state, prepare and request slot reset */ 11918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11919 "0472 Unknown PCI error state: x%x\n", state); 11920 lpfc_sli_prep_dev_for_reset(phba); 11921 return PCI_ERS_RESULT_NEED_RESET; 11922 } 11923 } 11924 11925 /** 11926 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 11927 * @pdev: pointer to PCI device. 11928 * 11929 * This routine is called from the PCI subsystem for error handling to 11930 * device with SLI-3 interface spec. This is called after PCI bus has been 11931 * reset to restart the PCI card from scratch, as if from a cold-boot. 11932 * During the PCI subsystem error recovery, after driver returns 11933 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 11934 * recovery and then call this routine before calling the .resume method 11935 * to recover the device. This function will initialize the HBA device, 11936 * enable the interrupt, but it will just put the HBA to offline state 11937 * without passing any I/O traffic. 
11938 * 11939 * Return codes 11940 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 11941 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11942 */ 11943 static pci_ers_result_t 11944 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 11945 { 11946 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11947 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11948 struct lpfc_sli *psli = &phba->sli; 11949 uint32_t intr_mode; 11950 11951 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 11952 if (pci_enable_device_mem(pdev)) { 11953 printk(KERN_ERR "lpfc: Cannot re-enable " 11954 "PCI device after reset.\n"); 11955 return PCI_ERS_RESULT_DISCONNECT; 11956 } 11957 11958 pci_restore_state(pdev); 11959 11960 /* 11961 * As the new kernel behavior of pci_restore_state() API call clears 11962 * device saved_state flag, need to save the restored state again. 11963 */ 11964 pci_save_state(pdev); 11965 11966 if (pdev->is_busmaster) 11967 pci_set_master(pdev); 11968 11969 spin_lock_irq(&phba->hbalock); 11970 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 11971 spin_unlock_irq(&phba->hbalock); 11972 11973 /* Configure and enable interrupt */ 11974 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 11975 if (intr_mode == LPFC_INTR_ERROR) { 11976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11977 "0427 Cannot re-enable interrupt after " 11978 "slot reset.\n"); 11979 return PCI_ERS_RESULT_DISCONNECT; 11980 } else 11981 phba->intr_mode = intr_mode; 11982 11983 /* Take device offline, it will perform cleanup */ 11984 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11985 lpfc_offline(phba); 11986 lpfc_sli_brdrestart(phba); 11987 11988 /* Log the current active interrupt mode */ 11989 lpfc_log_intr_mode(phba, phba->intr_mode); 11990 11991 return PCI_ERS_RESULT_RECOVERED; 11992 } 11993 11994 /** 11995 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 11996 * @pdev: pointer to PCI device 11997 * 11998 * This routine is called from the PCI subsystem for error handling to device 11999 * with SLI-3 interface spec. It is called when kernel error recovery tells 12000 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 12001 * error recovery. After this call, traffic can start to flow from this device 12002 * again. 12003 */ 12004 static void 12005 lpfc_io_resume_s3(struct pci_dev *pdev) 12006 { 12007 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12008 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12009 12010 /* Bring device online, it will be no-op for non-fatal error resume */ 12011 lpfc_online(phba); 12012 } 12013 12014 /** 12015 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 12016 * @phba: pointer to lpfc hba data structure. 12017 * 12018 * returns the number of ELS/CT IOCBs to reserve 12019 **/ 12020 int 12021 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 12022 { 12023 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 12024 12025 if (phba->sli_rev == LPFC_SLI_REV4) { 12026 if (max_xri <= 100) 12027 return 10; 12028 else if (max_xri <= 256) 12029 return 25; 12030 else if (max_xri <= 512) 12031 return 50; 12032 else if (max_xri <= 1024) 12033 return 100; 12034 else if (max_xri <= 1536) 12035 return 150; 12036 else if (max_xri <= 2048) 12037 return 200; 12038 else 12039 return 250; 12040 } else 12041 return 0; 12042 } 12043 12044 /** 12045 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 12046 * @phba: pointer to lpfc hba data structure. 
12047 * 12048 * returns the number of ELS/CT + NVMET IOCBs to reserve 12049 **/ 12050 int 12051 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 12052 { 12053 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 12054 12055 if (phba->nvmet_support) 12056 max_xri += LPFC_NVMET_BUF_POST; 12057 return max_xri; 12058 } 12059 12060 12061 static void 12062 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 12063 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 12064 const struct firmware *fw) 12065 { 12066 if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) || 12067 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && 12068 magic_number != MAGIC_NUMER_G6) || 12069 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && 12070 magic_number != MAGIC_NUMER_G7)) 12071 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12072 "3030 This firmware version is not supported on " 12073 "this HBA model. Device:%x Magic:%x Type:%x " 12074 "ID:%x Size %d %zd\n", 12075 phba->pcidev->device, magic_number, ftype, fid, 12076 fsize, fw->size); 12077 else 12078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12079 "3022 FW Download failed. Device:%x Magic:%x Type:%x " 12080 "ID:%x Size %d %zd\n", 12081 phba->pcidev->device, magic_number, ftype, fid, 12082 fsize, fw->size); 12083 } 12084 12085 12086 /** 12087 * lpfc_write_firmware - attempt to write a firmware image to the port 12088 * @fw: pointer to firmware image returned from request_firmware. 12089 * @phba: pointer to lpfc hba data structure. 12090 * 12091 **/ 12092 static void 12093 lpfc_write_firmware(const struct firmware *fw, void *context) 12094 { 12095 struct lpfc_hba *phba = (struct lpfc_hba *)context; 12096 char fwrev[FW_REV_STR_SIZE]; 12097 struct lpfc_grp_hdr *image; 12098 struct list_head dma_buffer_list; 12099 int i, rc = 0; 12100 struct lpfc_dmabuf *dmabuf, *next; 12101 uint32_t offset = 0, temp_offset = 0; 12102 uint32_t magic_number, ftype, fid, fsize; 12103 12104 /* It can be null in no-wait mode, sanity check */ 12105 if (!fw) { 12106 rc = -ENXIO; 12107 goto out; 12108 } 12109 image = (struct lpfc_grp_hdr *)fw->data; 12110 12111 magic_number = be32_to_cpu(image->magic_number); 12112 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 12113 fid = bf_get_be32(lpfc_grp_hdr_id, image); 12114 fsize = be32_to_cpu(image->size); 12115 12116 INIT_LIST_HEAD(&dma_buffer_list); 12117 lpfc_decode_firmware_rev(phba, fwrev, 1); 12118 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 12119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12120 "3023 Updating Firmware, Current Version:%s " 12121 "New Version:%s\n", 12122 fwrev, image->revision); 12123 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 12124 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 12125 GFP_KERNEL); 12126 if (!dmabuf) { 12127 rc = -ENOMEM; 12128 goto release_out; 12129 } 12130 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12131 SLI4_PAGE_SIZE, 12132 &dmabuf->phys, 12133 GFP_KERNEL); 12134 if (!dmabuf->virt) { 12135 kfree(dmabuf); 12136 rc = -ENOMEM; 12137 goto release_out; 12138 } 12139 list_add_tail(&dmabuf->list, &dma_buffer_list); 12140 } 12141 while (offset < fw->size) { 12142 temp_offset = offset; 12143 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 12144 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 12145 memcpy(dmabuf->virt, 12146 fw->data + temp_offset, 12147 fw->size - temp_offset); 12148 temp_offset = fw->size; 12149 break; 12150 } 12151 memcpy(dmabuf->virt, fw->data + temp_offset, 12152 SLI4_PAGE_SIZE); 12153 temp_offset += SLI4_PAGE_SIZE; 
12154 } 12155 rc = lpfc_wr_object(phba, &dma_buffer_list, 12156 (fw->size - offset), &offset); 12157 if (rc) { 12158 lpfc_log_write_firmware_error(phba, offset, 12159 magic_number, ftype, fid, fsize, fw); 12160 goto release_out; 12161 } 12162 } 12163 rc = offset; 12164 } else 12165 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12166 "3029 Skipped Firmware update, Current " 12167 "Version:%s New Version:%s\n", 12168 fwrev, image->revision); 12169 12170 release_out: 12171 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 12172 list_del(&dmabuf->list); 12173 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 12174 dmabuf->virt, dmabuf->phys); 12175 kfree(dmabuf); 12176 } 12177 release_firmware(fw); 12178 out: 12179 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12180 "3024 Firmware update done: %d.\n", rc); 12181 return; 12182 } 12183 12184 /** 12185 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 12186 * @phba: pointer to lpfc hba data structure. 12187 * 12188 * This routine is called to perform Linux generic firmware upgrade on device 12189 * that supports such feature. 12190 **/ 12191 int 12192 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 12193 { 12194 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 12195 int ret; 12196 const struct firmware *fw; 12197 12198 /* Only supported on SLI4 interface type 2 for now */ 12199 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 12200 LPFC_SLI_INTF_IF_TYPE_2) 12201 return -EPERM; 12202 12203 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 12204 12205 if (fw_upgrade == INT_FW_UPGRADE) { 12206 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 12207 file_name, &phba->pcidev->dev, 12208 GFP_KERNEL, (void *)phba, 12209 lpfc_write_firmware); 12210 } else if (fw_upgrade == RUN_FW_UPGRADE) { 12211 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 12212 if (!ret) 12213 lpfc_write_firmware(fw, (void *)phba); 12214 } else { 12215 ret = -EINVAL; 12216 } 12217 12218 return ret; 12219 } 12220 12221 /** 12222 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 12223 * @pdev: pointer to PCI device 12224 * @pid: pointer to PCI device identifier 12225 * 12226 * This routine is called from the kernel's PCI subsystem to device with 12227 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 12228 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 12229 * information of the device and driver to see if the driver state that it 12230 * can support this kind of device. If the match is successful, the driver 12231 * core invokes this routine. If this routine determines it can claim the HBA, 12232 * it does all the initialization that it needs to do to handle the HBA 12233 * properly. 
12234 * 12235 * Return code 12236 * 0 - driver can claim the device 12237 * negative value - driver can not claim the device 12238 **/ 12239 static int 12240 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 12241 { 12242 struct lpfc_hba *phba; 12243 struct lpfc_vport *vport = NULL; 12244 struct Scsi_Host *shost = NULL; 12245 int error; 12246 uint32_t cfg_mode, intr_mode; 12247 12248 /* Allocate memory for HBA structure */ 12249 phba = lpfc_hba_alloc(pdev); 12250 if (!phba) 12251 return -ENOMEM; 12252 12253 /* Perform generic PCI device enabling operation */ 12254 error = lpfc_enable_pci_dev(phba); 12255 if (error) 12256 goto out_free_phba; 12257 12258 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 12259 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 12260 if (error) 12261 goto out_disable_pci_dev; 12262 12263 /* Set up SLI-4 specific device PCI memory space */ 12264 error = lpfc_sli4_pci_mem_setup(phba); 12265 if (error) { 12266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12267 "1410 Failed to set up pci memory space.\n"); 12268 goto out_disable_pci_dev; 12269 } 12270 12271 /* Set up SLI-4 Specific device driver resources */ 12272 error = lpfc_sli4_driver_resource_setup(phba); 12273 if (error) { 12274 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12275 "1412 Failed to set up driver resource.\n"); 12276 goto out_unset_pci_mem_s4; 12277 } 12278 12279 INIT_LIST_HEAD(&phba->active_rrq_list); 12280 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 12281 12282 /* Set up common device driver resources */ 12283 error = lpfc_setup_driver_resource_phase2(phba); 12284 if (error) { 12285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12286 "1414 Failed to set up driver resource.\n"); 12287 goto out_unset_driver_resource_s4; 12288 } 12289 12290 /* Get the default values for Model Name and Description */ 12291 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 12292 12293 /* Now, trying to enable interrupt and bring up the device */ 12294 cfg_mode = phba->cfg_use_msi; 12295 12296 /* Put device to a known state before enabling interrupt */ 12297 phba->pport = NULL; 12298 lpfc_stop_port(phba); 12299 12300 /* Configure and enable interrupt */ 12301 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 12302 if (intr_mode == LPFC_INTR_ERROR) { 12303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12304 "0426 Failed to enable interrupt.\n"); 12305 error = -ENODEV; 12306 goto out_unset_driver_resource; 12307 } 12308 /* Default to single EQ for non-MSI-X */ 12309 if (phba->intr_type != MSIX) { 12310 phba->cfg_irq_chann = 1; 12311 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12312 if (phba->nvmet_support) 12313 phba->cfg_nvmet_mrq = 1; 12314 } 12315 } 12316 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 12317 12318 /* Create SCSI host to the physical port */ 12319 error = lpfc_create_shost(phba); 12320 if (error) { 12321 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12322 "1415 Failed to create scsi host.\n"); 12323 goto out_disable_intr; 12324 } 12325 vport = phba->pport; 12326 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 12327 12328 /* Configure sysfs attributes */ 12329 error = lpfc_alloc_sysfs_attr(vport); 12330 if (error) { 12331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12332 "1416 Failed to allocate sysfs attr\n"); 12333 goto out_destroy_shost; 12334 } 12335 12336 /* Set up SLI-4 HBA */ 12337 if (lpfc_sli4_hba_setup(phba)) { 12338 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12339 "1421 Failed to set up hba\n"); 12340 error = 
-ENODEV; 12341 goto out_free_sysfs_attr; 12342 } 12343 12344 /* Log the current active interrupt mode */ 12345 phba->intr_mode = intr_mode; 12346 lpfc_log_intr_mode(phba, intr_mode); 12347 12348 /* Perform post initialization setup */ 12349 lpfc_post_init_setup(phba); 12350 12351 /* NVME support in FW earlier in the driver load corrects the 12352 * FC4 type making a check for nvme_support unnecessary. 12353 */ 12354 if (phba->nvmet_support == 0) { 12355 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12356 /* Create NVME binding with nvme_fc_transport. This 12357 * ensures the vport is initialized. If the localport 12358 * create fails, it should not unload the driver to 12359 * support field issues. 12360 */ 12361 error = lpfc_nvme_create_localport(vport); 12362 if (error) { 12363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12364 "6004 NVME registration " 12365 "failed, error x%x\n", 12366 error); 12367 } 12368 } 12369 } 12370 12371 /* check for firmware upgrade or downgrade */ 12372 if (phba->cfg_request_firmware_upgrade) 12373 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 12374 12375 /* Check if there are static vports to be created. */ 12376 lpfc_create_static_vport(phba); 12377 12378 /* Enable RAS FW log support */ 12379 lpfc_sli4_ras_setup(phba); 12380 12381 return 0; 12382 12383 out_free_sysfs_attr: 12384 lpfc_free_sysfs_attr(vport); 12385 out_destroy_shost: 12386 lpfc_destroy_shost(phba); 12387 out_disable_intr: 12388 lpfc_sli4_disable_intr(phba); 12389 out_unset_driver_resource: 12390 lpfc_unset_driver_resource_phase2(phba); 12391 out_unset_driver_resource_s4: 12392 lpfc_sli4_driver_resource_unset(phba); 12393 out_unset_pci_mem_s4: 12394 lpfc_sli4_pci_mem_unset(phba); 12395 out_disable_pci_dev: 12396 lpfc_disable_pci_dev(phba); 12397 if (shost) 12398 scsi_host_put(shost); 12399 out_free_phba: 12400 lpfc_hba_free(phba); 12401 return error; 12402 } 12403 12404 /** 12405 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 12406 * @pdev: pointer to PCI device 12407 * 12408 * This routine is called from the kernel's PCI subsystem to device with 12409 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 12410 * removed from PCI bus, it performs all the necessary cleanup for the HBA 12411 * device to be removed from the PCI subsystem properly. 12412 **/ 12413 static void 12414 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 12415 { 12416 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12417 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 12418 struct lpfc_vport **vports; 12419 struct lpfc_hba *phba = vport->phba; 12420 int i; 12421 12422 /* Mark the device unloading flag */ 12423 spin_lock_irq(&phba->hbalock); 12424 vport->load_flag |= FC_UNLOADING; 12425 spin_unlock_irq(&phba->hbalock); 12426 12427 /* Free the HBA sysfs attributes */ 12428 lpfc_free_sysfs_attr(vport); 12429 12430 /* Release all the vports against this physical port */ 12431 vports = lpfc_create_vport_work_array(phba); 12432 if (vports != NULL) 12433 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 12434 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 12435 continue; 12436 fc_vport_terminate(vports[i]->fc_vport); 12437 } 12438 lpfc_destroy_vport_work_array(phba, vports); 12439 12440 /* Remove FC host and then SCSI host with the physical port */ 12441 fc_remove_host(shost); 12442 scsi_remove_host(shost); 12443 12444 /* Perform ndlp cleanup on the physical port. 
The nvme and nvmet 12445 * localports are destroyed after to cleanup all transport memory. 12446 */ 12447 lpfc_cleanup(vport); 12448 lpfc_nvmet_destroy_targetport(phba); 12449 lpfc_nvme_destroy_localport(vport); 12450 12451 /* De-allocate multi-XRI pools */ 12452 if (phba->cfg_xri_rebalancing) 12453 lpfc_destroy_multixri_pools(phba); 12454 12455 /* 12456 * Bring down the SLI Layer. This step disables all interrupts, 12457 * clears the rings, discards all mailbox commands, and resets 12458 * the HBA FCoE function. 12459 */ 12460 lpfc_debugfs_terminate(vport); 12461 12462 lpfc_stop_hba_timers(phba); 12463 spin_lock_irq(&phba->port_list_lock); 12464 list_del_init(&vport->listentry); 12465 spin_unlock_irq(&phba->port_list_lock); 12466 12467 /* Perform scsi free before driver resource_unset since scsi 12468 * buffers are released to their corresponding pools here. 12469 */ 12470 lpfc_io_free(phba); 12471 lpfc_free_iocb_list(phba); 12472 lpfc_sli4_hba_unset(phba); 12473 12474 lpfc_unset_driver_resource_phase2(phba); 12475 lpfc_sli4_driver_resource_unset(phba); 12476 12477 /* Unmap adapter Control and Doorbell registers */ 12478 lpfc_sli4_pci_mem_unset(phba); 12479 12480 /* Release PCI resources and disable device's PCI function */ 12481 scsi_host_put(shost); 12482 lpfc_disable_pci_dev(phba); 12483 12484 /* Finally, free the driver's device data structure */ 12485 lpfc_hba_free(phba); 12486 12487 return; 12488 } 12489 12490 /** 12491 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 12492 * @pdev: pointer to PCI device 12493 * @msg: power management message 12494 * 12495 * This routine is called from the kernel's PCI subsystem to support system 12496 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 12497 * this method, it quiesces the device by stopping the driver's worker 12498 * thread for the device, turning off device's interrupt and DMA, and bring 12499 * the device offline. Note that as the driver implements the minimum PM 12500 * requirements to a power-aware driver's PM support for suspend/resume -- all 12501 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 12502 * method call will be treated as SUSPEND and the driver will fully 12503 * reinitialize its device during resume() method call, the driver will set 12504 * device to PCI_D3hot state in PCI config space instead of setting it 12505 * according to the @msg provided by the PM. 
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that, because
 * the driver implements only the minimum PM requirements of a power-aware
 * driver -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
 * suspend() method are treated as SUSPEND and the device is fully
 * reinitialized during the resume() call -- the device is set to PCI_D0
 * directly in PCI config space before its state is restored.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the pci_restore_state() API call clears the device's
	 * saved_state flag, we need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Start up the kernel thread for this host adapter.
*/ 12580 phba->worker_thread = kthread_run(lpfc_do_work, phba, 12581 "lpfc_worker_%d", phba->brd_no); 12582 if (IS_ERR(phba->worker_thread)) { 12583 error = PTR_ERR(phba->worker_thread); 12584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12585 "0293 PM resume failed to start worker " 12586 "thread: error=x%x.\n", error); 12587 return error; 12588 } 12589 12590 /* Configure and enable interrupt */ 12591 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 12592 if (intr_mode == LPFC_INTR_ERROR) { 12593 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12594 "0294 PM resume Failed to enable interrupt\n"); 12595 return -EIO; 12596 } else 12597 phba->intr_mode = intr_mode; 12598 12599 /* Restart HBA and bring it online */ 12600 lpfc_sli_brdrestart(phba); 12601 lpfc_online(phba); 12602 12603 /* Log the current active interrupt mode */ 12604 lpfc_log_intr_mode(phba, phba->intr_mode); 12605 12606 return 0; 12607 } 12608 12609 /** 12610 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 12611 * @phba: pointer to lpfc hba data structure. 12612 * 12613 * This routine is called to prepare the SLI4 device for PCI slot recover. It 12614 * aborts all the outstanding SCSI I/Os to the pci device. 12615 **/ 12616 static void 12617 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 12618 { 12619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12620 "2828 PCI channel I/O abort preparing for recovery\n"); 12621 /* 12622 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 12623 * and let the SCSI mid-layer to retry them to recover. 12624 */ 12625 lpfc_sli_abort_fcp_rings(phba); 12626 } 12627 12628 /** 12629 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 12630 * @phba: pointer to lpfc hba data structure. 12631 * 12632 * This routine is called to prepare the SLI4 device for PCI slot reset. It 12633 * disables the device interrupt and pci device, and aborts the internal FCP 12634 * pending I/Os. 12635 **/ 12636 static void 12637 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 12638 { 12639 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12640 "2826 PCI channel disable preparing for reset\n"); 12641 12642 /* Block any management I/Os to the device */ 12643 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 12644 12645 /* Block all SCSI devices' I/Os on the host */ 12646 lpfc_scsi_dev_block(phba); 12647 12648 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 12649 lpfc_sli_flush_fcp_rings(phba); 12650 12651 /* Flush the outstanding NVME IOs if fc4 type enabled. */ 12652 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12653 lpfc_sli_flush_nvme_rings(phba); 12654 12655 /* stop all timers */ 12656 lpfc_stop_hba_timers(phba); 12657 12658 /* Disable interrupt and pci device */ 12659 lpfc_sli4_disable_intr(phba); 12660 lpfc_sli4_queue_destroy(phba); 12661 pci_disable_device(phba->pcidev); 12662 } 12663 12664 /** 12665 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 12666 * @phba: pointer to lpfc hba data structure. 12667 * 12668 * This routine is called to prepare the SLI4 device for PCI slot permanently 12669 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 12670 * pending I/Os. 
12671 **/ 12672 static void 12673 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12674 { 12675 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12676 "2827 PCI channel permanent disable for failure\n"); 12677 12678 /* Block all SCSI devices' I/Os on the host */ 12679 lpfc_scsi_dev_block(phba); 12680 12681 /* stop all timers */ 12682 lpfc_stop_hba_timers(phba); 12683 12684 /* Clean up all driver's outstanding SCSI I/Os */ 12685 lpfc_sli_flush_fcp_rings(phba); 12686 12687 /* Flush the outstanding NVME IOs if fc4 type enabled. */ 12688 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12689 lpfc_sli_flush_nvme_rings(phba); 12690 } 12691 12692 /** 12693 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 12694 * @pdev: pointer to PCI device. 12695 * @state: the current PCI connection state. 12696 * 12697 * This routine is called from the PCI subsystem for error handling to device 12698 * with SLI-4 interface spec. This function is called by the PCI subsystem 12699 * after a PCI bus error affecting this device has been detected. When this 12700 * function is invoked, it will need to stop all the I/Os and interrupt(s) 12701 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 12702 * for the PCI subsystem to perform proper recovery as desired. 12703 * 12704 * Return codes 12705 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12706 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12707 **/ 12708 static pci_ers_result_t 12709 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 12710 { 12711 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12712 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12713 12714 switch (state) { 12715 case pci_channel_io_normal: 12716 /* Non-fatal error, prepare for recovery */ 12717 lpfc_sli4_prep_dev_for_recover(phba); 12718 return PCI_ERS_RESULT_CAN_RECOVER; 12719 case pci_channel_io_frozen: 12720 /* Fatal error, prepare for slot reset */ 12721 lpfc_sli4_prep_dev_for_reset(phba); 12722 return PCI_ERS_RESULT_NEED_RESET; 12723 case pci_channel_io_perm_failure: 12724 /* Permanent failure, prepare for device down */ 12725 lpfc_sli4_prep_dev_for_perm_failure(phba); 12726 return PCI_ERS_RESULT_DISCONNECT; 12727 default: 12728 /* Unknown state, prepare and request slot reset */ 12729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12730 "2825 Unknown PCI error state: x%x\n", state); 12731 lpfc_sli4_prep_dev_for_reset(phba); 12732 return PCI_ERS_RESULT_NEED_RESET; 12733 } 12734 } 12735 12736 /** 12737 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 12738 * @pdev: pointer to PCI device. 12739 * 12740 * This routine is called from the PCI subsystem for error handling to device 12741 * with SLI-4 interface spec. It is called after PCI bus has been reset to 12742 * restart the PCI card from scratch, as if from a cold-boot. During the 12743 * PCI subsystem error recovery, after the driver returns 12744 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 12745 * recovery and then call this routine before calling the .resume method to 12746 * recover the device. This function will initialize the HBA device, enable 12747 * the interrupt, but it will just put the HBA to offline state without 12748 * passing any I/O traffic. 
12749 * 12750 * Return codes 12751 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12752 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12753 */ 12754 static pci_ers_result_t 12755 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 12756 { 12757 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12758 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12759 struct lpfc_sli *psli = &phba->sli; 12760 uint32_t intr_mode; 12761 12762 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 12763 if (pci_enable_device_mem(pdev)) { 12764 printk(KERN_ERR "lpfc: Cannot re-enable " 12765 "PCI device after reset.\n"); 12766 return PCI_ERS_RESULT_DISCONNECT; 12767 } 12768 12769 pci_restore_state(pdev); 12770 12771 /* 12772 * As the new kernel behavior of pci_restore_state() API call clears 12773 * device saved_state flag, need to save the restored state again. 12774 */ 12775 pci_save_state(pdev); 12776 12777 if (pdev->is_busmaster) 12778 pci_set_master(pdev); 12779 12780 spin_lock_irq(&phba->hbalock); 12781 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 12782 spin_unlock_irq(&phba->hbalock); 12783 12784 /* Configure and enable interrupt */ 12785 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 12786 if (intr_mode == LPFC_INTR_ERROR) { 12787 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12788 "2824 Cannot re-enable interrupt after " 12789 "slot reset.\n"); 12790 return PCI_ERS_RESULT_DISCONNECT; 12791 } else 12792 phba->intr_mode = intr_mode; 12793 12794 /* Log the current active interrupt mode */ 12795 lpfc_log_intr_mode(phba, phba->intr_mode); 12796 12797 return PCI_ERS_RESULT_RECOVERED; 12798 } 12799 12800 /** 12801 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 12802 * @pdev: pointer to PCI device 12803 * 12804 * This routine is called from the PCI subsystem for error handling to device 12805 * with SLI-4 interface spec. It is called when kernel error recovery tells 12806 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 12807 * error recovery. After this call, traffic can start to flow from this device 12808 * again. 12809 **/ 12810 static void 12811 lpfc_io_resume_s4(struct pci_dev *pdev) 12812 { 12813 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12814 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12815 12816 /* 12817 * In case of slot reset, as function reset is performed through 12818 * mailbox command which needs DMA to be enabled, this operation 12819 * has to be moved to the io resume phase. Taking device offline 12820 * will perform the necessary cleanup. 12821 */ 12822 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 12823 /* Perform device reset */ 12824 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12825 lpfc_offline(phba); 12826 lpfc_sli_brdrestart(phba); 12827 /* Bring the device back online */ 12828 lpfc_online(phba); 12829 } 12830 } 12831 12832 /** 12833 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 12834 * @pdev: pointer to PCI device 12835 * @pid: pointer to PCI device identifier 12836 * 12837 * This routine is to be registered to the kernel's PCI subsystem. When an 12838 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 12839 * at PCI device-specific information of the device and driver to see if the 12840 * driver state that it can support this kind of device. If the match is 12841 * successful, the driver core invokes this routine. 
This routine dispatches 12842 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 12843 * do all the initialization that it needs to do to handle the HBA device 12844 * properly. 12845 * 12846 * Return code 12847 * 0 - driver can claim the device 12848 * negative value - driver can not claim the device 12849 **/ 12850 static int 12851 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 12852 { 12853 int rc; 12854 struct lpfc_sli_intf intf; 12855 12856 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 12857 return -ENODEV; 12858 12859 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 12860 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 12861 rc = lpfc_pci_probe_one_s4(pdev, pid); 12862 else 12863 rc = lpfc_pci_probe_one_s3(pdev, pid); 12864 12865 return rc; 12866 } 12867 12868 /** 12869 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 12870 * @pdev: pointer to PCI device 12871 * 12872 * This routine is to be registered to the kernel's PCI subsystem. When an 12873 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 12874 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 12875 * remove routine, which will perform all the necessary cleanup for the 12876 * device to be removed from the PCI subsystem properly. 12877 **/ 12878 static void 12879 lpfc_pci_remove_one(struct pci_dev *pdev) 12880 { 12881 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12882 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12883 12884 switch (phba->pci_dev_grp) { 12885 case LPFC_PCI_DEV_LP: 12886 lpfc_pci_remove_one_s3(pdev); 12887 break; 12888 case LPFC_PCI_DEV_OC: 12889 lpfc_pci_remove_one_s4(pdev); 12890 break; 12891 default: 12892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12893 "1424 Invalid PCI device group: 0x%x\n", 12894 phba->pci_dev_grp); 12895 break; 12896 } 12897 return; 12898 } 12899 12900 /** 12901 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 12902 * @pdev: pointer to PCI device 12903 * @msg: power management message 12904 * 12905 * This routine is to be registered to the kernel's PCI subsystem to support 12906 * system Power Management (PM). When PM invokes this method, it dispatches 12907 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 12908 * suspend the device. 12909 * 12910 * Return code 12911 * 0 - driver suspended the device 12912 * Error otherwise 12913 **/ 12914 static int 12915 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 12916 { 12917 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12918 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12919 int rc = -ENODEV; 12920 12921 switch (phba->pci_dev_grp) { 12922 case LPFC_PCI_DEV_LP: 12923 rc = lpfc_pci_suspend_one_s3(pdev, msg); 12924 break; 12925 case LPFC_PCI_DEV_OC: 12926 rc = lpfc_pci_suspend_one_s4(pdev, msg); 12927 break; 12928 default: 12929 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12930 "1425 Invalid PCI device group: 0x%x\n", 12931 phba->pci_dev_grp); 12932 break; 12933 } 12934 return rc; 12935 } 12936 12937 /** 12938 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 12939 * @pdev: pointer to PCI device 12940 * 12941 * This routine is to be registered to the kernel's PCI subsystem to support 12942 * system Power Management (PM). 
When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting a PCI device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
13020 * 13021 * Return codes 13022 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13023 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13024 **/ 13025 static pci_ers_result_t 13026 lpfc_io_slot_reset(struct pci_dev *pdev) 13027 { 13028 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13029 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13030 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 13031 13032 switch (phba->pci_dev_grp) { 13033 case LPFC_PCI_DEV_LP: 13034 rc = lpfc_io_slot_reset_s3(pdev); 13035 break; 13036 case LPFC_PCI_DEV_OC: 13037 rc = lpfc_io_slot_reset_s4(pdev); 13038 break; 13039 default: 13040 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13041 "1428 Invalid PCI device group: 0x%x\n", 13042 phba->pci_dev_grp); 13043 break; 13044 } 13045 return rc; 13046 } 13047 13048 /** 13049 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 13050 * @pdev: pointer to PCI device 13051 * 13052 * This routine is registered to the PCI subsystem for error handling. It 13053 * is called when kernel error recovery tells the lpfc driver that it is 13054 * OK to resume normal PCI operation after PCI bus error recovery. When 13055 * this routine is invoked, it dispatches the action to the proper SLI-3 13056 * or SLI-4 device io_resume routine, which will resume the device operation. 13057 **/ 13058 static void 13059 lpfc_io_resume(struct pci_dev *pdev) 13060 { 13061 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13062 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13063 13064 switch (phba->pci_dev_grp) { 13065 case LPFC_PCI_DEV_LP: 13066 lpfc_io_resume_s3(pdev); 13067 break; 13068 case LPFC_PCI_DEV_OC: 13069 lpfc_io_resume_s4(pdev); 13070 break; 13071 default: 13072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13073 "1429 Invalid PCI device group: 0x%x\n", 13074 phba->pci_dev_grp); 13075 break; 13076 } 13077 return; 13078 } 13079 13080 /** 13081 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 13082 * @phba: pointer to lpfc hba data structure. 13083 * 13084 * This routine checks to see if OAS is supported for this adapter. If 13085 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 13086 * the enable oas flag is cleared and the pool created for OAS device data 13087 * is destroyed. 13088 * 13089 **/ 13090 static void 13091 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 13092 { 13093 13094 if (!phba->cfg_EnableXLane) 13095 return; 13096 13097 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 13098 phba->cfg_fof = 1; 13099 } else { 13100 phba->cfg_fof = 0; 13101 if (phba->device_data_mem_pool) 13102 mempool_destroy(phba->device_data_mem_pool); 13103 phba->device_data_mem_pool = NULL; 13104 } 13105 13106 return; 13107 } 13108 13109 /** 13110 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 13111 * @phba: pointer to lpfc hba data structure. 13112 * 13113 * This routine checks to see if RAS is supported by the adapter. Check the 13114 * function through which RAS support enablement is to be done. 
13115 **/ 13116 void 13117 lpfc_sli4_ras_init(struct lpfc_hba *phba) 13118 { 13119 switch (phba->pcidev->device) { 13120 case PCI_DEVICE_ID_LANCER_G6_FC: 13121 case PCI_DEVICE_ID_LANCER_G7_FC: 13122 phba->ras_fwlog.ras_hwsupport = true; 13123 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 13124 phba->cfg_ras_fwlog_buffsize) 13125 phba->ras_fwlog.ras_enabled = true; 13126 else 13127 phba->ras_fwlog.ras_enabled = false; 13128 break; 13129 default: 13130 phba->ras_fwlog.ras_hwsupport = false; 13131 } 13132 } 13133 13134 13135 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 13136 13137 static const struct pci_error_handlers lpfc_err_handler = { 13138 .error_detected = lpfc_io_error_detected, 13139 .slot_reset = lpfc_io_slot_reset, 13140 .resume = lpfc_io_resume, 13141 }; 13142 13143 static struct pci_driver lpfc_driver = { 13144 .name = LPFC_DRIVER_NAME, 13145 .id_table = lpfc_id_table, 13146 .probe = lpfc_pci_probe_one, 13147 .remove = lpfc_pci_remove_one, 13148 .shutdown = lpfc_pci_remove_one, 13149 .suspend = lpfc_pci_suspend_one, 13150 .resume = lpfc_pci_resume_one, 13151 .err_handler = &lpfc_err_handler, 13152 }; 13153 13154 static const struct file_operations lpfc_mgmt_fop = { 13155 .owner = THIS_MODULE, 13156 }; 13157 13158 static struct miscdevice lpfc_mgmt_dev = { 13159 .minor = MISC_DYNAMIC_MINOR, 13160 .name = "lpfcmgmt", 13161 .fops = &lpfc_mgmt_fop, 13162 }; 13163 13164 /** 13165 * lpfc_init - lpfc module initialization routine 13166 * 13167 * This routine is to be invoked when the lpfc module is loaded into the 13168 * kernel. The special kernel macro module_init() is used to indicate the 13169 * role of this routine to the kernel as lpfc module entry point. 13170 * 13171 * Return codes 13172 * 0 - successful 13173 * -ENOMEM - FC attach transport failed 13174 * all others - failed 13175 */ 13176 static int __init 13177 lpfc_init(void) 13178 { 13179 int error = 0; 13180 13181 printk(LPFC_MODULE_DESC "\n"); 13182 printk(LPFC_COPYRIGHT "\n"); 13183 13184 error = misc_register(&lpfc_mgmt_dev); 13185 if (error) 13186 printk(KERN_ERR "Could not register lpfcmgmt device, " 13187 "misc_register returned with status %d", error); 13188 13189 lpfc_transport_functions.vport_create = lpfc_vport_create; 13190 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 13191 lpfc_transport_template = 13192 fc_attach_transport(&lpfc_transport_functions); 13193 if (lpfc_transport_template == NULL) 13194 return -ENOMEM; 13195 lpfc_vport_transport_template = 13196 fc_attach_transport(&lpfc_vport_transport_functions); 13197 if (lpfc_vport_transport_template == NULL) { 13198 fc_release_transport(lpfc_transport_template); 13199 return -ENOMEM; 13200 } 13201 lpfc_nvme_cmd_template(); 13202 lpfc_nvmet_cmd_template(); 13203 13204 /* Initialize in case vector mapping is needed */ 13205 lpfc_present_cpu = num_present_cpus(); 13206 13207 error = pci_register_driver(&lpfc_driver); 13208 if (error) { 13209 fc_release_transport(lpfc_transport_template); 13210 fc_release_transport(lpfc_vport_transport_template); 13211 } 13212 13213 return error; 13214 } 13215 13216 /** 13217 * lpfc_exit - lpfc module removal routine 13218 * 13219 * This routine is invoked when the lpfc module is removed from the kernel. 13220 * The special kernel macro module_exit() is used to indicate the role of 13221 * this routine to the kernel as lpfc module exit point. 
13222 */ 13223 static void __exit 13224 lpfc_exit(void) 13225 { 13226 misc_deregister(&lpfc_mgmt_dev); 13227 pci_unregister_driver(&lpfc_driver); 13228 fc_release_transport(lpfc_transport_template); 13229 fc_release_transport(lpfc_vport_transport_template); 13230 if (_dump_buf_data) { 13231 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 13232 "_dump_buf_data at 0x%p\n", 13233 (1L << _dump_buf_data_order), _dump_buf_data); 13234 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 13235 } 13236 13237 if (_dump_buf_dif) { 13238 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 13239 "_dump_buf_dif at 0x%p\n", 13240 (1L << _dump_buf_dif_order), _dump_buf_dif); 13241 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 13242 } 13243 idr_destroy(&lpfc_hba_index); 13244 } 13245 13246 module_init(lpfc_init); 13247 module_exit(lpfc_exit); 13248 MODULE_LICENSE("GPL"); 13249 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 13250 MODULE_AUTHOR("Broadcom"); 13251 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 13252
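/*
 * Illustrative sketch (not compiled): the suspend/resume comments above
 * describe the common legacy PCI power-management pattern -- quiesce the
 * device, save its config space, and drop to PCI_D3hot on suspend; return
 * to PCI_D0, restore the config space, and save it again on resume (because
 * pci_restore_state() clears the saved_state flag). The hypothetical
 * mydrv_* names below are placeholders, not lpfc symbols.
 */
#if 0
#include <linux/pci.h>

static int mydrv_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	/* Quiesce the hardware first: stop worker threads, disable
	 * interrupts, and tear down queues (driver-specific).
	 */

	/* Save PCI config space and enter a low-power state. */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int mydrv_resume(struct pci_dev *pdev)
{
	/* Return to full power and restore the saved config space. */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* pci_restore_state() clears the saved_state flag, so save the
	 * just-restored state again for any later suspend or reset.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Re-enable interrupts and bring the device back online
	 * (driver-specific reinitialization).
	 */
	return 0;
}
#endif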
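/*
 * Illustrative sketch (not compiled): the error-handler comments above
 * describe the PCI error-recovery (EEH/AER) flow -- .error_detected maps
 * the channel state to CAN_RECOVER, NEED_RESET, or DISCONNECT; .slot_reset
 * re-enables the device after the bus reset; .resume restarts I/O. The
 * hypothetical mydrv_* names below are placeholders, not lpfc symbols.
 */
#if 0
#include <linux/pci.h>

static pci_ers_result_t mydrv_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal: abort in-flight I/O and let upper layers retry. */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal: quiesce the device and ask for a slot reset. */
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Unrecoverable: block all traffic and give the device up. */
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
{
	/* The link was reset; re-enable memory accesses first. */
	if (pci_enable_device_mem(pdev))
		return PCI_ERS_RESULT_DISCONNECT;

	pci_restore_state(pdev);
	pci_save_state(pdev);	/* restore clears the saved_state flag */
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Re-enable interrupts here; the full restart happens in .resume. */
	return PCI_ERS_RESULT_RECOVERED;
}

static void mydrv_io_resume(struct pci_dev *pdev)
{
	/* Recovery is complete: restart the device and resume I/O. */
}

static const struct pci_error_handlers mydrv_err_handler = {
	.error_detected	= mydrv_error_detected,
	.slot_reset	= mydrv_slot_reset,
	.resume		= mydrv_io_resume,
};
#endif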
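/*
 * Illustrative sketch (not compiled): lpfc_pci_resume_one_s4() above
 * restarts the per-HBA worker thread with kthread_run() and checks the
 * returned pointer with IS_ERR()/PTR_ERR() before using it. A minimal
 * generic form of that pattern follows; the mydrv_* names are hypothetical.
 */
#if 0
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/delay.h>

static int mydrv_do_work(void *data)
{
	/* Run until kthread_stop() is called on this thread. */
	while (!kthread_should_stop()) {
		/* ... service pending work for the device in 'data' ... */
		msleep(100);
	}
	return 0;
}

static int mydrv_start_worker(void *priv, int instance)
{
	struct task_struct *task;

	task = kthread_run(mydrv_do_work, priv, "mydrv_worker_%d", instance);
	if (IS_ERR(task))
		return PTR_ERR(task);	/* e.g. -ENOMEM */

	/* Stash 'task' somewhere so it can later be stopped with
	 * kthread_stop(), as the suspend path above does.
	 */
	return 0;
}
#endif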
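/*
 * Illustrative sketch (not compiled): lpfc_init()/lpfc_exit() above follow
 * the usual module pattern -- register the management misc device, attach
 * the FC transport templates, then register the PCI driver, and tear
 * everything down in reverse order on exit (lpfc treats a misc_register()
 * failure as non-fatal and only logs it). A stripped-down version of that
 * register/unwind ordering for a hypothetical module:
 */
#if 0
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/miscdevice.h>

static struct pci_driver mydrv_driver;		/* assumed set up elsewhere */
static struct miscdevice mydrv_mgmt_dev;	/* assumed set up elsewhere */

static int __init mydrv_init(void)
{
	int error;

	error = misc_register(&mydrv_mgmt_dev);
	if (error)
		return error;

	error = pci_register_driver(&mydrv_driver);
	if (error)
		misc_deregister(&mydrv_mgmt_dev);	/* unwind on failure */

	return error;
}

static void __exit mydrv_exit(void)
{
	/* Tear down in the reverse order of registration. */
	pci_unregister_driver(&mydrv_driver);
	misc_deregister(&mydrv_mgmt_dev);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");
#endif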