1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 43 #include <scsi/scsi.h> 44 #include <scsi/scsi_device.h> 45 #include <scsi/scsi_host.h> 46 #include <scsi/scsi_transport_fc.h> 47 #include <scsi/scsi_tcq.h> 48 #include <scsi/fc/fc_fs.h> 49 50 #include <linux/nvme-fc-driver.h> 51 52 #include "lpfc_hw4.h" 53 #include "lpfc_hw.h" 54 #include "lpfc_sli.h" 55 #include "lpfc_sli4.h" 56 #include "lpfc_nl.h" 57 #include "lpfc_disc.h" 58 #include "lpfc.h" 59 #include "lpfc_scsi.h" 60 #include "lpfc_nvme.h" 61 #include "lpfc_nvmet.h" 62 #include "lpfc_logmsg.h" 63 #include "lpfc_crtn.h" 64 #include "lpfc_vport.h" 65 #include "lpfc_version.h" 66 #include "lpfc_ids.h" 67 68 char *_dump_buf_data; 69 unsigned long _dump_buf_data_order; 70 char *_dump_buf_dif; 71 unsigned long _dump_buf_dif_order; 72 spinlock_t _dump_buf_lock; 73 74 /* Used when mapping IRQ vectors in a driver centric manner */ 75 uint32_t lpfc_present_cpu; 76 77 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 78 static int lpfc_post_rcv_buf(struct lpfc_hba *); 79 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 80 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 81 static int lpfc_setup_endian_order(struct lpfc_hba *); 82 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 83 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 84 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 85 static void lpfc_init_sgl_list(struct lpfc_hba *); 86 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 87 static void lpfc_free_active_sgl(struct lpfc_hba *); 88 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 89 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 90 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 91 static void 
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 92 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 93 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 94 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 95 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 96 static uint16_t lpfc_find_eq_handle(struct lpfc_hba *, uint16_t); 97 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 98 99 static struct scsi_transport_template *lpfc_transport_template = NULL; 100 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 101 static DEFINE_IDR(lpfc_hba_index); 102 #define LPFC_NVMET_BUF_POST 254 103 104 /** 105 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 106 * @phba: pointer to lpfc hba data structure. 107 * 108 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 109 * mailbox command. It retrieves the revision information from the HBA and 110 * collects the Vital Product Data (VPD) about the HBA for preparing the 111 * configuration of the HBA. 112 * 113 * Return codes: 114 * 0 - success. 115 * -ERESTART - requests the SLI layer to reset the HBA and try again. 116 * Any other value - indicates an error. 117 **/ 118 int 119 lpfc_config_port_prep(struct lpfc_hba *phba) 120 { 121 lpfc_vpd_t *vp = &phba->vpd; 122 int i = 0, rc; 123 LPFC_MBOXQ_t *pmb; 124 MAILBOX_t *mb; 125 char *lpfc_vpd_data = NULL; 126 uint16_t offset = 0; 127 static char licensed[56] = 128 "key unlock for use with gnu public licensed code only\0"; 129 static int init_key = 1; 130 131 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 132 if (!pmb) { 133 phba->link_state = LPFC_HBA_ERROR; 134 return -ENOMEM; 135 } 136 137 mb = &pmb->u.mb; 138 phba->link_state = LPFC_INIT_MBX_CMDS; 139 140 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 141 if (init_key) { 142 uint32_t *ptext = (uint32_t *) licensed; 143 144 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 145 *ptext = cpu_to_be32(*ptext); 146 init_key = 0; 147 } 148 149 lpfc_read_nv(phba, pmb); 150 memset((char*)mb->un.varRDnvp.rsvd3, 0, 151 sizeof (mb->un.varRDnvp.rsvd3)); 152 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 153 sizeof (licensed)); 154 155 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 156 157 if (rc != MBX_SUCCESS) { 158 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 159 "0324 Config Port initialization " 160 "error, mbxCmd x%x READ_NVPARM, " 161 "mbxStatus x%x\n", 162 mb->mbxCommand, mb->mbxStatus); 163 mempool_free(pmb, phba->mbox_mem_pool); 164 return -ERESTART; 165 } 166 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 167 sizeof(phba->wwnn)); 168 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 169 sizeof(phba->wwpn)); 170 } 171 172 /* 173 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 174 * which was already set in lpfc_get_cfgparam() 175 */ 176 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 177 178 /* Setup and issue mailbox READ REV command */ 179 lpfc_read_rev(phba, pmb); 180 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 181 if (rc != MBX_SUCCESS) { 182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 183 "0439 Adapter failed to init, mbxCmd x%x " 184 "READ_REV, mbxStatus x%x\n", 185 mb->mbxCommand, mb->mbxStatus); 186 mempool_free( pmb, phba->mbox_mem_pool); 187 return -ERESTART; 188 } 189 190 191 /* 192 * The value of rr must be 1 since the driver set the cv field to 1. 193 * This setting requires the FW to set all revision fields. 
194 */ 195 if (mb->un.varRdRev.rr == 0) { 196 vp->rev.rBit = 0; 197 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 198 "0440 Adapter failed to init, READ_REV has " 199 "missing revision information.\n"); 200 mempool_free(pmb, phba->mbox_mem_pool); 201 return -ERESTART; 202 } 203 204 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 205 mempool_free(pmb, phba->mbox_mem_pool); 206 return -EINVAL; 207 } 208 209 /* Save information as VPD data */ 210 vp->rev.rBit = 1; 211 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 212 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 213 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 214 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 215 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 216 vp->rev.biuRev = mb->un.varRdRev.biuRev; 217 vp->rev.smRev = mb->un.varRdRev.smRev; 218 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 219 vp->rev.endecRev = mb->un.varRdRev.endecRev; 220 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 221 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 222 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 223 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 224 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 225 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 226 227 /* If the sli feature level is less then 9, we must 228 * tear down all RPIs and VPIs on link down if NPIV 229 * is enabled. 230 */ 231 if (vp->rev.feaLevelHigh < 9) 232 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 233 234 if (lpfc_is_LC_HBA(phba->pcidev->device)) 235 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 236 sizeof (phba->RandomData)); 237 238 /* Get adapter VPD information */ 239 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 240 if (!lpfc_vpd_data) 241 goto out_free_mbox; 242 do { 243 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 244 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 245 246 if (rc != MBX_SUCCESS) { 247 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 248 "0441 VPD not present on adapter, " 249 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 250 mb->mbxCommand, mb->mbxStatus); 251 mb->un.varDmp.word_cnt = 0; 252 } 253 /* dump mem may return a zero when finished or we got a 254 * mailbox error, either way we are done. 255 */ 256 if (mb->un.varDmp.word_cnt == 0) 257 break; 258 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 259 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 260 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 261 lpfc_vpd_data + offset, 262 mb->un.varDmp.word_cnt); 263 offset += mb->un.varDmp.word_cnt; 264 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 265 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 266 267 kfree(lpfc_vpd_data); 268 out_free_mbox: 269 mempool_free(pmb, phba->mbox_mem_pool); 270 return 0; 271 } 272 273 /** 274 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 275 * @phba: pointer to lpfc hba data structure. 276 * @pmboxq: pointer to the driver internal queue element for mailbox command. 277 * 278 * This is the completion handler for driver's configuring asynchronous event 279 * mailbox command to the device. If the mailbox command returns successfully, 280 * it will set internal async event support flag to 1; otherwise, it will 281 * set internal async event support flag to 0. 
282 **/ 283 static void 284 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 285 { 286 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 287 phba->temp_sensor_support = 1; 288 else 289 phba->temp_sensor_support = 0; 290 mempool_free(pmboxq, phba->mbox_mem_pool); 291 return; 292 } 293 294 /** 295 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 296 * @phba: pointer to lpfc hba data structure. 297 * @pmboxq: pointer to the driver internal queue element for mailbox command. 298 * 299 * This is the completion handler for dump mailbox command for getting 300 * wake up parameters. When this command complete, the response contain 301 * Option rom version of the HBA. This function translate the version number 302 * into a human readable string and store it in OptionROMVersion. 303 **/ 304 static void 305 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 306 { 307 struct prog_id *prg; 308 uint32_t prog_id_word; 309 char dist = ' '; 310 /* character array used for decoding dist type. */ 311 char dist_char[] = "nabx"; 312 313 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 314 mempool_free(pmboxq, phba->mbox_mem_pool); 315 return; 316 } 317 318 prg = (struct prog_id *) &prog_id_word; 319 320 /* word 7 contain option rom version */ 321 prog_id_word = pmboxq->u.mb.un.varWords[7]; 322 323 /* Decode the Option rom version word to a readable string */ 324 if (prg->dist < 4) 325 dist = dist_char[prg->dist]; 326 327 if ((prg->dist == 3) && (prg->num == 0)) 328 snprintf(phba->OptionROMVersion, 32, "%d.%d%d", 329 prg->ver, prg->rev, prg->lev); 330 else 331 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", 332 prg->ver, prg->rev, prg->lev, 333 dist, prg->num); 334 mempool_free(pmboxq, phba->mbox_mem_pool); 335 return; 336 } 337 338 /** 339 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, 340 * cfg_soft_wwnn, cfg_soft_wwpn 341 * @vport: pointer to lpfc vport data structure. 342 * 343 * 344 * Return codes 345 * None. 
346 **/ 347 void 348 lpfc_update_vport_wwn(struct lpfc_vport *vport) 349 { 350 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 351 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 352 353 /* If the soft name exists then update it using the service params */ 354 if (vport->phba->cfg_soft_wwnn) 355 u64_to_wwn(vport->phba->cfg_soft_wwnn, 356 vport->fc_sparam.nodeName.u.wwn); 357 if (vport->phba->cfg_soft_wwpn) 358 u64_to_wwn(vport->phba->cfg_soft_wwpn, 359 vport->fc_sparam.portName.u.wwn); 360 361 /* 362 * If the name is empty or there exists a soft name 363 * then copy the service params name, otherwise use the fc name 364 */ 365 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 366 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 367 sizeof(struct lpfc_name)); 368 else 369 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 370 sizeof(struct lpfc_name)); 371 372 /* 373 * If the port name has changed, then set the Param changes flag 374 * to unreg the login 375 */ 376 if (vport->fc_portname.u.wwn[0] != 0 && 377 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 378 sizeof(struct lpfc_name))) 379 vport->vport_flag |= FAWWPN_PARAM_CHG; 380 381 if (vport->fc_portname.u.wwn[0] == 0 || 382 vport->phba->cfg_soft_wwpn || 383 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 384 vport->vport_flag & FAWWPN_SET) { 385 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 386 sizeof(struct lpfc_name)); 387 vport->vport_flag &= ~FAWWPN_SET; 388 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 389 vport->vport_flag |= FAWWPN_SET; 390 } 391 else 392 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 393 sizeof(struct lpfc_name)); 394 } 395 396 /** 397 * lpfc_config_port_post - Perform lpfc initialization after config port 398 * @phba: pointer to lpfc hba data structure. 399 * 400 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 401 * command call. It performs all internal resource and state setups on the 402 * port: post IOCB buffers, enable appropriate host interrupt attentions, 403 * ELS ring timers, etc. 404 * 405 * Return codes 406 * 0 - success. 407 * Any other value - error. 408 **/ 409 int 410 lpfc_config_port_post(struct lpfc_hba *phba) 411 { 412 struct lpfc_vport *vport = phba->pport; 413 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 414 LPFC_MBOXQ_t *pmb; 415 MAILBOX_t *mb; 416 struct lpfc_dmabuf *mp; 417 struct lpfc_sli *psli = &phba->sli; 418 uint32_t status, timeout; 419 int i, j; 420 int rc; 421 422 spin_lock_irq(&phba->hbalock); 423 /* 424 * If the Config port completed correctly the HBA is not 425 * over heated any more. 426 */ 427 if (phba->over_temp_state == HBA_OVER_TEMP) 428 phba->over_temp_state = HBA_NORMAL_TEMP; 429 spin_unlock_irq(&phba->hbalock); 430 431 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 432 if (!pmb) { 433 phba->link_state = LPFC_HBA_ERROR; 434 return -ENOMEM; 435 } 436 mb = &pmb->u.mb; 437 438 /* Get login parameters for NID. 
*/ 439 rc = lpfc_read_sparam(phba, pmb, 0); 440 if (rc) { 441 mempool_free(pmb, phba->mbox_mem_pool); 442 return -ENOMEM; 443 } 444 445 pmb->vport = vport; 446 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 448 "0448 Adapter failed init, mbxCmd x%x " 449 "READ_SPARM mbxStatus x%x\n", 450 mb->mbxCommand, mb->mbxStatus); 451 phba->link_state = LPFC_HBA_ERROR; 452 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 453 mempool_free(pmb, phba->mbox_mem_pool); 454 lpfc_mbuf_free(phba, mp->virt, mp->phys); 455 kfree(mp); 456 return -EIO; 457 } 458 459 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 460 461 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 462 lpfc_mbuf_free(phba, mp->virt, mp->phys); 463 kfree(mp); 464 pmb->ctx_buf = NULL; 465 lpfc_update_vport_wwn(vport); 466 467 /* Update the fc_host data structures with new wwn. */ 468 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 469 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 470 fc_host_max_npiv_vports(shost) = phba->max_vpi; 471 472 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 473 /* This should be consolidated into parse_vpd ? - mr */ 474 if (phba->SerialNumber[0] == 0) { 475 uint8_t *outptr; 476 477 outptr = &vport->fc_nodename.u.s.IEEE[0]; 478 for (i = 0; i < 12; i++) { 479 status = *outptr++; 480 j = ((status & 0xf0) >> 4); 481 if (j <= 9) 482 phba->SerialNumber[i] = 483 (char)((uint8_t) 0x30 + (uint8_t) j); 484 else 485 phba->SerialNumber[i] = 486 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 487 i++; 488 j = (status & 0xf); 489 if (j <= 9) 490 phba->SerialNumber[i] = 491 (char)((uint8_t) 0x30 + (uint8_t) j); 492 else 493 phba->SerialNumber[i] = 494 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 495 } 496 } 497 498 lpfc_read_config(phba, pmb); 499 pmb->vport = vport; 500 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 502 "0453 Adapter failed to init, mbxCmd x%x " 503 "READ_CONFIG, mbxStatus x%x\n", 504 mb->mbxCommand, mb->mbxStatus); 505 phba->link_state = LPFC_HBA_ERROR; 506 mempool_free( pmb, phba->mbox_mem_pool); 507 return -EIO; 508 } 509 510 /* Check if the port is disabled */ 511 lpfc_sli_read_link_ste(phba); 512 513 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 514 i = (mb->un.varRdConfig.max_xri + 1); 515 if (phba->cfg_hba_queue_depth > i) { 516 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 517 "3359 HBA queue depth changed from %d to %d\n", 518 phba->cfg_hba_queue_depth, i); 519 phba->cfg_hba_queue_depth = i; 520 } 521 522 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 523 i = (mb->un.varRdConfig.max_xri >> 3); 524 if (phba->pport->cfg_lun_queue_depth > i) { 525 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 526 "3360 LUN queue depth changed from %d to %d\n", 527 phba->pport->cfg_lun_queue_depth, i); 528 phba->pport->cfg_lun_queue_depth = i; 529 } 530 531 phba->lmt = mb->un.varRdConfig.lmt; 532 533 /* Get the default values for Model Name and Description */ 534 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 535 536 phba->link_state = LPFC_LINK_DOWN; 537 538 /* Only process IOCBs on ELS ring till hba_state is READY */ 539 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 540 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 541 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 542 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 543 544 /* Post receive buffers for desired rings */ 545 if 
(phba->sli_rev != 3) 546 lpfc_post_rcv_buf(phba); 547 548 /* 549 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 550 */ 551 if (phba->intr_type == MSIX) { 552 rc = lpfc_config_msi(phba, pmb); 553 if (rc) { 554 mempool_free(pmb, phba->mbox_mem_pool); 555 return -EIO; 556 } 557 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 558 if (rc != MBX_SUCCESS) { 559 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 560 "0352 Config MSI mailbox command " 561 "failed, mbxCmd x%x, mbxStatus x%x\n", 562 pmb->u.mb.mbxCommand, 563 pmb->u.mb.mbxStatus); 564 mempool_free(pmb, phba->mbox_mem_pool); 565 return -EIO; 566 } 567 } 568 569 spin_lock_irq(&phba->hbalock); 570 /* Initialize ERATT handling flag */ 571 phba->hba_flag &= ~HBA_ERATT_HANDLED; 572 573 /* Enable appropriate host interrupts */ 574 if (lpfc_readl(phba->HCregaddr, &status)) { 575 spin_unlock_irq(&phba->hbalock); 576 return -EIO; 577 } 578 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 579 if (psli->num_rings > 0) 580 status |= HC_R0INT_ENA; 581 if (psli->num_rings > 1) 582 status |= HC_R1INT_ENA; 583 if (psli->num_rings > 2) 584 status |= HC_R2INT_ENA; 585 if (psli->num_rings > 3) 586 status |= HC_R3INT_ENA; 587 588 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 589 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 590 status &= ~(HC_R0INT_ENA); 591 592 writel(status, phba->HCregaddr); 593 readl(phba->HCregaddr); /* flush */ 594 spin_unlock_irq(&phba->hbalock); 595 596 /* Set up ring-0 (ELS) timer */ 597 timeout = phba->fc_ratov * 2; 598 mod_timer(&vport->els_tmofunc, 599 jiffies + msecs_to_jiffies(1000 * timeout)); 600 /* Set up heart beat (HB) timer */ 601 mod_timer(&phba->hb_tmofunc, 602 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 603 phba->hb_outstanding = 0; 604 phba->last_completion_time = jiffies; 605 /* Set up error attention (ERATT) polling timer */ 606 mod_timer(&phba->eratt_poll, 607 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 608 609 if (phba->hba_flag & LINK_DISABLED) { 610 lpfc_printf_log(phba, 611 KERN_ERR, LOG_INIT, 612 "2598 Adapter Link is disabled.\n"); 613 lpfc_down_link(phba, pmb); 614 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 615 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 616 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 617 lpfc_printf_log(phba, 618 KERN_ERR, LOG_INIT, 619 "2599 Adapter failed to issue DOWN_LINK" 620 " mbox command rc 0x%x\n", rc); 621 622 mempool_free(pmb, phba->mbox_mem_pool); 623 return -EIO; 624 } 625 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 626 mempool_free(pmb, phba->mbox_mem_pool); 627 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 628 if (rc) 629 return rc; 630 } 631 /* MBOX buffer will be freed in mbox compl */ 632 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 633 if (!pmb) { 634 phba->link_state = LPFC_HBA_ERROR; 635 return -ENOMEM; 636 } 637 638 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 639 pmb->mbox_cmpl = lpfc_config_async_cmpl; 640 pmb->vport = phba->pport; 641 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 642 643 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 644 lpfc_printf_log(phba, 645 KERN_ERR, 646 LOG_INIT, 647 "0456 Adapter failed to issue " 648 "ASYNCEVT_ENABLE mbox status x%x\n", 649 rc); 650 mempool_free(pmb, phba->mbox_mem_pool); 651 } 652 653 /* Get Option rom version */ 654 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 655 if (!pmb) { 656 phba->link_state = LPFC_HBA_ERROR; 657 return -ENOMEM; 658 } 659 660 lpfc_dump_wakeup_param(phba, pmb); 661 pmb->mbox_cmpl = 
lpfc_dump_wakeup_param_cmpl; 662 pmb->vport = phba->pport; 663 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 664 665 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 666 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 667 "to get Option ROM version status x%x\n", rc); 668 mempool_free(pmb, phba->mbox_mem_pool); 669 } 670 671 return 0; 672 } 673 674 /** 675 * lpfc_hba_init_link - Initialize the FC link 676 * @phba: pointer to lpfc hba data structure. 677 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 678 * 679 * This routine will issue the INIT_LINK mailbox command call. 680 * It is available to other drivers through the lpfc_hba data 681 * structure for use as a delayed link up mechanism with the 682 * module parameter lpfc_suppress_link_up. 683 * 684 * Return code 685 * 0 - success 686 * Any other value - error 687 **/ 688 static int 689 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 690 { 691 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 692 } 693 694 /** 695 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 696 * @phba: pointer to lpfc hba data structure. 697 * @fc_topology: desired fc topology. 698 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 699 * 700 * This routine will issue the INIT_LINK mailbox command call. 701 * It is available to other drivers through the lpfc_hba data 702 * structure for use as a delayed link up mechanism with the 703 * module parameter lpfc_suppress_link_up. 704 * 705 * Return code 706 * 0 - success 707 * Any other value - error 708 **/ 709 int 710 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 711 uint32_t flag) 712 { 713 struct lpfc_vport *vport = phba->pport; 714 LPFC_MBOXQ_t *pmb; 715 MAILBOX_t *mb; 716 int rc; 717 718 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 719 if (!pmb) { 720 phba->link_state = LPFC_HBA_ERROR; 721 return -ENOMEM; 722 } 723 mb = &pmb->u.mb; 724 pmb->vport = vport; 725 726 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 727 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 728 !(phba->lmt & LMT_1Gb)) || 729 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 730 !(phba->lmt & LMT_2Gb)) || 731 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 732 !(phba->lmt & LMT_4Gb)) || 733 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 734 !(phba->lmt & LMT_8Gb)) || 735 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 736 !(phba->lmt & LMT_10Gb)) || 737 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 738 !(phba->lmt & LMT_16Gb)) || 739 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 740 !(phba->lmt & LMT_32Gb)) || 741 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 742 !(phba->lmt & LMT_64Gb))) { 743 /* Reset link speed to auto */ 744 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 745 "1302 Invalid speed for this board:%d " 746 "Reset link speed to auto.\n", 747 phba->cfg_link_speed); 748 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 749 } 750 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 751 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 752 if (phba->sli_rev < LPFC_SLI_REV4) 753 lpfc_set_loopback_flag(phba); 754 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 755 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 757 "0498 Adapter failed to init, mbxCmd x%x " 758 "INIT_LINK, mbxStatus x%x\n", 759 mb->mbxCommand, mb->mbxStatus); 760 if (phba->sli_rev <= LPFC_SLI_REV3) { 761 /* Clear 
all interrupt enable conditions */ 762 writel(0, phba->HCregaddr); 763 readl(phba->HCregaddr); /* flush */ 764 /* Clear all pending interrupts */ 765 writel(0xffffffff, phba->HAregaddr); 766 readl(phba->HAregaddr); /* flush */ 767 } 768 phba->link_state = LPFC_HBA_ERROR; 769 if (rc != MBX_BUSY || flag == MBX_POLL) 770 mempool_free(pmb, phba->mbox_mem_pool); 771 return -EIO; 772 } 773 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 774 if (flag == MBX_POLL) 775 mempool_free(pmb, phba->mbox_mem_pool); 776 777 return 0; 778 } 779 780 /** 781 * lpfc_hba_down_link - this routine downs the FC link 782 * @phba: pointer to lpfc hba data structure. 783 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 784 * 785 * This routine will issue the DOWN_LINK mailbox command call. 786 * It is available to other drivers through the lpfc_hba data 787 * structure for use to stop the link. 788 * 789 * Return code 790 * 0 - success 791 * Any other value - error 792 **/ 793 static int 794 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 795 { 796 LPFC_MBOXQ_t *pmb; 797 int rc; 798 799 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 800 if (!pmb) { 801 phba->link_state = LPFC_HBA_ERROR; 802 return -ENOMEM; 803 } 804 805 lpfc_printf_log(phba, 806 KERN_ERR, LOG_INIT, 807 "0491 Adapter Link is disabled.\n"); 808 lpfc_down_link(phba, pmb); 809 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 810 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 811 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 812 lpfc_printf_log(phba, 813 KERN_ERR, LOG_INIT, 814 "2522 Adapter failed to issue DOWN_LINK" 815 " mbox command rc 0x%x\n", rc); 816 817 mempool_free(pmb, phba->mbox_mem_pool); 818 return -EIO; 819 } 820 if (flag == MBX_POLL) 821 mempool_free(pmb, phba->mbox_mem_pool); 822 823 return 0; 824 } 825 826 /** 827 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 828 * @phba: pointer to lpfc HBA data structure. 829 * 830 * This routine will do LPFC uninitialization before the HBA is reset when 831 * bringing down the SLI Layer. 832 * 833 * Return codes 834 * 0 - success. 835 * Any other value - error. 836 **/ 837 int 838 lpfc_hba_down_prep(struct lpfc_hba *phba) 839 { 840 struct lpfc_vport **vports; 841 int i; 842 843 if (phba->sli_rev <= LPFC_SLI_REV3) { 844 /* Disable interrupts */ 845 writel(0, phba->HCregaddr); 846 readl(phba->HCregaddr); /* flush */ 847 } 848 849 if (phba->pport->load_flag & FC_UNLOADING) 850 lpfc_cleanup_discovery_resources(phba->pport); 851 else { 852 vports = lpfc_create_vport_work_array(phba); 853 if (vports != NULL) 854 for (i = 0; i <= phba->max_vports && 855 vports[i] != NULL; i++) 856 lpfc_cleanup_discovery_resources(vports[i]); 857 lpfc_destroy_vport_work_array(phba, vports); 858 } 859 return 0; 860 } 861 862 /** 863 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 864 * rspiocb which got deferred 865 * 866 * @phba: pointer to lpfc HBA data structure. 867 * 868 * This routine will cleanup completed slow path events after HBA is reset 869 * when bringing down the SLI Layer. 870 * 871 * 872 * Return codes 873 * void. 
874 **/ 875 static void 876 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 877 { 878 struct lpfc_iocbq *rspiocbq; 879 struct hbq_dmabuf *dmabuf; 880 struct lpfc_cq_event *cq_event; 881 882 spin_lock_irq(&phba->hbalock); 883 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 884 spin_unlock_irq(&phba->hbalock); 885 886 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 887 /* Get the response iocb from the head of work queue */ 888 spin_lock_irq(&phba->hbalock); 889 list_remove_head(&phba->sli4_hba.sp_queue_event, 890 cq_event, struct lpfc_cq_event, list); 891 spin_unlock_irq(&phba->hbalock); 892 893 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 894 case CQE_CODE_COMPL_WQE: 895 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 896 cq_event); 897 lpfc_sli_release_iocbq(phba, rspiocbq); 898 break; 899 case CQE_CODE_RECEIVE: 900 case CQE_CODE_RECEIVE_V1: 901 dmabuf = container_of(cq_event, struct hbq_dmabuf, 902 cq_event); 903 lpfc_in_buf_free(phba, &dmabuf->dbuf); 904 } 905 } 906 } 907 908 /** 909 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 910 * @phba: pointer to lpfc HBA data structure. 911 * 912 * This routine will cleanup posted ELS buffers after the HBA is reset 913 * when bringing down the SLI Layer. 914 * 915 * 916 * Return codes 917 * void. 918 **/ 919 static void 920 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 921 { 922 struct lpfc_sli *psli = &phba->sli; 923 struct lpfc_sli_ring *pring; 924 struct lpfc_dmabuf *mp, *next_mp; 925 LIST_HEAD(buflist); 926 int count; 927 928 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 929 lpfc_sli_hbqbuf_free_all(phba); 930 else { 931 /* Cleanup preposted buffers on the ELS ring */ 932 pring = &psli->sli3_ring[LPFC_ELS_RING]; 933 spin_lock_irq(&phba->hbalock); 934 list_splice_init(&pring->postbufq, &buflist); 935 spin_unlock_irq(&phba->hbalock); 936 937 count = 0; 938 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 939 list_del(&mp->list); 940 count++; 941 lpfc_mbuf_free(phba, mp->virt, mp->phys); 942 kfree(mp); 943 } 944 945 spin_lock_irq(&phba->hbalock); 946 pring->postbufq_cnt -= count; 947 spin_unlock_irq(&phba->hbalock); 948 } 949 } 950 951 /** 952 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 953 * @phba: pointer to lpfc HBA data structure. 954 * 955 * This routine will cleanup the txcmplq after the HBA is reset when bringing 956 * down the SLI Layer. 957 * 958 * Return codes 959 * void 960 **/ 961 static void 962 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 963 { 964 struct lpfc_sli *psli = &phba->sli; 965 struct lpfc_queue *qp = NULL; 966 struct lpfc_sli_ring *pring; 967 LIST_HEAD(completions); 968 int i; 969 struct lpfc_iocbq *piocb, *next_iocb; 970 971 if (phba->sli_rev != LPFC_SLI_REV4) { 972 for (i = 0; i < psli->num_rings; i++) { 973 pring = &psli->sli3_ring[i]; 974 spin_lock_irq(&phba->hbalock); 975 /* At this point in time the HBA is either reset or DOA 976 * Nothing should be on txcmplq as it will 977 * NEVER complete. 
978 */ 979 list_splice_init(&pring->txcmplq, &completions); 980 pring->txcmplq_cnt = 0; 981 spin_unlock_irq(&phba->hbalock); 982 983 lpfc_sli_abort_iocb_ring(phba, pring); 984 } 985 /* Cancel all the IOCBs from the completions list */ 986 lpfc_sli_cancel_iocbs(phba, &completions, 987 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 988 return; 989 } 990 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 991 pring = qp->pring; 992 if (!pring) 993 continue; 994 spin_lock_irq(&pring->ring_lock); 995 list_for_each_entry_safe(piocb, next_iocb, 996 &pring->txcmplq, list) 997 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 998 list_splice_init(&pring->txcmplq, &completions); 999 pring->txcmplq_cnt = 0; 1000 spin_unlock_irq(&pring->ring_lock); 1001 lpfc_sli_abort_iocb_ring(phba, pring); 1002 } 1003 /* Cancel all the IOCBs from the completions list */ 1004 lpfc_sli_cancel_iocbs(phba, &completions, 1005 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1006 } 1007 1008 /** 1009 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 1010 int i; 1011 * @phba: pointer to lpfc HBA data structure. 1012 * 1013 * This routine will do uninitialization after the HBA is reset when bring 1014 * down the SLI Layer. 1015 * 1016 * Return codes 1017 * 0 - success. 1018 * Any other value - error. 1019 **/ 1020 static int 1021 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 1022 { 1023 lpfc_hba_free_post_buf(phba); 1024 lpfc_hba_clean_txcmplq(phba); 1025 return 0; 1026 } 1027 1028 /** 1029 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 1030 * @phba: pointer to lpfc HBA data structure. 1031 * 1032 * This routine will do uninitialization after the HBA is reset when bring 1033 * down the SLI Layer. 1034 * 1035 * Return codes 1036 * 0 - success. 1037 * Any other value - error. 1038 **/ 1039 static int 1040 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1041 { 1042 struct lpfc_io_buf *psb, *psb_next; 1043 struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next; 1044 struct lpfc_sli4_hdw_queue *qp; 1045 LIST_HEAD(aborts); 1046 LIST_HEAD(nvme_aborts); 1047 LIST_HEAD(nvmet_aborts); 1048 struct lpfc_sglq *sglq_entry = NULL; 1049 int cnt, idx; 1050 1051 1052 lpfc_sli_hbqbuf_free_all(phba); 1053 lpfc_hba_clean_txcmplq(phba); 1054 1055 /* At this point in time the HBA is either reset or DOA. Either 1056 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1057 * on the lpfc_els_sgl_list so that it can either be freed if the 1058 * driver is unloading or reposted if the driver is restarting 1059 * the port. 1060 */ 1061 spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */ 1062 /* scsl_buf_list */ 1063 /* sgl_list_lock required because worker thread uses this 1064 * list. 1065 */ 1066 spin_lock(&phba->sli4_hba.sgl_list_lock); 1067 list_for_each_entry(sglq_entry, 1068 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1069 sglq_entry->state = SGL_FREED; 1070 1071 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1072 &phba->sli4_hba.lpfc_els_sgl_list); 1073 1074 1075 spin_unlock(&phba->sli4_hba.sgl_list_lock); 1076 1077 /* abts_xxxx_buf_list_lock required because worker thread uses this 1078 * list. 
1079 */ 1080 cnt = 0; 1081 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1082 qp = &phba->sli4_hba.hdwq[idx]; 1083 1084 spin_lock(&qp->abts_scsi_buf_list_lock); 1085 list_splice_init(&qp->lpfc_abts_scsi_buf_list, 1086 &aborts); 1087 1088 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1089 psb->pCmd = NULL; 1090 psb->status = IOSTAT_SUCCESS; 1091 cnt++; 1092 } 1093 spin_lock(&qp->io_buf_list_put_lock); 1094 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1095 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1096 qp->abts_scsi_io_bufs = 0; 1097 spin_unlock(&qp->io_buf_list_put_lock); 1098 spin_unlock(&qp->abts_scsi_buf_list_lock); 1099 1100 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1101 spin_lock(&qp->abts_nvme_buf_list_lock); 1102 list_splice_init(&qp->lpfc_abts_nvme_buf_list, 1103 &nvme_aborts); 1104 list_for_each_entry_safe(psb, psb_next, &nvme_aborts, 1105 list) { 1106 psb->pCmd = NULL; 1107 psb->status = IOSTAT_SUCCESS; 1108 cnt++; 1109 } 1110 spin_lock(&qp->io_buf_list_put_lock); 1111 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1112 qp->abts_nvme_io_bufs = 0; 1113 list_splice_init(&nvme_aborts, 1114 &qp->lpfc_io_buf_list_put); 1115 spin_unlock(&qp->io_buf_list_put_lock); 1116 spin_unlock(&qp->abts_nvme_buf_list_lock); 1117 1118 } 1119 } 1120 1121 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1122 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1123 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1124 &nvmet_aborts); 1125 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1126 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1127 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); 1128 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1129 } 1130 } 1131 1132 spin_unlock_irq(&phba->hbalock); 1133 lpfc_sli4_free_sp_events(phba); 1134 return cnt; 1135 } 1136 1137 /** 1138 * lpfc_hba_down_post - Wrapper func for hba down post routine 1139 * @phba: pointer to lpfc HBA data structure. 1140 * 1141 * This routine wraps the actual SLI3 or SLI4 routine for performing 1142 * uninitialization after the HBA is reset when bring down the SLI Layer. 1143 * 1144 * Return codes 1145 * 0 - success. 1146 * Any other value - error. 1147 **/ 1148 int 1149 lpfc_hba_down_post(struct lpfc_hba *phba) 1150 { 1151 return (*phba->lpfc_hba_down_post)(phba); 1152 } 1153 1154 /** 1155 * lpfc_hb_timeout - The HBA-timer timeout handler 1156 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1157 * 1158 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1159 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1160 * work-port-events bitmap and the worker thread is notified. This timeout 1161 * event will be used by the worker thread to invoke the actual timeout 1162 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1163 * be performed in the timeout handler and the HBA timeout event bit shall 1164 * be cleared by the worker thread after it has taken the event bitmap out. 
1165 **/ 1166 static void 1167 lpfc_hb_timeout(struct timer_list *t) 1168 { 1169 struct lpfc_hba *phba; 1170 uint32_t tmo_posted; 1171 unsigned long iflag; 1172 1173 phba = from_timer(phba, t, hb_tmofunc); 1174 1175 /* Check for heart beat timeout conditions */ 1176 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1177 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1178 if (!tmo_posted) 1179 phba->pport->work_port_events |= WORKER_HB_TMO; 1180 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1181 1182 /* Tell the worker thread there is work to do */ 1183 if (!tmo_posted) 1184 lpfc_worker_wake_up(phba); 1185 return; 1186 } 1187 1188 /** 1189 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1190 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1191 * 1192 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1193 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1194 * work-port-events bitmap and the worker thread is notified. This timeout 1195 * event will be used by the worker thread to invoke the actual timeout 1196 * handler routine, lpfc_rrq_handler. Any periodical operations will 1197 * be performed in the timeout handler and the RRQ timeout event bit shall 1198 * be cleared by the worker thread after it has taken the event bitmap out. 1199 **/ 1200 static void 1201 lpfc_rrq_timeout(struct timer_list *t) 1202 { 1203 struct lpfc_hba *phba; 1204 unsigned long iflag; 1205 1206 phba = from_timer(phba, t, rrq_tmr); 1207 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1208 if (!(phba->pport->load_flag & FC_UNLOADING)) 1209 phba->hba_flag |= HBA_RRQ_ACTIVE; 1210 else 1211 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1212 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1213 1214 if (!(phba->pport->load_flag & FC_UNLOADING)) 1215 lpfc_worker_wake_up(phba); 1216 } 1217 1218 /** 1219 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1220 * @phba: pointer to lpfc hba data structure. 1221 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1222 * 1223 * This is the callback function to the lpfc heart-beat mailbox command. 1224 * If configured, the lpfc driver issues the heart-beat mailbox command to 1225 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1226 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1227 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1228 * heart-beat outstanding state. Once the mailbox command comes back and 1229 * no error conditions detected, the heart-beat mailbox command timer is 1230 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1231 * state is cleared for the next heart-beat. If the timer expired with the 1232 * heart-beat outstanding state set, the driver will put the HBA offline. 
1233 **/ 1234 static void 1235 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1236 { 1237 unsigned long drvr_flag; 1238 1239 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1240 phba->hb_outstanding = 0; 1241 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1242 1243 /* Check and reset heart-beat timer is necessary */ 1244 mempool_free(pmboxq, phba->mbox_mem_pool); 1245 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1246 !(phba->link_state == LPFC_HBA_ERROR) && 1247 !(phba->pport->load_flag & FC_UNLOADING)) 1248 mod_timer(&phba->hb_tmofunc, 1249 jiffies + 1250 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1251 return; 1252 } 1253 1254 static void 1255 lpfc_hb_eq_delay_work(struct work_struct *work) 1256 { 1257 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1258 struct lpfc_hba, eq_delay_work); 1259 struct lpfc_eq_intr_info *eqi, *eqi_new; 1260 struct lpfc_queue *eq, *eq_next; 1261 unsigned char *eqcnt = NULL; 1262 uint32_t usdelay; 1263 int i; 1264 1265 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1266 return; 1267 1268 if (phba->link_state == LPFC_HBA_ERROR || 1269 phba->pport->fc_flag & FC_OFFLINE_MODE) 1270 goto requeue; 1271 1272 eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char), 1273 GFP_KERNEL); 1274 if (!eqcnt) 1275 goto requeue; 1276 1277 for (i = 0; i < phba->cfg_irq_chann; i++) { 1278 eq = phba->sli4_hba.hdwq[i].hba_eq; 1279 if (eq && eqcnt[eq->last_cpu] < 2) 1280 eqcnt[eq->last_cpu]++; 1281 continue; 1282 } 1283 1284 for_each_present_cpu(i) { 1285 if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2) 1286 continue; 1287 1288 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1289 1290 usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) * 1291 LPFC_EQ_DELAY_STEP; 1292 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1293 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1294 1295 eqi->icnt = 0; 1296 1297 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1298 if (eq->last_cpu != i) { 1299 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1300 eq->last_cpu); 1301 list_move_tail(&eq->cpu_list, &eqi_new->list); 1302 continue; 1303 } 1304 if (usdelay != eq->q_mode) 1305 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1306 usdelay); 1307 } 1308 } 1309 1310 kfree(eqcnt); 1311 1312 requeue: 1313 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1314 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1315 } 1316 1317 /** 1318 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1319 * @phba: pointer to lpfc hba data structure. 1320 * 1321 * For each heartbeat, this routine does some heuristic methods to adjust 1322 * XRI distribution. The goal is to fully utilize free XRIs. 1323 **/ 1324 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1325 { 1326 u32 i; 1327 u32 hwq_count; 1328 1329 hwq_count = phba->cfg_hdw_queue; 1330 for (i = 0; i < hwq_count; i++) { 1331 /* Adjust XRIs in private pool */ 1332 lpfc_adjust_pvt_pool_count(phba, i); 1333 1334 /* Adjust high watermark */ 1335 lpfc_adjust_high_watermark(phba, i); 1336 1337 #ifdef LPFC_MXP_STAT 1338 /* Snapshot pbl, pvt and busy count */ 1339 lpfc_snapshot_mxp(phba, i); 1340 #endif 1341 } 1342 } 1343 1344 /** 1345 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1346 * @phba: pointer to lpfc hba data structure. 1347 * 1348 * This is the actual HBA-timer timeout handler to be invoked by the worker 1349 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1350 * handler performs any periodic operations needed for the device. 
If such 1351 * periodic event has already been attended to either in the interrupt handler 1352 * or by processing slow-ring or fast-ring events within the HBA-timer 1353 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1354 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1355 * is configured and there is no heart-beat mailbox command outstanding, a 1356 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1357 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1358 * to offline. 1359 **/ 1360 void 1361 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1362 { 1363 struct lpfc_vport **vports; 1364 LPFC_MBOXQ_t *pmboxq; 1365 struct lpfc_dmabuf *buf_ptr; 1366 int retval, i; 1367 struct lpfc_sli *psli = &phba->sli; 1368 LIST_HEAD(completions); 1369 1370 if (phba->cfg_xri_rebalancing) { 1371 /* Multi-XRI pools handler */ 1372 lpfc_hb_mxp_handler(phba); 1373 } 1374 1375 vports = lpfc_create_vport_work_array(phba); 1376 if (vports != NULL) 1377 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1378 lpfc_rcv_seq_check_edtov(vports[i]); 1379 lpfc_fdmi_num_disc_check(vports[i]); 1380 } 1381 lpfc_destroy_vport_work_array(phba, vports); 1382 1383 if ((phba->link_state == LPFC_HBA_ERROR) || 1384 (phba->pport->load_flag & FC_UNLOADING) || 1385 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1386 return; 1387 1388 spin_lock_irq(&phba->pport->work_port_lock); 1389 1390 if (time_after(phba->last_completion_time + 1391 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1392 jiffies)) { 1393 spin_unlock_irq(&phba->pport->work_port_lock); 1394 if (!phba->hb_outstanding) 1395 mod_timer(&phba->hb_tmofunc, 1396 jiffies + 1397 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1398 else 1399 mod_timer(&phba->hb_tmofunc, 1400 jiffies + 1401 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1402 return; 1403 } 1404 spin_unlock_irq(&phba->pport->work_port_lock); 1405 1406 if (phba->elsbuf_cnt && 1407 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1408 spin_lock_irq(&phba->hbalock); 1409 list_splice_init(&phba->elsbuf, &completions); 1410 phba->elsbuf_cnt = 0; 1411 phba->elsbuf_prev_cnt = 0; 1412 spin_unlock_irq(&phba->hbalock); 1413 1414 while (!list_empty(&completions)) { 1415 list_remove_head(&completions, buf_ptr, 1416 struct lpfc_dmabuf, list); 1417 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1418 kfree(buf_ptr); 1419 } 1420 } 1421 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1422 1423 /* If there is no heart beat outstanding, issue a heartbeat command */ 1424 if (phba->cfg_enable_hba_heartbeat) { 1425 if (!phba->hb_outstanding) { 1426 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1427 (list_empty(&psli->mboxq))) { 1428 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1429 GFP_KERNEL); 1430 if (!pmboxq) { 1431 mod_timer(&phba->hb_tmofunc, 1432 jiffies + 1433 msecs_to_jiffies(1000 * 1434 LPFC_HB_MBOX_INTERVAL)); 1435 return; 1436 } 1437 1438 lpfc_heart_beat(phba, pmboxq); 1439 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1440 pmboxq->vport = phba->pport; 1441 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1442 MBX_NOWAIT); 1443 1444 if (retval != MBX_BUSY && 1445 retval != MBX_SUCCESS) { 1446 mempool_free(pmboxq, 1447 phba->mbox_mem_pool); 1448 mod_timer(&phba->hb_tmofunc, 1449 jiffies + 1450 msecs_to_jiffies(1000 * 1451 LPFC_HB_MBOX_INTERVAL)); 1452 return; 1453 } 1454 phba->skipped_hb = 0; 1455 phba->hb_outstanding = 1; 1456 } else if (time_before_eq(phba->last_completion_time, 1457 phba->skipped_hb)) { 1458 
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1459 "2857 Last completion time not " 1460 " updated in %d ms\n", 1461 jiffies_to_msecs(jiffies 1462 - phba->last_completion_time)); 1463 } else 1464 phba->skipped_hb = jiffies; 1465 1466 mod_timer(&phba->hb_tmofunc, 1467 jiffies + 1468 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1469 return; 1470 } else { 1471 /* 1472 * If heart beat timeout called with hb_outstanding set 1473 * we need to give the hb mailbox cmd a chance to 1474 * complete or TMO. 1475 */ 1476 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1477 "0459 Adapter heartbeat still out" 1478 "standing:last compl time was %d ms.\n", 1479 jiffies_to_msecs(jiffies 1480 - phba->last_completion_time)); 1481 mod_timer(&phba->hb_tmofunc, 1482 jiffies + 1483 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1484 } 1485 } else { 1486 mod_timer(&phba->hb_tmofunc, 1487 jiffies + 1488 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1489 } 1490 } 1491 1492 /** 1493 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1494 * @phba: pointer to lpfc hba data structure. 1495 * 1496 * This routine is called to bring the HBA offline when HBA hardware error 1497 * other than Port Error 6 has been detected. 1498 **/ 1499 static void 1500 lpfc_offline_eratt(struct lpfc_hba *phba) 1501 { 1502 struct lpfc_sli *psli = &phba->sli; 1503 1504 spin_lock_irq(&phba->hbalock); 1505 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1506 spin_unlock_irq(&phba->hbalock); 1507 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1508 1509 lpfc_offline(phba); 1510 lpfc_reset_barrier(phba); 1511 spin_lock_irq(&phba->hbalock); 1512 lpfc_sli_brdreset(phba); 1513 spin_unlock_irq(&phba->hbalock); 1514 lpfc_hba_down_post(phba); 1515 lpfc_sli_brdready(phba, HS_MBRDY); 1516 lpfc_unblock_mgmt_io(phba); 1517 phba->link_state = LPFC_HBA_ERROR; 1518 return; 1519 } 1520 1521 /** 1522 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1523 * @phba: pointer to lpfc hba data structure. 1524 * 1525 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1526 * other than Port Error 6 has been detected. 1527 **/ 1528 void 1529 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1530 { 1531 spin_lock_irq(&phba->hbalock); 1532 phba->link_state = LPFC_HBA_ERROR; 1533 spin_unlock_irq(&phba->hbalock); 1534 1535 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1536 lpfc_offline(phba); 1537 lpfc_hba_down_post(phba); 1538 lpfc_unblock_mgmt_io(phba); 1539 } 1540 1541 /** 1542 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1543 * @phba: pointer to lpfc hba data structure. 1544 * 1545 * This routine is invoked to handle the deferred HBA hardware error 1546 * conditions. This type of error is indicated by HBA by setting ER1 1547 * and another ER bit in the host status register. The driver will 1548 * wait until the ER1 bit clears before handling the error condition. 1549 **/ 1550 static void 1551 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1552 { 1553 uint32_t old_host_status = phba->work_hs; 1554 struct lpfc_sli *psli = &phba->sli; 1555 1556 /* If the pci channel is offline, ignore possible errors, 1557 * since we cannot communicate with the pci card anyway. 
1558 */ 1559 if (pci_channel_offline(phba->pcidev)) { 1560 spin_lock_irq(&phba->hbalock); 1561 phba->hba_flag &= ~DEFER_ERATT; 1562 spin_unlock_irq(&phba->hbalock); 1563 return; 1564 } 1565 1566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1567 "0479 Deferred Adapter Hardware Error " 1568 "Data: x%x x%x x%x\n", 1569 phba->work_hs, 1570 phba->work_status[0], phba->work_status[1]); 1571 1572 spin_lock_irq(&phba->hbalock); 1573 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1574 spin_unlock_irq(&phba->hbalock); 1575 1576 1577 /* 1578 * Firmware stops when it triggred erratt. That could cause the I/Os 1579 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1580 * SCSI layer retry it after re-establishing link. 1581 */ 1582 lpfc_sli_abort_fcp_rings(phba); 1583 1584 /* 1585 * There was a firmware error. Take the hba offline and then 1586 * attempt to restart it. 1587 */ 1588 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 1589 lpfc_offline(phba); 1590 1591 /* Wait for the ER1 bit to clear.*/ 1592 while (phba->work_hs & HS_FFER1) { 1593 msleep(100); 1594 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1595 phba->work_hs = UNPLUG_ERR ; 1596 break; 1597 } 1598 /* If driver is unloading let the worker thread continue */ 1599 if (phba->pport->load_flag & FC_UNLOADING) { 1600 phba->work_hs = 0; 1601 break; 1602 } 1603 } 1604 1605 /* 1606 * This is to ptrotect against a race condition in which 1607 * first write to the host attention register clear the 1608 * host status register. 1609 */ 1610 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1611 phba->work_hs = old_host_status & ~HS_FFER1; 1612 1613 spin_lock_irq(&phba->hbalock); 1614 phba->hba_flag &= ~DEFER_ERATT; 1615 spin_unlock_irq(&phba->hbalock); 1616 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1617 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1618 } 1619 1620 static void 1621 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1622 { 1623 struct lpfc_board_event_header board_event; 1624 struct Scsi_Host *shost; 1625 1626 board_event.event_type = FC_REG_BOARD_EVENT; 1627 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1628 shost = lpfc_shost_from_vport(phba->pport); 1629 fc_host_post_vendor_event(shost, fc_get_event_number(), 1630 sizeof(board_event), 1631 (char *) &board_event, 1632 LPFC_NL_VENDOR_ID); 1633 } 1634 1635 /** 1636 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1637 * @phba: pointer to lpfc hba data structure. 1638 * 1639 * This routine is invoked to handle the following HBA hardware error 1640 * conditions: 1641 * 1 - HBA error attention interrupt 1642 * 2 - DMA ring index out of range 1643 * 3 - Mailbox command came back as unknown 1644 **/ 1645 static void 1646 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1647 { 1648 struct lpfc_vport *vport = phba->pport; 1649 struct lpfc_sli *psli = &phba->sli; 1650 uint32_t event_data; 1651 unsigned long temperature; 1652 struct temp_event temp_event_data; 1653 struct Scsi_Host *shost; 1654 1655 /* If the pci channel is offline, ignore possible errors, 1656 * since we cannot communicate with the pci card anyway. 
1657 */ 1658 if (pci_channel_offline(phba->pcidev)) { 1659 spin_lock_irq(&phba->hbalock); 1660 phba->hba_flag &= ~DEFER_ERATT; 1661 spin_unlock_irq(&phba->hbalock); 1662 return; 1663 } 1664 1665 /* If resets are disabled then leave the HBA alone and return */ 1666 if (!phba->cfg_enable_hba_reset) 1667 return; 1668 1669 /* Send an internal error event to mgmt application */ 1670 lpfc_board_errevt_to_mgmt(phba); 1671 1672 if (phba->hba_flag & DEFER_ERATT) 1673 lpfc_handle_deferred_eratt(phba); 1674 1675 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1676 if (phba->work_hs & HS_FFER6) 1677 /* Re-establishing Link */ 1678 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1679 "1301 Re-establishing Link " 1680 "Data: x%x x%x x%x\n", 1681 phba->work_hs, phba->work_status[0], 1682 phba->work_status[1]); 1683 if (phba->work_hs & HS_FFER8) 1684 /* Device Zeroization */ 1685 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1686 "2861 Host Authentication device " 1687 "zeroization Data:x%x x%x x%x\n", 1688 phba->work_hs, phba->work_status[0], 1689 phba->work_status[1]); 1690 1691 spin_lock_irq(&phba->hbalock); 1692 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1693 spin_unlock_irq(&phba->hbalock); 1694 1695 /* 1696 * Firmware stops when it triggled erratt with HS_FFER6. 1697 * That could cause the I/Os dropped by the firmware. 1698 * Error iocb (I/O) on txcmplq and let the SCSI layer 1699 * retry it after re-establishing link. 1700 */ 1701 lpfc_sli_abort_fcp_rings(phba); 1702 1703 /* 1704 * There was a firmware error. Take the hba offline and then 1705 * attempt to restart it. 1706 */ 1707 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1708 lpfc_offline(phba); 1709 lpfc_sli_brdrestart(phba); 1710 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1711 lpfc_unblock_mgmt_io(phba); 1712 return; 1713 } 1714 lpfc_unblock_mgmt_io(phba); 1715 } else if (phba->work_hs & HS_CRIT_TEMP) { 1716 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1717 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1718 temp_event_data.event_code = LPFC_CRIT_TEMP; 1719 temp_event_data.data = (uint32_t)temperature; 1720 1721 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1722 "0406 Adapter maximum temperature exceeded " 1723 "(%ld), taking this port offline " 1724 "Data: x%x x%x x%x\n", 1725 temperature, phba->work_hs, 1726 phba->work_status[0], phba->work_status[1]); 1727 1728 shost = lpfc_shost_from_vport(phba->pport); 1729 fc_host_post_vendor_event(shost, fc_get_event_number(), 1730 sizeof(temp_event_data), 1731 (char *) &temp_event_data, 1732 SCSI_NL_VID_TYPE_PCI 1733 | PCI_VENDOR_ID_EMULEX); 1734 1735 spin_lock_irq(&phba->hbalock); 1736 phba->over_temp_state = HBA_OVER_TEMP; 1737 spin_unlock_irq(&phba->hbalock); 1738 lpfc_offline_eratt(phba); 1739 1740 } else { 1741 /* The if clause above forces this code path when the status 1742 * failure is a value other than FFER6. Do not call the offline 1743 * twice. This is the adapter hardware error path. 
1744 */ 1745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1746 "0457 Adapter Hardware Error " 1747 "Data: x%x x%x x%x\n", 1748 phba->work_hs, 1749 phba->work_status[0], phba->work_status[1]); 1750 1751 event_data = FC_REG_DUMP_EVENT; 1752 shost = lpfc_shost_from_vport(vport); 1753 fc_host_post_vendor_event(shost, fc_get_event_number(), 1754 sizeof(event_data), (char *) &event_data, 1755 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1756 1757 lpfc_offline_eratt(phba); 1758 } 1759 return; 1760 } 1761 1762 /** 1763 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1764 * @phba: pointer to lpfc hba data structure. 1765 * @mbx_action: flag for mailbox shutdown action. 1766 * 1767 * This routine is invoked to perform an SLI4 port PCI function reset in 1768 * response to port status register polling attention. It waits for port 1769 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1770 * During this process, interrupt vectors are freed and later requested 1771 * for handling possible port resource change. 1772 **/ 1773 static int 1774 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1775 bool en_rn_msg) 1776 { 1777 int rc; 1778 uint32_t intr_mode; 1779 1780 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1781 LPFC_SLI_INTF_IF_TYPE_2) { 1782 /* 1783 * On error status condition, driver need to wait for port 1784 * ready before performing reset. 1785 */ 1786 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1787 if (rc) 1788 return rc; 1789 } 1790 1791 /* need reset: attempt for port recovery */ 1792 if (en_rn_msg) 1793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1794 "2887 Reset Needed: Attempting Port " 1795 "Recovery...\n"); 1796 lpfc_offline_prep(phba, mbx_action); 1797 lpfc_offline(phba); 1798 /* release interrupt for possible resource change */ 1799 lpfc_sli4_disable_intr(phba); 1800 rc = lpfc_sli_brdrestart(phba); 1801 if (rc) { 1802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1803 "6309 Failed to restart board\n"); 1804 return rc; 1805 } 1806 /* request and enable interrupt */ 1807 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1808 if (intr_mode == LPFC_INTR_ERROR) { 1809 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1810 "3175 Failed to enable interrupt\n"); 1811 return -EIO; 1812 } 1813 phba->intr_mode = intr_mode; 1814 rc = lpfc_online(phba); 1815 if (rc == 0) 1816 lpfc_unblock_mgmt_io(phba); 1817 1818 return rc; 1819 } 1820 1821 /** 1822 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1823 * @phba: pointer to lpfc hba data structure. 1824 * 1825 * This routine is invoked to handle the SLI4 HBA hardware error attention 1826 * conditions. 1827 **/ 1828 static void 1829 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1830 { 1831 struct lpfc_vport *vport = phba->pport; 1832 uint32_t event_data; 1833 struct Scsi_Host *shost; 1834 uint32_t if_type; 1835 struct lpfc_register portstat_reg = {0}; 1836 uint32_t reg_err1, reg_err2; 1837 uint32_t uerrlo_reg, uemasklo_reg; 1838 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1839 bool en_rn_msg = true; 1840 struct temp_event temp_event_data; 1841 struct lpfc_register portsmphr_reg; 1842 int rc, i; 1843 1844 /* If the pci channel is offline, ignore possible errors, since 1845 * we cannot communicate with the pci card anyway. 
1846 */ 1847 if (pci_channel_offline(phba->pcidev)) 1848 return; 1849 1850 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1851 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1852 switch (if_type) { 1853 case LPFC_SLI_INTF_IF_TYPE_0: 1854 pci_rd_rc1 = lpfc_readl( 1855 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1856 &uerrlo_reg); 1857 pci_rd_rc2 = lpfc_readl( 1858 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1859 &uemasklo_reg); 1860 /* consider PCI bus read error as pci_channel_offline */ 1861 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1862 return; 1863 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1864 lpfc_sli4_offline_eratt(phba); 1865 return; 1866 } 1867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1868 "7623 Checking UE recoverable"); 1869 1870 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1871 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1872 &portsmphr_reg.word0)) 1873 continue; 1874 1875 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1876 &portsmphr_reg); 1877 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1878 LPFC_PORT_SEM_UE_RECOVERABLE) 1879 break; 1880 /*Sleep for 1Sec, before checking SEMAPHORE */ 1881 msleep(1000); 1882 } 1883 1884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1885 "4827 smphr_port_status x%x : Waited %dSec", 1886 smphr_port_status, i); 1887 1888 /* Recoverable UE, reset the HBA device */ 1889 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1890 LPFC_PORT_SEM_UE_RECOVERABLE) { 1891 for (i = 0; i < 20; i++) { 1892 msleep(1000); 1893 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1894 &portsmphr_reg.word0) && 1895 (LPFC_POST_STAGE_PORT_READY == 1896 bf_get(lpfc_port_smphr_port_status, 1897 &portsmphr_reg))) { 1898 rc = lpfc_sli4_port_sta_fn_reset(phba, 1899 LPFC_MBX_NO_WAIT, en_rn_msg); 1900 if (rc == 0) 1901 return; 1902 lpfc_printf_log(phba, 1903 KERN_ERR, LOG_INIT, 1904 "4215 Failed to recover UE"); 1905 break; 1906 } 1907 } 1908 } 1909 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1910 "7624 Firmware not ready: Failing UE recovery," 1911 " waited %dSec", i); 1912 lpfc_sli4_offline_eratt(phba); 1913 break; 1914 1915 case LPFC_SLI_INTF_IF_TYPE_2: 1916 case LPFC_SLI_INTF_IF_TYPE_6: 1917 pci_rd_rc1 = lpfc_readl( 1918 phba->sli4_hba.u.if_type2.STATUSregaddr, 1919 &portstat_reg.word0); 1920 /* consider PCI bus read error as pci_channel_offline */ 1921 if (pci_rd_rc1 == -EIO) { 1922 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1923 "3151 PCI bus read access failure: x%x\n", 1924 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1925 return; 1926 } 1927 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1928 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1929 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1931 "2889 Port Overtemperature event, " 1932 "taking port offline Data: x%x x%x\n", 1933 reg_err1, reg_err2); 1934 1935 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1936 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1937 temp_event_data.event_code = LPFC_CRIT_TEMP; 1938 temp_event_data.data = 0xFFFFFFFF; 1939 1940 shost = lpfc_shost_from_vport(phba->pport); 1941 fc_host_post_vendor_event(shost, fc_get_event_number(), 1942 sizeof(temp_event_data), 1943 (char *)&temp_event_data, 1944 SCSI_NL_VID_TYPE_PCI 1945 | PCI_VENDOR_ID_EMULEX); 1946 1947 spin_lock_irq(&phba->hbalock); 1948 phba->over_temp_state = HBA_OVER_TEMP; 1949 spin_unlock_irq(&phba->hbalock); 1950 lpfc_sli4_offline_eratt(phba); 1951 return; 1952 } 1953 if (reg_err1 == 
SLIPORT_ERR1_REG_ERR_CODE_2 && 1954 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 1955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1956 "3143 Port Down: Firmware Update " 1957 "Detected\n"); 1958 en_rn_msg = false; 1959 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1960 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1961 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1962 "3144 Port Down: Debug Dump\n"); 1963 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1964 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1965 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1966 "3145 Port Down: Provisioning\n"); 1967 1968 /* If resets are disabled then leave the HBA alone and return */ 1969 if (!phba->cfg_enable_hba_reset) 1970 return; 1971 1972 /* Check port status register for function reset */ 1973 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1974 en_rn_msg); 1975 if (rc == 0) { 1976 /* don't report event on forced debug dump */ 1977 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1978 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1979 return; 1980 else 1981 break; 1982 } 1983 /* fall through for not able to recover */ 1984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1985 "3152 Unrecoverable error, bring the port " 1986 "offline\n"); 1987 lpfc_sli4_offline_eratt(phba); 1988 break; 1989 case LPFC_SLI_INTF_IF_TYPE_1: 1990 default: 1991 break; 1992 } 1993 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1994 "3123 Report dump event to upper layer\n"); 1995 /* Send an internal error event to mgmt application */ 1996 lpfc_board_errevt_to_mgmt(phba); 1997 1998 event_data = FC_REG_DUMP_EVENT; 1999 shost = lpfc_shost_from_vport(vport); 2000 fc_host_post_vendor_event(shost, fc_get_event_number(), 2001 sizeof(event_data), (char *) &event_data, 2002 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2003 } 2004 2005 /** 2006 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2007 * @phba: pointer to lpfc HBA data structure. 2008 * 2009 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2010 * routine from the API jump table function pointer from the lpfc_hba struct. 2011 * 2012 * Return codes 2013 * 0 - success. 2014 * Any other value - error. 2015 **/ 2016 void 2017 lpfc_handle_eratt(struct lpfc_hba *phba) 2018 { 2019 (*phba->lpfc_handle_eratt)(phba); 2020 } 2021 2022 /** 2023 * lpfc_handle_latt - The HBA link event handler 2024 * @phba: pointer to lpfc hba data structure. 2025 * 2026 * This routine is invoked from the worker thread to handle a HBA host 2027 * attention link event. SLI3 only. 
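 *
 * Any outstanding ELS commands are first flushed, then a READ_TOPOLOGY
 * mailbox command is issued with lpfc_mbx_cmpl_read_topology as its
 * completion handler. If buffer allocation or the mailbox issue fails,
 * link attention interrupts are re-enabled, lpfc_linkdown() is called and
 * the port is marked LPFC_HBA_ERROR.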
2028 **/ 2029 void 2030 lpfc_handle_latt(struct lpfc_hba *phba) 2031 { 2032 struct lpfc_vport *vport = phba->pport; 2033 struct lpfc_sli *psli = &phba->sli; 2034 LPFC_MBOXQ_t *pmb; 2035 volatile uint32_t control; 2036 struct lpfc_dmabuf *mp; 2037 int rc = 0; 2038 2039 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2040 if (!pmb) { 2041 rc = 1; 2042 goto lpfc_handle_latt_err_exit; 2043 } 2044 2045 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2046 if (!mp) { 2047 rc = 2; 2048 goto lpfc_handle_latt_free_pmb; 2049 } 2050 2051 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2052 if (!mp->virt) { 2053 rc = 3; 2054 goto lpfc_handle_latt_free_mp; 2055 } 2056 2057 /* Cleanup any outstanding ELS commands */ 2058 lpfc_els_flush_all_cmd(phba); 2059 2060 psli->slistat.link_event++; 2061 lpfc_read_topology(phba, pmb, mp); 2062 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2063 pmb->vport = vport; 2064 /* Block ELS IOCBs until we have processed this mbox command */ 2065 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2066 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2067 if (rc == MBX_NOT_FINISHED) { 2068 rc = 4; 2069 goto lpfc_handle_latt_free_mbuf; 2070 } 2071 2072 /* Clear Link Attention in HA REG */ 2073 spin_lock_irq(&phba->hbalock); 2074 writel(HA_LATT, phba->HAregaddr); 2075 readl(phba->HAregaddr); /* flush */ 2076 spin_unlock_irq(&phba->hbalock); 2077 2078 return; 2079 2080 lpfc_handle_latt_free_mbuf: 2081 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2082 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2083 lpfc_handle_latt_free_mp: 2084 kfree(mp); 2085 lpfc_handle_latt_free_pmb: 2086 mempool_free(pmb, phba->mbox_mem_pool); 2087 lpfc_handle_latt_err_exit: 2088 /* Enable Link attention interrupts */ 2089 spin_lock_irq(&phba->hbalock); 2090 psli->sli_flag |= LPFC_PROCESS_LA; 2091 control = readl(phba->HCregaddr); 2092 control |= HC_LAINT_ENA; 2093 writel(control, phba->HCregaddr); 2094 readl(phba->HCregaddr); /* flush */ 2095 2096 /* Clear Link Attention in HA REG */ 2097 writel(HA_LATT, phba->HAregaddr); 2098 readl(phba->HAregaddr); /* flush */ 2099 spin_unlock_irq(&phba->hbalock); 2100 lpfc_linkdown(phba); 2101 phba->link_state = LPFC_HBA_ERROR; 2102 2103 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2104 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2105 2106 return; 2107 } 2108 2109 /** 2110 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2111 * @phba: pointer to lpfc hba data structure. 2112 * @vpd: pointer to the vital product data. 2113 * @len: length of the vital product data in bytes. 2114 * 2115 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2116 * an array of characters. In this routine, the ModelName, ProgramType, and 2117 * ModelDesc, etc. fields of the phba data structure will be populated. 
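 * The data is a sequence of tagged descriptors: tags 0x82, 0x91 and 0x90 are
 * each followed by a 16-bit little-endian length, and tag 0x78 terminates the
 * data. Within the 0x90 (read-only) area, two-character keywords select the
 * field to fill: SN (SerialNumber), V1 (ModelDesc), V2 (ModelName),
 * V3 (ProgramType) and V4 (Port).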
2118 * 2119 * Return codes 2120 * 0 - pointer to the VPD passed in is NULL 2121 * 1 - success 2122 **/ 2123 int 2124 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2125 { 2126 uint8_t lenlo, lenhi; 2127 int Length; 2128 int i, j; 2129 int finished = 0; 2130 int index = 0; 2131 2132 if (!vpd) 2133 return 0; 2134 2135 /* Vital Product */ 2136 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2137 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2138 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2139 (uint32_t) vpd[3]); 2140 while (!finished && (index < (len - 4))) { 2141 switch (vpd[index]) { 2142 case 0x82: 2143 case 0x91: 2144 index += 1; 2145 lenlo = vpd[index]; 2146 index += 1; 2147 lenhi = vpd[index]; 2148 index += 1; 2149 i = ((((unsigned short)lenhi) << 8) + lenlo); 2150 index += i; 2151 break; 2152 case 0x90: 2153 index += 1; 2154 lenlo = vpd[index]; 2155 index += 1; 2156 lenhi = vpd[index]; 2157 index += 1; 2158 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2159 if (Length > len - index) 2160 Length = len - index; 2161 while (Length > 0) { 2162 /* Look for Serial Number */ 2163 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2164 index += 2; 2165 i = vpd[index]; 2166 index += 1; 2167 j = 0; 2168 Length -= (3+i); 2169 while(i--) { 2170 phba->SerialNumber[j++] = vpd[index++]; 2171 if (j == 31) 2172 break; 2173 } 2174 phba->SerialNumber[j] = 0; 2175 continue; 2176 } 2177 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2178 phba->vpd_flag |= VPD_MODEL_DESC; 2179 index += 2; 2180 i = vpd[index]; 2181 index += 1; 2182 j = 0; 2183 Length -= (3+i); 2184 while(i--) { 2185 phba->ModelDesc[j++] = vpd[index++]; 2186 if (j == 255) 2187 break; 2188 } 2189 phba->ModelDesc[j] = 0; 2190 continue; 2191 } 2192 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2193 phba->vpd_flag |= VPD_MODEL_NAME; 2194 index += 2; 2195 i = vpd[index]; 2196 index += 1; 2197 j = 0; 2198 Length -= (3+i); 2199 while(i--) { 2200 phba->ModelName[j++] = vpd[index++]; 2201 if (j == 79) 2202 break; 2203 } 2204 phba->ModelName[j] = 0; 2205 continue; 2206 } 2207 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2208 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2209 index += 2; 2210 i = vpd[index]; 2211 index += 1; 2212 j = 0; 2213 Length -= (3+i); 2214 while(i--) { 2215 phba->ProgramType[j++] = vpd[index++]; 2216 if (j == 255) 2217 break; 2218 } 2219 phba->ProgramType[j] = 0; 2220 continue; 2221 } 2222 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2223 phba->vpd_flag |= VPD_PORT; 2224 index += 2; 2225 i = vpd[index]; 2226 index += 1; 2227 j = 0; 2228 Length -= (3+i); 2229 while(i--) { 2230 if ((phba->sli_rev == LPFC_SLI_REV4) && 2231 (phba->sli4_hba.pport_name_sta == 2232 LPFC_SLI4_PPNAME_GET)) { 2233 j++; 2234 index++; 2235 } else 2236 phba->Port[j++] = vpd[index++]; 2237 if (j == 19) 2238 break; 2239 } 2240 if ((phba->sli_rev != LPFC_SLI_REV4) || 2241 (phba->sli4_hba.pport_name_sta == 2242 LPFC_SLI4_PPNAME_NON)) 2243 phba->Port[j] = 0; 2244 continue; 2245 } 2246 else { 2247 index += 2; 2248 i = vpd[index]; 2249 index += 1; 2250 index += i; 2251 Length -= (3 + i); 2252 } 2253 } 2254 finished = 0; 2255 break; 2256 case 0x78: 2257 finished = 1; 2258 break; 2259 default: 2260 index ++; 2261 break; 2262 } 2263 } 2264 2265 return(1); 2266 } 2267 2268 /** 2269 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2270 * @phba: pointer to lpfc hba data structure. 2271 * @mdp: pointer to the data structure to hold the derived model name. 
2272 * @descp: pointer to the data structure to hold the derived description. 2273 * 2274 * This routine retrieves HBA's description based on its registered PCI device 2275 * ID. The @descp passed into this function points to an array of 256 chars. It 2276 * shall be returned with the model name, maximum speed, and the host bus type. 2277 * The @mdp passed into this function points to an array of 80 chars. When the 2278 * function returns, the @mdp will be filled with the model name. 2279 **/ 2280 static void 2281 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2282 { 2283 lpfc_vpd_t *vp; 2284 uint16_t dev_id = phba->pcidev->device; 2285 int max_speed; 2286 int GE = 0; 2287 int oneConnect = 0; /* default is not a oneConnect */ 2288 struct { 2289 char *name; 2290 char *bus; 2291 char *function; 2292 } m = {"<Unknown>", "", ""}; 2293 2294 if (mdp && mdp[0] != '\0' 2295 && descp && descp[0] != '\0') 2296 return; 2297 2298 if (phba->lmt & LMT_64Gb) 2299 max_speed = 64; 2300 else if (phba->lmt & LMT_32Gb) 2301 max_speed = 32; 2302 else if (phba->lmt & LMT_16Gb) 2303 max_speed = 16; 2304 else if (phba->lmt & LMT_10Gb) 2305 max_speed = 10; 2306 else if (phba->lmt & LMT_8Gb) 2307 max_speed = 8; 2308 else if (phba->lmt & LMT_4Gb) 2309 max_speed = 4; 2310 else if (phba->lmt & LMT_2Gb) 2311 max_speed = 2; 2312 else if (phba->lmt & LMT_1Gb) 2313 max_speed = 1; 2314 else 2315 max_speed = 0; 2316 2317 vp = &phba->vpd; 2318 2319 switch (dev_id) { 2320 case PCI_DEVICE_ID_FIREFLY: 2321 m = (typeof(m)){"LP6000", "PCI", 2322 "Obsolete, Unsupported Fibre Channel Adapter"}; 2323 break; 2324 case PCI_DEVICE_ID_SUPERFLY: 2325 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2326 m = (typeof(m)){"LP7000", "PCI", ""}; 2327 else 2328 m = (typeof(m)){"LP7000E", "PCI", ""}; 2329 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2330 break; 2331 case PCI_DEVICE_ID_DRAGONFLY: 2332 m = (typeof(m)){"LP8000", "PCI", 2333 "Obsolete, Unsupported Fibre Channel Adapter"}; 2334 break; 2335 case PCI_DEVICE_ID_CENTAUR: 2336 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2337 m = (typeof(m)){"LP9002", "PCI", ""}; 2338 else 2339 m = (typeof(m)){"LP9000", "PCI", ""}; 2340 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2341 break; 2342 case PCI_DEVICE_ID_RFLY: 2343 m = (typeof(m)){"LP952", "PCI", 2344 "Obsolete, Unsupported Fibre Channel Adapter"}; 2345 break; 2346 case PCI_DEVICE_ID_PEGASUS: 2347 m = (typeof(m)){"LP9802", "PCI-X", 2348 "Obsolete, Unsupported Fibre Channel Adapter"}; 2349 break; 2350 case PCI_DEVICE_ID_THOR: 2351 m = (typeof(m)){"LP10000", "PCI-X", 2352 "Obsolete, Unsupported Fibre Channel Adapter"}; 2353 break; 2354 case PCI_DEVICE_ID_VIPER: 2355 m = (typeof(m)){"LPX1000", "PCI-X", 2356 "Obsolete, Unsupported Fibre Channel Adapter"}; 2357 break; 2358 case PCI_DEVICE_ID_PFLY: 2359 m = (typeof(m)){"LP982", "PCI-X", 2360 "Obsolete, Unsupported Fibre Channel Adapter"}; 2361 break; 2362 case PCI_DEVICE_ID_TFLY: 2363 m = (typeof(m)){"LP1050", "PCI-X", 2364 "Obsolete, Unsupported Fibre Channel Adapter"}; 2365 break; 2366 case PCI_DEVICE_ID_HELIOS: 2367 m = (typeof(m)){"LP11000", "PCI-X2", 2368 "Obsolete, Unsupported Fibre Channel Adapter"}; 2369 break; 2370 case PCI_DEVICE_ID_HELIOS_SCSP: 2371 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2372 "Obsolete, Unsupported Fibre Channel Adapter"}; 2373 break; 2374 case PCI_DEVICE_ID_HELIOS_DCSP: 2375 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2376 "Obsolete, Unsupported Fibre Channel Adapter"}; 2377 break; 2378 case 
PCI_DEVICE_ID_NEPTUNE: 2379 m = (typeof(m)){"LPe1000", "PCIe", 2380 "Obsolete, Unsupported Fibre Channel Adapter"}; 2381 break; 2382 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2383 m = (typeof(m)){"LPe1000-SP", "PCIe", 2384 "Obsolete, Unsupported Fibre Channel Adapter"}; 2385 break; 2386 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2387 m = (typeof(m)){"LPe1002-SP", "PCIe", 2388 "Obsolete, Unsupported Fibre Channel Adapter"}; 2389 break; 2390 case PCI_DEVICE_ID_BMID: 2391 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2392 break; 2393 case PCI_DEVICE_ID_BSMB: 2394 m = (typeof(m)){"LP111", "PCI-X2", 2395 "Obsolete, Unsupported Fibre Channel Adapter"}; 2396 break; 2397 case PCI_DEVICE_ID_ZEPHYR: 2398 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2399 break; 2400 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2401 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2402 break; 2403 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2404 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2405 GE = 1; 2406 break; 2407 case PCI_DEVICE_ID_ZMID: 2408 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2409 break; 2410 case PCI_DEVICE_ID_ZSMB: 2411 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2412 break; 2413 case PCI_DEVICE_ID_LP101: 2414 m = (typeof(m)){"LP101", "PCI-X", 2415 "Obsolete, Unsupported Fibre Channel Adapter"}; 2416 break; 2417 case PCI_DEVICE_ID_LP10000S: 2418 m = (typeof(m)){"LP10000-S", "PCI", 2419 "Obsolete, Unsupported Fibre Channel Adapter"}; 2420 break; 2421 case PCI_DEVICE_ID_LP11000S: 2422 m = (typeof(m)){"LP11000-S", "PCI-X2", 2423 "Obsolete, Unsupported Fibre Channel Adapter"}; 2424 break; 2425 case PCI_DEVICE_ID_LPE11000S: 2426 m = (typeof(m)){"LPe11000-S", "PCIe", 2427 "Obsolete, Unsupported Fibre Channel Adapter"}; 2428 break; 2429 case PCI_DEVICE_ID_SAT: 2430 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2431 break; 2432 case PCI_DEVICE_ID_SAT_MID: 2433 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2434 break; 2435 case PCI_DEVICE_ID_SAT_SMB: 2436 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2437 break; 2438 case PCI_DEVICE_ID_SAT_DCSP: 2439 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2440 break; 2441 case PCI_DEVICE_ID_SAT_SCSP: 2442 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2443 break; 2444 case PCI_DEVICE_ID_SAT_S: 2445 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2446 break; 2447 case PCI_DEVICE_ID_HORNET: 2448 m = (typeof(m)){"LP21000", "PCIe", 2449 "Obsolete, Unsupported FCoE Adapter"}; 2450 GE = 1; 2451 break; 2452 case PCI_DEVICE_ID_PROTEUS_VF: 2453 m = (typeof(m)){"LPev12000", "PCIe IOV", 2454 "Obsolete, Unsupported Fibre Channel Adapter"}; 2455 break; 2456 case PCI_DEVICE_ID_PROTEUS_PF: 2457 m = (typeof(m)){"LPev12000", "PCIe IOV", 2458 "Obsolete, Unsupported Fibre Channel Adapter"}; 2459 break; 2460 case PCI_DEVICE_ID_PROTEUS_S: 2461 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2462 "Obsolete, Unsupported Fibre Channel Adapter"}; 2463 break; 2464 case PCI_DEVICE_ID_TIGERSHARK: 2465 oneConnect = 1; 2466 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2467 break; 2468 case PCI_DEVICE_ID_TOMCAT: 2469 oneConnect = 1; 2470 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2471 break; 2472 case PCI_DEVICE_ID_FALCON: 2473 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2474 "EmulexSecure Fibre"}; 2475 break; 2476 case PCI_DEVICE_ID_BALIUS: 2477 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2478 "Obsolete, Unsupported Fibre Channel Adapter"}; 2479 break; 2480 
case PCI_DEVICE_ID_LANCER_FC: 2481 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2482 break; 2483 case PCI_DEVICE_ID_LANCER_FC_VF: 2484 m = (typeof(m)){"LPe16000", "PCIe", 2485 "Obsolete, Unsupported Fibre Channel Adapter"}; 2486 break; 2487 case PCI_DEVICE_ID_LANCER_FCOE: 2488 oneConnect = 1; 2489 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2490 break; 2491 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2492 oneConnect = 1; 2493 m = (typeof(m)){"OCe15100", "PCIe", 2494 "Obsolete, Unsupported FCoE"}; 2495 break; 2496 case PCI_DEVICE_ID_LANCER_G6_FC: 2497 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2498 break; 2499 case PCI_DEVICE_ID_LANCER_G7_FC: 2500 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2501 break; 2502 case PCI_DEVICE_ID_SKYHAWK: 2503 case PCI_DEVICE_ID_SKYHAWK_VF: 2504 oneConnect = 1; 2505 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2506 break; 2507 default: 2508 m = (typeof(m)){"Unknown", "", ""}; 2509 break; 2510 } 2511 2512 if (mdp && mdp[0] == '\0') 2513 snprintf(mdp, 79,"%s", m.name); 2514 /* 2515 * oneConnect hba requires special processing, they are all initiators 2516 * and we put the port number on the end 2517 */ 2518 if (descp && descp[0] == '\0') { 2519 if (oneConnect) 2520 snprintf(descp, 255, 2521 "Emulex OneConnect %s, %s Initiator %s", 2522 m.name, m.function, 2523 phba->Port); 2524 else if (max_speed == 0) 2525 snprintf(descp, 255, 2526 "Emulex %s %s %s", 2527 m.name, m.bus, m.function); 2528 else 2529 snprintf(descp, 255, 2530 "Emulex %s %d%s %s %s", 2531 m.name, max_speed, (GE) ? "GE" : "Gb", 2532 m.bus, m.function); 2533 } 2534 } 2535 2536 /** 2537 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2538 * @phba: pointer to lpfc hba data structure. 2539 * @pring: pointer to a IOCB ring. 2540 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2541 * 2542 * This routine posts a given number of IOCBs with the associated DMA buffer 2543 * descriptors specified by the cnt argument to the given IOCB ring. 2544 * 2545 * Return codes 2546 * The number of IOCBs NOT able to be posted to the IOCB ring. 
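 *
 * Each IOCB posts up to two DMA buffers; buffers that cannot be posted are
 * accounted for in pring->missbufcnt and retried on the next call.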
2547 **/ 2548 int 2549 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2550 { 2551 IOCB_t *icmd; 2552 struct lpfc_iocbq *iocb; 2553 struct lpfc_dmabuf *mp1, *mp2; 2554 2555 cnt += pring->missbufcnt; 2556 2557 /* While there are buffers to post */ 2558 while (cnt > 0) { 2559 /* Allocate buffer for command iocb */ 2560 iocb = lpfc_sli_get_iocbq(phba); 2561 if (iocb == NULL) { 2562 pring->missbufcnt = cnt; 2563 return cnt; 2564 } 2565 icmd = &iocb->iocb; 2566 2567 /* 2 buffers can be posted per command */ 2568 /* Allocate buffer to post */ 2569 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2570 if (mp1) 2571 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2572 if (!mp1 || !mp1->virt) { 2573 kfree(mp1); 2574 lpfc_sli_release_iocbq(phba, iocb); 2575 pring->missbufcnt = cnt; 2576 return cnt; 2577 } 2578 2579 INIT_LIST_HEAD(&mp1->list); 2580 /* Allocate buffer to post */ 2581 if (cnt > 1) { 2582 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2583 if (mp2) 2584 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2585 &mp2->phys); 2586 if (!mp2 || !mp2->virt) { 2587 kfree(mp2); 2588 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2589 kfree(mp1); 2590 lpfc_sli_release_iocbq(phba, iocb); 2591 pring->missbufcnt = cnt; 2592 return cnt; 2593 } 2594 2595 INIT_LIST_HEAD(&mp2->list); 2596 } else { 2597 mp2 = NULL; 2598 } 2599 2600 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2601 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2602 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2603 icmd->ulpBdeCount = 1; 2604 cnt--; 2605 if (mp2) { 2606 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2607 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2608 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2609 cnt--; 2610 icmd->ulpBdeCount = 2; 2611 } 2612 2613 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2614 icmd->ulpLe = 1; 2615 2616 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2617 IOCB_ERROR) { 2618 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2619 kfree(mp1); 2620 cnt++; 2621 if (mp2) { 2622 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2623 kfree(mp2); 2624 cnt++; 2625 } 2626 lpfc_sli_release_iocbq(phba, iocb); 2627 pring->missbufcnt = cnt; 2628 return cnt; 2629 } 2630 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2631 if (mp2) 2632 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2633 } 2634 pring->missbufcnt = 0; 2635 return 0; 2636 } 2637 2638 /** 2639 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2640 * @phba: pointer to lpfc hba data structure. 2641 * 2642 * This routine posts initial receive IOCB buffers to the ELS ring. The 2643 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2644 * set to 64 IOCBs. SLI3 only. 2645 * 2646 * Return codes 2647 * 0 - success (currently always success) 2648 **/ 2649 static int 2650 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2651 { 2652 struct lpfc_sli *psli = &phba->sli; 2653 2654 /* Ring 0, ELS / CT buffers */ 2655 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2656 /* Ring 2 - FCP no buffers needed */ 2657 2658 return 0; 2659 } 2660 2661 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2662 2663 /** 2664 * lpfc_sha_init - Set up initial array of hash table entries 2665 * @HashResultPointer: pointer to an array as hash table. 2666 * 2667 * This routine sets up the initial values to the array of hash table entries 2668 * for the LC HBAs. 
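 * The five constants written below are the standard SHA-1 initialization
 * vector (H0-H4).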
2669  **/
2670 static void
2671 lpfc_sha_init(uint32_t * HashResultPointer)
2672 {
2673 	HashResultPointer[0] = 0x67452301;
2674 	HashResultPointer[1] = 0xEFCDAB89;
2675 	HashResultPointer[2] = 0x98BADCFE;
2676 	HashResultPointer[3] = 0x10325476;
2677 	HashResultPointer[4] = 0xC3D2E1F0;
2678 }
2679 
2680 /**
2681  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2682  * @HashResultPointer: pointer to an initial/result hash table.
2683  * @HashWorkingPointer: pointer to a working hash table.
2684  *
2685  * This routine iterates an initial hash table pointed to by @HashResultPointer
2686  * with the values from the working hash table pointed to by @HashWorkingPointer.
2687  * The results are put back into the initial hash table, returned through
2688  * @HashResultPointer as the result hash table.
2689  **/
2690 static void
2691 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2692 {
2693 	int t;
2694 	uint32_t TEMP;
2695 	uint32_t A, B, C, D, E;
2696 	t = 16;
2697 	do {
2698 		HashWorkingPointer[t] =
2699 		    S(1,
2700 		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
2701 		      HashWorkingPointer[t - 14] ^
2702 		      HashWorkingPointer[t - 16]);
2703 	} while (++t <= 79);
2704 	t = 0;
2705 	A = HashResultPointer[0];
2706 	B = HashResultPointer[1];
2707 	C = HashResultPointer[2];
2708 	D = HashResultPointer[3];
2709 	E = HashResultPointer[4];
2710 
2711 	do {
2712 		if (t < 20) {
2713 			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2714 		} else if (t < 40) {
2715 			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2716 		} else if (t < 60) {
2717 			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2718 		} else {
2719 			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2720 		}
2721 		TEMP += S(5, A) + E + HashWorkingPointer[t];
2722 		E = D;
2723 		D = C;
2724 		C = S(30, B);
2725 		B = A;
2726 		A = TEMP;
2727 	} while (++t <= 79);
2728 
2729 	HashResultPointer[0] += A;
2730 	HashResultPointer[1] += B;
2731 	HashResultPointer[2] += C;
2732 	HashResultPointer[3] += D;
2733 	HashResultPointer[4] += E;
2734 
2735 }
2736 
2737 /**
2738  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2739  * @RandomChallenge: pointer to the entry of host challenge random number array.
2740  * @HashWorking: pointer to the entry of the working hash array.
2741  *
2742  * This routine calculates the working hash array referred to by @HashWorking
2743  * from the challenge random numbers associated with the host, referred to by
2744  * @RandomChallenge. The result is put into the entry of the working hash
2745  * array and returned by reference through @HashWorking.
2746  **/
2747 static void
2748 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2749 {
2750 	*HashWorking = (*RandomChallenge ^ *HashWorking);
2751 }
2752 
2753 /**
2754  * lpfc_hba_init - Perform special handling for LC HBA initialization
2755  * @phba: pointer to lpfc hba data structure.
2756  * @hbainit: pointer to an array of unsigned 32-bit integers.
2757  *
2758  * This routine performs the special handling for LC HBA initialization.
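 * The 80-word working array is seeded with the adapter WWNN, its leading
 * words are mixed with the RandomData challenge words via lpfc_challenge_key,
 * and the result is then run through the SHA-1 rounds
 * (lpfc_sha_init/lpfc_sha_iterate) to produce @hbainit.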
2759 **/ 2760 void 2761 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2762 { 2763 int t; 2764 uint32_t *HashWorking; 2765 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2766 2767 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2768 if (!HashWorking) 2769 return; 2770 2771 HashWorking[0] = HashWorking[78] = *pwwnn++; 2772 HashWorking[1] = HashWorking[79] = *pwwnn; 2773 2774 for (t = 0; t < 7; t++) 2775 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2776 2777 lpfc_sha_init(hbainit); 2778 lpfc_sha_iterate(hbainit, HashWorking); 2779 kfree(HashWorking); 2780 } 2781 2782 /** 2783 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2784 * @vport: pointer to a virtual N_Port data structure. 2785 * 2786 * This routine performs the necessary cleanups before deleting the @vport. 2787 * It invokes the discovery state machine to perform necessary state 2788 * transitions and to release the ndlps associated with the @vport. Note, 2789 * the physical port is treated as @vport 0. 2790 **/ 2791 void 2792 lpfc_cleanup(struct lpfc_vport *vport) 2793 { 2794 struct lpfc_hba *phba = vport->phba; 2795 struct lpfc_nodelist *ndlp, *next_ndlp; 2796 int i = 0; 2797 2798 if (phba->link_state > LPFC_LINK_DOWN) 2799 lpfc_port_link_failure(vport); 2800 2801 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2802 if (!NLP_CHK_NODE_ACT(ndlp)) { 2803 ndlp = lpfc_enable_node(vport, ndlp, 2804 NLP_STE_UNUSED_NODE); 2805 if (!ndlp) 2806 continue; 2807 spin_lock_irq(&phba->ndlp_lock); 2808 NLP_SET_FREE_REQ(ndlp); 2809 spin_unlock_irq(&phba->ndlp_lock); 2810 /* Trigger the release of the ndlp memory */ 2811 lpfc_nlp_put(ndlp); 2812 continue; 2813 } 2814 spin_lock_irq(&phba->ndlp_lock); 2815 if (NLP_CHK_FREE_REQ(ndlp)) { 2816 /* The ndlp should not be in memory free mode already */ 2817 spin_unlock_irq(&phba->ndlp_lock); 2818 continue; 2819 } else 2820 /* Indicate request for freeing ndlp memory */ 2821 NLP_SET_FREE_REQ(ndlp); 2822 spin_unlock_irq(&phba->ndlp_lock); 2823 2824 if (vport->port_type != LPFC_PHYSICAL_PORT && 2825 ndlp->nlp_DID == Fabric_DID) { 2826 /* Just free up ndlp with Fabric_DID for vports */ 2827 lpfc_nlp_put(ndlp); 2828 continue; 2829 } 2830 2831 /* take care of nodes in unused state before the state 2832 * machine taking action. 2833 */ 2834 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2835 lpfc_nlp_put(ndlp); 2836 continue; 2837 } 2838 2839 if (ndlp->nlp_type & NLP_FABRIC) 2840 lpfc_disc_state_machine(vport, ndlp, NULL, 2841 NLP_EVT_DEVICE_RECOVERY); 2842 2843 lpfc_disc_state_machine(vport, ndlp, NULL, 2844 NLP_EVT_DEVICE_RM); 2845 } 2846 2847 /* At this point, ALL ndlp's should be gone 2848 * because of the previous NLP_EVT_DEVICE_RM. 2849 * Lets wait for this to happen, if needed. 2850 */ 2851 while (!list_empty(&vport->fc_nodes)) { 2852 if (i++ > 3000) { 2853 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2854 "0233 Nodelist not empty\n"); 2855 list_for_each_entry_safe(ndlp, next_ndlp, 2856 &vport->fc_nodes, nlp_listp) { 2857 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2858 LOG_NODE, 2859 "0282 did:x%x ndlp:x%p " 2860 "usgmap:x%x refcnt:%d\n", 2861 ndlp->nlp_DID, (void *)ndlp, 2862 ndlp->nlp_usg_map, 2863 kref_read(&ndlp->kref)); 2864 } 2865 break; 2866 } 2867 2868 /* Wait for any activity on ndlps to settle */ 2869 msleep(10); 2870 } 2871 lpfc_cleanup_vports_rrqs(vport, NULL); 2872 } 2873 2874 /** 2875 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2876 * @vport: pointer to a virtual N_Port data structure. 
2877  *
2878  * This routine stops all the timers associated with a @vport. This function
2879  * is invoked before disabling or deleting a @vport. Note that the physical
2880  * port is treated as @vport 0.
2881  **/
2882 void
2883 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2884 {
2885 	del_timer_sync(&vport->els_tmofunc);
2886 	del_timer_sync(&vport->delayed_disc_tmo);
2887 	lpfc_can_disctmo(vport);
2888 	return;
2889 }
2890 
2891 /**
2892  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2893  * @phba: pointer to lpfc hba data structure.
2894  *
2895  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2896  * caller of this routine should already hold the host lock.
2897  **/
2898 void
2899 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2900 {
2901 	/* Clear pending FCF rediscovery wait flag */
2902 	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2903 
2904 	/* Now, try to stop the timer */
2905 	del_timer(&phba->fcf.redisc_wait);
2906 }
2907 
2908 /**
2909  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2910  * @phba: pointer to lpfc hba data structure.
2911  *
2912  * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2913  * checks whether the FCF rediscovery wait timer is pending with the host
2914  * lock held before proceeding with disabling the timer and clearing the
2915  * wait timer pending flag.
2916  **/
2917 void
2918 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2919 {
2920 	spin_lock_irq(&phba->hbalock);
2921 	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2922 		/* FCF rediscovery timer already fired or stopped */
2923 		spin_unlock_irq(&phba->hbalock);
2924 		return;
2925 	}
2926 	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2927 	/* Clear failover in progress flags */
2928 	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2929 	spin_unlock_irq(&phba->hbalock);
2930 }
2931 
2932 /**
2933  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2934  * @phba: pointer to lpfc hba data structure.
2935  *
2936  * This routine stops all the timers associated with a HBA. This function is
2937  * invoked before either putting a HBA offline or unloading the driver.
2938  **/
2939 void
2940 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2941 {
2942 	if (phba->pport)
2943 		lpfc_stop_vport_timers(phba->pport);
2944 	cancel_delayed_work_sync(&phba->eq_delay_work);
2945 	del_timer_sync(&phba->sli.mbox_tmo);
2946 	del_timer_sync(&phba->fabric_block_timer);
2947 	del_timer_sync(&phba->eratt_poll);
2948 	del_timer_sync(&phba->hb_tmofunc);
2949 	if (phba->sli_rev == LPFC_SLI_REV4) {
2950 		del_timer_sync(&phba->rrq_tmr);
2951 		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2952 	}
2953 	phba->hb_outstanding = 0;
2954 
2955 	switch (phba->pci_dev_grp) {
2956 	case LPFC_PCI_DEV_LP:
2957 		/* Stop any LightPulse device specific driver timers */
2958 		del_timer_sync(&phba->fcp_poll_timer);
2959 		break;
2960 	case LPFC_PCI_DEV_OC:
2961 		/* Stop any OneConnect device specific driver timers */
2962 		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2963 		break;
2964 	default:
2965 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2966 			"0297 Invalid device group (x%x)\n",
2967 			phba->pci_dev_grp);
2968 		break;
2969 	}
2970 	return;
2971 }
2972 
2973 /**
2974  * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2975  * @phba: pointer to lpfc hba data structure.
2976  *
2977  * This routine marks a HBA's management interface as blocked.
Once the HBA's 2978 * management interface is marked as blocked, all the user space access to 2979 * the HBA, whether they are from sysfs interface or libdfc interface will 2980 * all be blocked. The HBA is set to block the management interface when the 2981 * driver prepares the HBA interface for online or offline. 2982 **/ 2983 static void 2984 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2985 { 2986 unsigned long iflag; 2987 uint8_t actcmd = MBX_HEARTBEAT; 2988 unsigned long timeout; 2989 2990 spin_lock_irqsave(&phba->hbalock, iflag); 2991 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2992 spin_unlock_irqrestore(&phba->hbalock, iflag); 2993 if (mbx_action == LPFC_MBX_NO_WAIT) 2994 return; 2995 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2996 spin_lock_irqsave(&phba->hbalock, iflag); 2997 if (phba->sli.mbox_active) { 2998 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2999 /* Determine how long we might wait for the active mailbox 3000 * command to be gracefully completed by firmware. 3001 */ 3002 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 3003 phba->sli.mbox_active) * 1000) + jiffies; 3004 } 3005 spin_unlock_irqrestore(&phba->hbalock, iflag); 3006 3007 /* Wait for the outstnading mailbox command to complete */ 3008 while (phba->sli.mbox_active) { 3009 /* Check active mailbox complete status every 2ms */ 3010 msleep(2); 3011 if (time_after(jiffies, timeout)) { 3012 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3013 "2813 Mgmt IO is Blocked %x " 3014 "- mbox cmd %x still active\n", 3015 phba->sli.sli_flag, actcmd); 3016 break; 3017 } 3018 } 3019 } 3020 3021 /** 3022 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3023 * @phba: pointer to lpfc hba data structure. 3024 * 3025 * Allocate RPIs for all active remote nodes. This is needed whenever 3026 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3027 * is to fixup the temporary rpi assignments. 3028 **/ 3029 void 3030 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3031 { 3032 struct lpfc_nodelist *ndlp, *next_ndlp; 3033 struct lpfc_vport **vports; 3034 int i, rpi; 3035 unsigned long flags; 3036 3037 if (phba->sli_rev != LPFC_SLI_REV4) 3038 return; 3039 3040 vports = lpfc_create_vport_work_array(phba); 3041 if (vports == NULL) 3042 return; 3043 3044 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3045 if (vports[i]->load_flag & FC_UNLOADING) 3046 continue; 3047 3048 list_for_each_entry_safe(ndlp, next_ndlp, 3049 &vports[i]->fc_nodes, 3050 nlp_listp) { 3051 if (!NLP_CHK_NODE_ACT(ndlp)) 3052 continue; 3053 rpi = lpfc_sli4_alloc_rpi(phba); 3054 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3055 spin_lock_irqsave(&phba->ndlp_lock, flags); 3056 NLP_CLR_NODE_ACT(ndlp); 3057 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3058 continue; 3059 } 3060 ndlp->nlp_rpi = rpi; 3061 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3062 "0009 rpi:%x DID:%x " 3063 "flg:%x map:%x %p\n", ndlp->nlp_rpi, 3064 ndlp->nlp_DID, ndlp->nlp_flag, 3065 ndlp->nlp_usg_map, ndlp); 3066 } 3067 } 3068 lpfc_destroy_vport_work_array(phba, vports); 3069 } 3070 3071 /** 3072 * lpfc_create_expedite_pool - create expedite pool 3073 * @phba: pointer to lpfc hba data structure. 3074 * 3075 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3076 * to expedite pool. Mark them as expedite. 
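 * At most XRI_BATCH buffers are moved; both the HWQ put-list lock and the
 * expedite pool lock are held while the buffers are transferred.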
3077  **/
3078 void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3079 {
3080 	struct lpfc_sli4_hdw_queue *qp;
3081 	struct lpfc_io_buf *lpfc_ncmd;
3082 	struct lpfc_io_buf *lpfc_ncmd_next;
3083 	struct lpfc_epd_pool *epd_pool;
3084 	unsigned long iflag;
3085 
3086 	epd_pool = &phba->epd_pool;
3087 	qp = &phba->sli4_hba.hdwq[0];
3088 
3089 	spin_lock_init(&epd_pool->lock);
3090 	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3091 	spin_lock(&epd_pool->lock);
3092 	INIT_LIST_HEAD(&epd_pool->list);
3093 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3094 				 &qp->lpfc_io_buf_list_put, list) {
3095 		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3096 		lpfc_ncmd->expedite = true;
3097 		qp->put_io_bufs--;
3098 		epd_pool->count++;
3099 		if (epd_pool->count >= XRI_BATCH)
3100 			break;
3101 	}
3102 	spin_unlock(&epd_pool->lock);
3103 	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3104 }
3105 
3106 /**
3107  * lpfc_destroy_expedite_pool - destroy expedite pool
3108  * @phba: pointer to lpfc hba data structure.
3109  *
3110  * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3111  * of HWQ 0 and clears the expedite mark.
3112  **/
3113 void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3114 {
3115 	struct lpfc_sli4_hdw_queue *qp;
3116 	struct lpfc_io_buf *lpfc_ncmd;
3117 	struct lpfc_io_buf *lpfc_ncmd_next;
3118 	struct lpfc_epd_pool *epd_pool;
3119 	unsigned long iflag;
3120 
3121 	epd_pool = &phba->epd_pool;
3122 	qp = &phba->sli4_hba.hdwq[0];
3123 
3124 	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3125 	spin_lock(&epd_pool->lock);
3126 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3127 				 &epd_pool->list, list) {
3128 		list_move_tail(&lpfc_ncmd->list,
3129 			       &qp->lpfc_io_buf_list_put);
3130 		lpfc_ncmd->expedite = false;
3131 		qp->put_io_bufs++;
3132 		epd_pool->count--;
3133 	}
3134 	spin_unlock(&epd_pool->lock);
3135 	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3136 }
3137 
3138 /**
3139  * lpfc_create_multixri_pools - create multi-XRI pools
3140  * @phba: pointer to lpfc hba data structure.
3141  *
3142  * This routine initializes the public and private XRI pools for each HWQ,
3143  * then moves XRIs from lpfc_io_buf_list_put to the public pool. High and
3144  * low watermarks are also initialized.
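 * The private pool's high watermark is set to half of the per-HWQ XRI limit
 * and its low watermark to XRI_BATCH.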
3145 **/ 3146 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3147 { 3148 u32 i, j; 3149 u32 hwq_count; 3150 u32 count_per_hwq; 3151 struct lpfc_io_buf *lpfc_ncmd; 3152 struct lpfc_io_buf *lpfc_ncmd_next; 3153 unsigned long iflag; 3154 struct lpfc_sli4_hdw_queue *qp; 3155 struct lpfc_multixri_pool *multixri_pool; 3156 struct lpfc_pbl_pool *pbl_pool; 3157 struct lpfc_pvt_pool *pvt_pool; 3158 3159 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3160 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3161 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3162 phba->sli4_hba.io_xri_cnt); 3163 3164 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3165 lpfc_create_expedite_pool(phba); 3166 3167 hwq_count = phba->cfg_hdw_queue; 3168 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3169 3170 for (i = 0; i < hwq_count; i++) { 3171 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3172 3173 if (!multixri_pool) { 3174 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3175 "1238 Failed to allocate memory for " 3176 "multixri_pool\n"); 3177 3178 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3179 lpfc_destroy_expedite_pool(phba); 3180 3181 j = 0; 3182 while (j < i) { 3183 qp = &phba->sli4_hba.hdwq[j]; 3184 kfree(qp->p_multixri_pool); 3185 j++; 3186 } 3187 phba->cfg_xri_rebalancing = 0; 3188 return; 3189 } 3190 3191 qp = &phba->sli4_hba.hdwq[i]; 3192 qp->p_multixri_pool = multixri_pool; 3193 3194 multixri_pool->xri_limit = count_per_hwq; 3195 multixri_pool->rrb_next_hwqid = i; 3196 3197 /* Deal with public free xri pool */ 3198 pbl_pool = &multixri_pool->pbl_pool; 3199 spin_lock_init(&pbl_pool->lock); 3200 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3201 spin_lock(&pbl_pool->lock); 3202 INIT_LIST_HEAD(&pbl_pool->list); 3203 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3204 &qp->lpfc_io_buf_list_put, list) { 3205 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3206 qp->put_io_bufs--; 3207 pbl_pool->count++; 3208 } 3209 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3210 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3211 pbl_pool->count, i); 3212 spin_unlock(&pbl_pool->lock); 3213 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3214 3215 /* Deal with private free xri pool */ 3216 pvt_pool = &multixri_pool->pvt_pool; 3217 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3218 pvt_pool->low_watermark = XRI_BATCH; 3219 spin_lock_init(&pvt_pool->lock); 3220 spin_lock_irqsave(&pvt_pool->lock, iflag); 3221 INIT_LIST_HEAD(&pvt_pool->list); 3222 pvt_pool->count = 0; 3223 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3224 } 3225 } 3226 3227 /** 3228 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3229 * @phba: pointer to lpfc hba data structure. 3230 * 3231 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
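 * Each HWQ's p_multixri_pool pointer is cleared before its pbl and pvt pools
 * are drained, and the multixri_pool memory is then freed.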
3232 **/ 3233 void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3234 { 3235 u32 i; 3236 u32 hwq_count; 3237 struct lpfc_io_buf *lpfc_ncmd; 3238 struct lpfc_io_buf *lpfc_ncmd_next; 3239 unsigned long iflag; 3240 struct lpfc_sli4_hdw_queue *qp; 3241 struct lpfc_multixri_pool *multixri_pool; 3242 struct lpfc_pbl_pool *pbl_pool; 3243 struct lpfc_pvt_pool *pvt_pool; 3244 3245 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3246 lpfc_destroy_expedite_pool(phba); 3247 3248 hwq_count = phba->cfg_hdw_queue; 3249 3250 for (i = 0; i < hwq_count; i++) { 3251 qp = &phba->sli4_hba.hdwq[i]; 3252 multixri_pool = qp->p_multixri_pool; 3253 if (!multixri_pool) 3254 continue; 3255 3256 qp->p_multixri_pool = NULL; 3257 3258 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3259 3260 /* Deal with public free xri pool */ 3261 pbl_pool = &multixri_pool->pbl_pool; 3262 spin_lock(&pbl_pool->lock); 3263 3264 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3265 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3266 pbl_pool->count, i); 3267 3268 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3269 &pbl_pool->list, list) { 3270 list_move_tail(&lpfc_ncmd->list, 3271 &qp->lpfc_io_buf_list_put); 3272 qp->put_io_bufs++; 3273 pbl_pool->count--; 3274 } 3275 3276 INIT_LIST_HEAD(&pbl_pool->list); 3277 pbl_pool->count = 0; 3278 3279 spin_unlock(&pbl_pool->lock); 3280 3281 /* Deal with private free xri pool */ 3282 pvt_pool = &multixri_pool->pvt_pool; 3283 spin_lock(&pvt_pool->lock); 3284 3285 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3286 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3287 pvt_pool->count, i); 3288 3289 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3290 &pvt_pool->list, list) { 3291 list_move_tail(&lpfc_ncmd->list, 3292 &qp->lpfc_io_buf_list_put); 3293 qp->put_io_bufs++; 3294 pvt_pool->count--; 3295 } 3296 3297 INIT_LIST_HEAD(&pvt_pool->list); 3298 pvt_pool->count = 0; 3299 3300 spin_unlock(&pvt_pool->lock); 3301 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3302 3303 kfree(multixri_pool); 3304 } 3305 } 3306 3307 /** 3308 * lpfc_online - Initialize and bring a HBA online 3309 * @phba: pointer to lpfc hba data structure. 3310 * 3311 * This routine initializes the HBA and brings a HBA online. During this 3312 * process, the management interface is blocked to prevent user space access 3313 * to the HBA interfering with the driver initialization. 3314 * 3315 * Return codes 3316 * 0 - successful 3317 * 1 - failed 3318 **/ 3319 int 3320 lpfc_online(struct lpfc_hba *phba) 3321 { 3322 struct lpfc_vport *vport; 3323 struct lpfc_vport **vports; 3324 int i, error = 0; 3325 bool vpis_cleared = false; 3326 3327 if (!phba) 3328 return 0; 3329 vport = phba->pport; 3330 3331 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3332 return 0; 3333 3334 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3335 "0458 Bring Adapter online\n"); 3336 3337 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3338 3339 if (phba->sli_rev == LPFC_SLI_REV4) { 3340 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3341 lpfc_unblock_mgmt_io(phba); 3342 return 1; 3343 } 3344 spin_lock_irq(&phba->hbalock); 3345 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3346 vpis_cleared = true; 3347 spin_unlock_irq(&phba->hbalock); 3348 3349 /* Reestablish the local initiator port. 3350 * The offline process destroyed the previous lport. 
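	 * This only applies when the NVME FC4 type is enabled and the port
	 * is not running in NVMET mode.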
3351 */ 3352 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3353 !phba->nvmet_support) { 3354 error = lpfc_nvme_create_localport(phba->pport); 3355 if (error) 3356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3357 "6132 NVME restore reg failed " 3358 "on nvmei error x%x\n", error); 3359 } 3360 } else { 3361 lpfc_sli_queue_init(phba); 3362 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3363 lpfc_unblock_mgmt_io(phba); 3364 return 1; 3365 } 3366 } 3367 3368 vports = lpfc_create_vport_work_array(phba); 3369 if (vports != NULL) { 3370 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3371 struct Scsi_Host *shost; 3372 shost = lpfc_shost_from_vport(vports[i]); 3373 spin_lock_irq(shost->host_lock); 3374 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3375 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3376 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3377 if (phba->sli_rev == LPFC_SLI_REV4) { 3378 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3379 if ((vpis_cleared) && 3380 (vports[i]->port_type != 3381 LPFC_PHYSICAL_PORT)) 3382 vports[i]->vpi = 0; 3383 } 3384 spin_unlock_irq(shost->host_lock); 3385 } 3386 } 3387 lpfc_destroy_vport_work_array(phba, vports); 3388 3389 if (phba->cfg_xri_rebalancing) 3390 lpfc_create_multixri_pools(phba); 3391 3392 lpfc_unblock_mgmt_io(phba); 3393 return 0; 3394 } 3395 3396 /** 3397 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3398 * @phba: pointer to lpfc hba data structure. 3399 * 3400 * This routine marks a HBA's management interface as not blocked. Once the 3401 * HBA's management interface is marked as not blocked, all the user space 3402 * access to the HBA, whether they are from sysfs interface or libdfc 3403 * interface will be allowed. The HBA is set to block the management interface 3404 * when the driver prepares the HBA interface for online or offline and then 3405 * set to unblock the management interface afterwards. 3406 **/ 3407 void 3408 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3409 { 3410 unsigned long iflag; 3411 3412 spin_lock_irqsave(&phba->hbalock, iflag); 3413 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3414 spin_unlock_irqrestore(&phba->hbalock, iflag); 3415 } 3416 3417 /** 3418 * lpfc_offline_prep - Prepare a HBA to be brought offline 3419 * @phba: pointer to lpfc hba data structure. 3420 * 3421 * This routine is invoked to prepare a HBA to be brought offline. It performs 3422 * unregistration login to all the nodes on all vports and flushes the mailbox 3423 * queue to make it ready to be brought offline. 
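 * On SLI4 ports the RPI of each node is also freed here; a new RPI is
 * allocated when the adapter port comes back online.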
3424 **/ 3425 void 3426 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3427 { 3428 struct lpfc_vport *vport = phba->pport; 3429 struct lpfc_nodelist *ndlp, *next_ndlp; 3430 struct lpfc_vport **vports; 3431 struct Scsi_Host *shost; 3432 int i; 3433 3434 if (vport->fc_flag & FC_OFFLINE_MODE) 3435 return; 3436 3437 lpfc_block_mgmt_io(phba, mbx_action); 3438 3439 lpfc_linkdown(phba); 3440 3441 /* Issue an unreg_login to all nodes on all vports */ 3442 vports = lpfc_create_vport_work_array(phba); 3443 if (vports != NULL) { 3444 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3445 if (vports[i]->load_flag & FC_UNLOADING) 3446 continue; 3447 shost = lpfc_shost_from_vport(vports[i]); 3448 spin_lock_irq(shost->host_lock); 3449 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3450 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3451 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3452 spin_unlock_irq(shost->host_lock); 3453 3454 shost = lpfc_shost_from_vport(vports[i]); 3455 list_for_each_entry_safe(ndlp, next_ndlp, 3456 &vports[i]->fc_nodes, 3457 nlp_listp) { 3458 if (!NLP_CHK_NODE_ACT(ndlp)) 3459 continue; 3460 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3461 continue; 3462 if (ndlp->nlp_type & NLP_FABRIC) { 3463 lpfc_disc_state_machine(vports[i], ndlp, 3464 NULL, NLP_EVT_DEVICE_RECOVERY); 3465 lpfc_disc_state_machine(vports[i], ndlp, 3466 NULL, NLP_EVT_DEVICE_RM); 3467 } 3468 spin_lock_irq(shost->host_lock); 3469 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3470 spin_unlock_irq(shost->host_lock); 3471 /* 3472 * Whenever an SLI4 port goes offline, free the 3473 * RPI. Get a new RPI when the adapter port 3474 * comes back online. 3475 */ 3476 if (phba->sli_rev == LPFC_SLI_REV4) { 3477 lpfc_printf_vlog(ndlp->vport, 3478 KERN_INFO, LOG_NODE, 3479 "0011 lpfc_offline: " 3480 "ndlp:x%p did %x " 3481 "usgmap:x%x rpi:%x\n", 3482 ndlp, ndlp->nlp_DID, 3483 ndlp->nlp_usg_map, 3484 ndlp->nlp_rpi); 3485 3486 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3487 } 3488 lpfc_unreg_rpi(vports[i], ndlp); 3489 } 3490 } 3491 } 3492 lpfc_destroy_vport_work_array(phba, vports); 3493 3494 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3495 3496 if (phba->wq) 3497 flush_workqueue(phba->wq); 3498 } 3499 3500 /** 3501 * lpfc_offline - Bring a HBA offline 3502 * @phba: pointer to lpfc hba data structure. 3503 * 3504 * This routine actually brings a HBA offline. It stops all the timers 3505 * associated with the HBA, brings down the SLI layer, and eventually 3506 * marks the HBA as in offline state for the upper layer protocol. 3507 **/ 3508 void 3509 lpfc_offline(struct lpfc_hba *phba) 3510 { 3511 struct Scsi_Host *shost; 3512 struct lpfc_vport **vports; 3513 int i; 3514 3515 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3516 return; 3517 3518 /* stop port and all timers associated with this hba */ 3519 lpfc_stop_port(phba); 3520 3521 /* Tear down the local and target port registrations. The 3522 * nvme transports need to cleanup. 3523 */ 3524 lpfc_nvmet_destroy_targetport(phba); 3525 lpfc_nvme_destroy_localport(phba->pport); 3526 3527 vports = lpfc_create_vport_work_array(phba); 3528 if (vports != NULL) 3529 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3530 lpfc_stop_vport_timers(vports[i]); 3531 lpfc_destroy_vport_work_array(phba, vports); 3532 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3533 "0460 Bring Adapter offline\n"); 3534 /* Bring down the SLI Layer and cleanup. The HBA is offline 3535 now. 
*/ 3536 lpfc_sli_hba_down(phba); 3537 spin_lock_irq(&phba->hbalock); 3538 phba->work_ha = 0; 3539 spin_unlock_irq(&phba->hbalock); 3540 vports = lpfc_create_vport_work_array(phba); 3541 if (vports != NULL) 3542 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3543 shost = lpfc_shost_from_vport(vports[i]); 3544 spin_lock_irq(shost->host_lock); 3545 vports[i]->work_port_events = 0; 3546 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3547 spin_unlock_irq(shost->host_lock); 3548 } 3549 lpfc_destroy_vport_work_array(phba, vports); 3550 3551 if (phba->cfg_xri_rebalancing) 3552 lpfc_destroy_multixri_pools(phba); 3553 } 3554 3555 /** 3556 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3557 * @phba: pointer to lpfc hba data structure. 3558 * 3559 * This routine is to free all the SCSI buffers and IOCBs from the driver 3560 * list back to kernel. It is called from lpfc_pci_remove_one to free 3561 * the internal resources before the device is removed from the system. 3562 **/ 3563 static void 3564 lpfc_scsi_free(struct lpfc_hba *phba) 3565 { 3566 struct lpfc_io_buf *sb, *sb_next; 3567 3568 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3569 return; 3570 3571 spin_lock_irq(&phba->hbalock); 3572 3573 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3574 3575 spin_lock(&phba->scsi_buf_list_put_lock); 3576 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3577 list) { 3578 list_del(&sb->list); 3579 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3580 sb->dma_handle); 3581 kfree(sb); 3582 phba->total_scsi_bufs--; 3583 } 3584 spin_unlock(&phba->scsi_buf_list_put_lock); 3585 3586 spin_lock(&phba->scsi_buf_list_get_lock); 3587 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3588 list) { 3589 list_del(&sb->list); 3590 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3591 sb->dma_handle); 3592 kfree(sb); 3593 phba->total_scsi_bufs--; 3594 } 3595 spin_unlock(&phba->scsi_buf_list_get_lock); 3596 spin_unlock_irq(&phba->hbalock); 3597 } 3598 3599 /** 3600 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3601 * @phba: pointer to lpfc hba data structure. 3602 * 3603 * This routine is to free all the IO buffers and IOCBs from the driver 3604 * list back to kernel. It is called from lpfc_pci_remove_one to free 3605 * the internal resources before the device is removed from the system. 3606 **/ 3607 void 3608 lpfc_io_free(struct lpfc_hba *phba) 3609 { 3610 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3611 struct lpfc_sli4_hdw_queue *qp; 3612 int idx; 3613 3614 spin_lock_irq(&phba->hbalock); 3615 3616 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3617 qp = &phba->sli4_hba.hdwq[idx]; 3618 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3619 spin_lock(&qp->io_buf_list_put_lock); 3620 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3621 &qp->lpfc_io_buf_list_put, 3622 list) { 3623 list_del(&lpfc_ncmd->list); 3624 qp->put_io_bufs--; 3625 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3626 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3627 kfree(lpfc_ncmd); 3628 qp->total_io_bufs--; 3629 } 3630 spin_unlock(&qp->io_buf_list_put_lock); 3631 3632 spin_lock(&qp->io_buf_list_get_lock); 3633 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3634 &qp->lpfc_io_buf_list_get, 3635 list) { 3636 list_del(&lpfc_ncmd->list); 3637 qp->get_io_bufs--; 3638 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3639 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3640 kfree(lpfc_ncmd); 3641 qp->total_io_bufs--; 3642 } 3643 spin_unlock(&qp->io_buf_list_get_lock); 3644 } 3645 3646 spin_unlock_irq(&phba->hbalock); 3647 } 3648 3649 /** 3650 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3651 * @phba: pointer to lpfc hba data structure. 3652 * 3653 * This routine first calculates the sizes of the current els and allocated 3654 * scsi sgl lists, and then goes through all sgls to updates the physical 3655 * XRIs assigned due to port function reset. During port initialization, the 3656 * current els and allocated scsi sgl lists are 0s. 3657 * 3658 * Return codes 3659 * 0 - successful (for now, it always returns 0) 3660 **/ 3661 int 3662 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3663 { 3664 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3665 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3666 LIST_HEAD(els_sgl_list); 3667 int rc; 3668 3669 /* 3670 * update on pci function's els xri-sgl list 3671 */ 3672 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3673 3674 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3675 /* els xri-sgl expanded */ 3676 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3677 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3678 "3157 ELS xri-sgl count increased from " 3679 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3680 els_xri_cnt); 3681 /* allocate the additional els sgls */ 3682 for (i = 0; i < xri_cnt; i++) { 3683 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3684 GFP_KERNEL); 3685 if (sglq_entry == NULL) { 3686 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3687 "2562 Failure to allocate an " 3688 "ELS sgl entry:%d\n", i); 3689 rc = -ENOMEM; 3690 goto out_free_mem; 3691 } 3692 sglq_entry->buff_type = GEN_BUFF_TYPE; 3693 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3694 &sglq_entry->phys); 3695 if (sglq_entry->virt == NULL) { 3696 kfree(sglq_entry); 3697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3698 "2563 Failure to allocate an " 3699 "ELS mbuf:%d\n", i); 3700 rc = -ENOMEM; 3701 goto out_free_mem; 3702 } 3703 sglq_entry->sgl = sglq_entry->virt; 3704 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3705 sglq_entry->state = SGL_FREED; 3706 list_add_tail(&sglq_entry->list, &els_sgl_list); 3707 } 3708 spin_lock_irq(&phba->hbalock); 3709 spin_lock(&phba->sli4_hba.sgl_list_lock); 3710 list_splice_init(&els_sgl_list, 3711 &phba->sli4_hba.lpfc_els_sgl_list); 3712 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3713 spin_unlock_irq(&phba->hbalock); 3714 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3715 /* els xri-sgl shrinked */ 3716 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3717 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3718 "3158 ELS xri-sgl count decreased from " 3719 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3720 els_xri_cnt); 3721 spin_lock_irq(&phba->hbalock); 3722 spin_lock(&phba->sli4_hba.sgl_list_lock); 3723 
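		/* Move the driver's ELS sgls to a local list so the extra
		 * entries can be freed; the remaining sgls are spliced back
		 * below while sgl_list_lock is still held.
		 */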
		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
				 &els_sgl_list);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				__lpfc_mbuf_free(phba, sglq_entry->virt,
						 sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	return rc;
}

/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current ELS and allocated
 * NVMET sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current ELS and allocated NVMET sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	LIST_HEAD(nvmet_sgl_list);
	int rc;

	/*
	 * update on pci function's nvmet xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl expanded */
		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
		/* allocate the additional nvmet sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"6303 Failure to allocate an "
						"NVMET sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = NVMET_BUFF_TYPE;
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
							&sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"6304 Failure to allocate an "
						"NVMET buf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0,
			       phba->cfg_sg_dma_buf_size);
			sglq_entry->state = SGL_FREED;
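			/* Stage the new sgl on a local list; it is spliced
			 * onto the driver's nvmet sgl list under
			 * sgl_list_lock once all entries are allocated.
			 */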
list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3829 } 3830 spin_lock_irq(&phba->hbalock); 3831 spin_lock(&phba->sli4_hba.sgl_list_lock); 3832 list_splice_init(&nvmet_sgl_list, 3833 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3834 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3835 spin_unlock_irq(&phba->hbalock); 3836 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3837 /* nvmet xri-sgl shrunk */ 3838 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3839 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3840 "6305 NVMET xri-sgl count decreased from " 3841 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3842 nvmet_xri_cnt); 3843 spin_lock_irq(&phba->hbalock); 3844 spin_lock(&phba->sli4_hba.sgl_list_lock); 3845 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3846 &nvmet_sgl_list); 3847 /* release extra nvmet sgls from list */ 3848 for (i = 0; i < xri_cnt; i++) { 3849 list_remove_head(&nvmet_sgl_list, 3850 sglq_entry, struct lpfc_sglq, list); 3851 if (sglq_entry) { 3852 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3853 sglq_entry->phys); 3854 kfree(sglq_entry); 3855 } 3856 } 3857 list_splice_init(&nvmet_sgl_list, 3858 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3859 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3860 spin_unlock_irq(&phba->hbalock); 3861 } else 3862 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3863 "6306 NVMET xri-sgl count unchanged: %d\n", 3864 nvmet_xri_cnt); 3865 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3866 3867 /* update xris to nvmet sgls on the list */ 3868 sglq_entry = NULL; 3869 sglq_entry_next = NULL; 3870 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3871 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3872 lxri = lpfc_sli4_next_xritag(phba); 3873 if (lxri == NO_XRI) { 3874 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3875 "6307 Failed to allocate xri for " 3876 "NVMET sgl\n"); 3877 rc = -ENOMEM; 3878 goto out_free_mem; 3879 } 3880 sglq_entry->sli4_lxritag = lxri; 3881 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3882 } 3883 return 0; 3884 3885 out_free_mem: 3886 lpfc_free_nvmet_sgl_list(phba); 3887 return rc; 3888 } 3889 3890 int 3891 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 3892 { 3893 LIST_HEAD(blist); 3894 struct lpfc_sli4_hdw_queue *qp; 3895 struct lpfc_io_buf *lpfc_cmd; 3896 struct lpfc_io_buf *iobufp, *prev_iobufp; 3897 int idx, cnt, xri, inserted; 3898 3899 cnt = 0; 3900 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3901 qp = &phba->sli4_hba.hdwq[idx]; 3902 spin_lock_irq(&qp->io_buf_list_get_lock); 3903 spin_lock(&qp->io_buf_list_put_lock); 3904 3905 /* Take everything off the get and put lists */ 3906 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 3907 list_splice(&qp->lpfc_io_buf_list_put, &blist); 3908 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 3909 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 3910 cnt += qp->get_io_bufs + qp->put_io_bufs; 3911 qp->get_io_bufs = 0; 3912 qp->put_io_bufs = 0; 3913 qp->total_io_bufs = 0; 3914 spin_unlock(&qp->io_buf_list_put_lock); 3915 spin_unlock_irq(&qp->io_buf_list_get_lock); 3916 } 3917 3918 /* 3919 * Take IO buffers off blist and put on cbuf sorted by XRI. 3920 * This is because POST_SGL takes a sequential range of XRIs 3921 * to post to the firmware. 
3922 */ 3923 for (idx = 0; idx < cnt; idx++) { 3924 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 3925 if (!lpfc_cmd) 3926 return cnt; 3927 if (idx == 0) { 3928 list_add_tail(&lpfc_cmd->list, cbuf); 3929 continue; 3930 } 3931 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 3932 inserted = 0; 3933 prev_iobufp = NULL; 3934 list_for_each_entry(iobufp, cbuf, list) { 3935 if (xri < iobufp->cur_iocbq.sli4_xritag) { 3936 if (prev_iobufp) 3937 list_add(&lpfc_cmd->list, 3938 &prev_iobufp->list); 3939 else 3940 list_add(&lpfc_cmd->list, cbuf); 3941 inserted = 1; 3942 break; 3943 } 3944 prev_iobufp = iobufp; 3945 } 3946 if (!inserted) 3947 list_add_tail(&lpfc_cmd->list, cbuf); 3948 } 3949 return cnt; 3950 } 3951 3952 int 3953 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 3954 { 3955 struct lpfc_sli4_hdw_queue *qp; 3956 struct lpfc_io_buf *lpfc_cmd; 3957 int idx, cnt; 3958 3959 qp = phba->sli4_hba.hdwq; 3960 cnt = 0; 3961 while (!list_empty(cbuf)) { 3962 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3963 list_remove_head(cbuf, lpfc_cmd, 3964 struct lpfc_io_buf, list); 3965 if (!lpfc_cmd) 3966 return cnt; 3967 cnt++; 3968 qp = &phba->sli4_hba.hdwq[idx]; 3969 lpfc_cmd->hdwq_no = idx; 3970 lpfc_cmd->hdwq = qp; 3971 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 3972 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 3973 spin_lock(&qp->io_buf_list_put_lock); 3974 list_add_tail(&lpfc_cmd->list, 3975 &qp->lpfc_io_buf_list_put); 3976 qp->put_io_bufs++; 3977 qp->total_io_bufs++; 3978 spin_unlock(&qp->io_buf_list_put_lock); 3979 } 3980 } 3981 return cnt; 3982 } 3983 3984 /** 3985 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 3986 * @phba: pointer to lpfc hba data structure. 3987 * 3988 * This routine first calculates the sizes of the current els and allocated 3989 * scsi sgl lists, and then goes through all sgls to updates the physical 3990 * XRIs assigned due to port function reset. During port initialization, the 3991 * current els and allocated scsi sgl lists are 0s. 
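 * All IO buffers are first flushed onto a local list, sorted by XRI, with
 * lpfc_io_buf_flush(); new XRIs are then assigned and the buffers are
 * redistributed across the hardware queues by lpfc_io_buf_replenish().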
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t io_xri_cnt, io_xri_max;
	LIST_HEAD(io_sgl_list);
	int rc, cnt;

	/*
	 * update on pci function's allocated nvme xri-sgl list
	 */

	/* maximum number of xris available for nvme buffers */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.io_xri_max = io_xri_max;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated XRI sgl count:%d, "
			"maximum XRI count:%d\n",
			phba->sli4_hba.io_xri_cnt,
			phba->sli4_hba.io_xri_max);

	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
		/* max nvme xri shrunk below the allocated nvme buffers */
		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
					phba->sli4_hba.io_xri_max;
		/* release the extra allocated nvme buffers */
		for (i = 0; i < io_xri_cnt; i++) {
			list_remove_head(&io_sgl_list, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
	}

	/* update xris associated to remaining allocated nvme buffers */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	phba->sli4_hba.io_xri_cnt = cnt;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &io_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
	return 0;

out_free_mem:
	lpfc_io_free(phba);
	return rc;
}

/**
 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
 * @phba: pointer to lpfc hba data structure.
 * @num_to_alloc: the requested number of IO buffers to allocate.
 *
 * This routine allocates IO buffers for a device with the SLI-4 interface
 * spec. Each IO buffer contains all the information needed to initiate
 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
 * them on a list, it posts them to the port using an SGL block post.
 *
 * Return codes:
 *   int - number of IO buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
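 *
 * Note: when BlockGuard is enabled, each DMA buffer must be SLI4 page
 * aligned; a misaligned allocation stops the loop, so fewer buffers than
 * requested may be allocated and posted.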
4078 **/ 4079 int 4080 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4081 { 4082 struct lpfc_io_buf *lpfc_ncmd; 4083 struct lpfc_iocbq *pwqeq; 4084 uint16_t iotag, lxri = 0; 4085 int bcnt, num_posted; 4086 LIST_HEAD(prep_nblist); 4087 LIST_HEAD(post_nblist); 4088 LIST_HEAD(nvme_nblist); 4089 4090 /* Sanity check to ensure our sizing is right for both SCSI and NVME */ 4091 if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) { 4092 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4093 "6426 Common buffer size %zd exceeds %d\n", 4094 sizeof(struct lpfc_io_buf), 4095 LPFC_COMMON_IO_BUF_SZ); 4096 return 0; 4097 } 4098 4099 phba->sli4_hba.io_xri_cnt = 0; 4100 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4101 lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL); 4102 if (!lpfc_ncmd) 4103 break; 4104 /* 4105 * Get memory from the pci pool to map the virt space to 4106 * pci bus space for an I/O. The DMA buffer includes the 4107 * number of SGE's necessary to support the sg_tablesize. 4108 */ 4109 lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, 4110 GFP_KERNEL, 4111 &lpfc_ncmd->dma_handle); 4112 if (!lpfc_ncmd->data) { 4113 kfree(lpfc_ncmd); 4114 break; 4115 } 4116 memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size); 4117 4118 /* 4119 * 4K Page alignment is CRITICAL to BlockGuard, double check 4120 * to be sure. 4121 */ 4122 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4123 (((unsigned long)(lpfc_ncmd->data) & 4124 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4125 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4126 "3369 Memory alignment err: addr=%lx\n", 4127 (unsigned long)lpfc_ncmd->data); 4128 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4129 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4130 kfree(lpfc_ncmd); 4131 break; 4132 } 4133 4134 lxri = lpfc_sli4_next_xritag(phba); 4135 if (lxri == NO_XRI) { 4136 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4137 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4138 kfree(lpfc_ncmd); 4139 break; 4140 } 4141 pwqeq = &lpfc_ncmd->cur_iocbq; 4142 4143 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4144 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4145 if (iotag == 0) { 4146 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4147 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4148 kfree(lpfc_ncmd); 4149 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 4150 "6121 Failed to allocate IOTAG for" 4151 " XRI:0x%x\n", lxri); 4152 lpfc_sli4_free_xri(phba, lxri); 4153 break; 4154 } 4155 pwqeq->sli4_lxritag = lxri; 4156 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4157 pwqeq->context1 = lpfc_ncmd; 4158 4159 /* Initialize local short-hand pointers. 
*/ 4160 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4161 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4162 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4163 spin_lock_init(&lpfc_ncmd->buf_lock); 4164 4165 /* add the nvme buffer to a post list */ 4166 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4167 phba->sli4_hba.io_xri_cnt++; 4168 } 4169 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4170 "6114 Allocate %d out of %d requested new NVME " 4171 "buffers\n", bcnt, num_to_alloc); 4172 4173 /* post the list of nvme buffer sgls to port if available */ 4174 if (!list_empty(&post_nblist)) 4175 num_posted = lpfc_sli4_post_io_sgl_list( 4176 phba, &post_nblist, bcnt); 4177 else 4178 num_posted = 0; 4179 4180 return num_posted; 4181 } 4182 4183 static uint64_t 4184 lpfc_get_wwpn(struct lpfc_hba *phba) 4185 { 4186 uint64_t wwn; 4187 int rc; 4188 LPFC_MBOXQ_t *mboxq; 4189 MAILBOX_t *mb; 4190 4191 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4192 GFP_KERNEL); 4193 if (!mboxq) 4194 return (uint64_t)-1; 4195 4196 /* First get WWN of HBA instance */ 4197 lpfc_read_nv(phba, mboxq); 4198 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4199 if (rc != MBX_SUCCESS) { 4200 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4201 "6019 Mailbox failed , mbxCmd x%x " 4202 "READ_NV, mbxStatus x%x\n", 4203 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4204 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4205 mempool_free(mboxq, phba->mbox_mem_pool); 4206 return (uint64_t) -1; 4207 } 4208 mb = &mboxq->u.mb; 4209 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4210 /* wwn is WWPN of HBA instance */ 4211 mempool_free(mboxq, phba->mbox_mem_pool); 4212 if (phba->sli_rev == LPFC_SLI_REV4) 4213 return be64_to_cpu(wwn); 4214 else 4215 return rol64(wwn, 32); 4216 } 4217 4218 /** 4219 * lpfc_create_port - Create an FC port 4220 * @phba: pointer to lpfc hba data structure. 4221 * @instance: a unique integer ID to this FC port. 4222 * @dev: pointer to the device data structure. 4223 * 4224 * This routine creates a FC port for the upper layer protocol. The FC port 4225 * can be created on top of either a physical port or a virtual port provided 4226 * by the HBA. This routine also allocates a SCSI host data structure (shost) 4227 * and associates the FC port created before adding the shost into the SCSI 4228 * layer. 4229 * 4230 * Return codes 4231 * @vport - pointer to the virtual N_Port data structure. 4232 * NULL - port create failed. 
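 *
 * The SCSI host template is selected based on the enabled FC4 types and on
 * whether HBA reset has been disabled for this adapter's WWPN via the
 * lpfc_no_hba_reset list.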
4233 **/ 4234 struct lpfc_vport * 4235 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4236 { 4237 struct lpfc_vport *vport; 4238 struct Scsi_Host *shost = NULL; 4239 int error = 0; 4240 int i; 4241 uint64_t wwn; 4242 bool use_no_reset_hba = false; 4243 int rc; 4244 4245 if (lpfc_no_hba_reset_cnt) { 4246 if (phba->sli_rev < LPFC_SLI_REV4 && 4247 dev == &phba->pcidev->dev) { 4248 /* Reset the port first */ 4249 lpfc_sli_brdrestart(phba); 4250 rc = lpfc_sli_chipset_init(phba); 4251 if (rc) 4252 return NULL; 4253 } 4254 wwn = lpfc_get_wwpn(phba); 4255 } 4256 4257 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4258 if (wwn == lpfc_no_hba_reset[i]) { 4259 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4260 "6020 Setting use_no_reset port=%llx\n", 4261 wwn); 4262 use_no_reset_hba = true; 4263 break; 4264 } 4265 } 4266 4267 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4268 if (dev != &phba->pcidev->dev) { 4269 shost = scsi_host_alloc(&lpfc_vport_template, 4270 sizeof(struct lpfc_vport)); 4271 } else { 4272 if (!use_no_reset_hba) 4273 shost = scsi_host_alloc(&lpfc_template, 4274 sizeof(struct lpfc_vport)); 4275 else 4276 shost = scsi_host_alloc(&lpfc_template_no_hr, 4277 sizeof(struct lpfc_vport)); 4278 } 4279 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 4280 shost = scsi_host_alloc(&lpfc_template_nvme, 4281 sizeof(struct lpfc_vport)); 4282 } 4283 if (!shost) 4284 goto out; 4285 4286 vport = (struct lpfc_vport *) shost->hostdata; 4287 vport->phba = phba; 4288 vport->load_flag |= FC_LOADING; 4289 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4290 vport->fc_rscn_flush = 0; 4291 lpfc_get_vport_cfgparam(vport); 4292 4293 /* Adjust value in vport */ 4294 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4295 4296 shost->unique_id = instance; 4297 shost->max_id = LPFC_MAX_TARGET; 4298 shost->max_lun = vport->cfg_max_luns; 4299 shost->this_id = -1; 4300 shost->max_cmd_len = 16; 4301 4302 if (phba->sli_rev == LPFC_SLI_REV4) { 4303 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) 4304 shost->nr_hw_queues = phba->cfg_hdw_queue; 4305 else 4306 shost->nr_hw_queues = phba->sli4_hba.num_present_cpu; 4307 4308 shost->dma_boundary = 4309 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4310 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4311 } else 4312 /* SLI-3 has a limited number of hardware queues (3), 4313 * thus there is only one for FCP processing. 4314 */ 4315 shost->nr_hw_queues = 1; 4316 4317 /* 4318 * Set initial can_queue value since 0 is no longer supported and 4319 * scsi_add_host will fail. This will be adjusted later based on the 4320 * max xri value determined in hba setup. 4321 */ 4322 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4323 if (dev != &phba->pcidev->dev) { 4324 shost->transportt = lpfc_vport_transport_template; 4325 vport->port_type = LPFC_NPIV_PORT; 4326 } else { 4327 shost->transportt = lpfc_transport_template; 4328 vport->port_type = LPFC_PHYSICAL_PORT; 4329 } 4330 4331 /* Initialize all internally managed lists. 
*/ 4332 INIT_LIST_HEAD(&vport->fc_nodes); 4333 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4334 spin_lock_init(&vport->work_port_lock); 4335 4336 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4337 4338 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4339 4340 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4341 4342 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4343 if (error) 4344 goto out_put_shost; 4345 4346 spin_lock_irq(&phba->port_list_lock); 4347 list_add_tail(&vport->listentry, &phba->port_list); 4348 spin_unlock_irq(&phba->port_list_lock); 4349 return vport; 4350 4351 out_put_shost: 4352 scsi_host_put(shost); 4353 out: 4354 return NULL; 4355 } 4356 4357 /** 4358 * destroy_port - destroy an FC port 4359 * @vport: pointer to an lpfc virtual N_Port data structure. 4360 * 4361 * This routine destroys a FC port from the upper layer protocol. All the 4362 * resources associated with the port are released. 4363 **/ 4364 void 4365 destroy_port(struct lpfc_vport *vport) 4366 { 4367 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4368 struct lpfc_hba *phba = vport->phba; 4369 4370 lpfc_debugfs_terminate(vport); 4371 fc_remove_host(shost); 4372 scsi_remove_host(shost); 4373 4374 spin_lock_irq(&phba->port_list_lock); 4375 list_del_init(&vport->listentry); 4376 spin_unlock_irq(&phba->port_list_lock); 4377 4378 lpfc_cleanup(vport); 4379 return; 4380 } 4381 4382 /** 4383 * lpfc_get_instance - Get a unique integer ID 4384 * 4385 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4386 * uses the kernel idr facility to perform the task. 4387 * 4388 * Return codes: 4389 * instance - a unique integer ID allocated as the new instance. 4390 * -1 - lpfc get instance failed. 4391 **/ 4392 int 4393 lpfc_get_instance(void) 4394 { 4395 int ret; 4396 4397 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4398 return ret < 0 ? -1 : ret; 4399 } 4400 4401 /** 4402 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4403 * @shost: pointer to SCSI host data structure. 4404 * @time: elapsed time of the scan in jiffies. 4405 * 4406 * This routine is called by the SCSI layer with a SCSI host to determine 4407 * whether the scan host is finished. 4408 * 4409 * Note: there is no scan_start function as adapter initialization will have 4410 * asynchronously kicked off the link initialization. 4411 * 4412 * Return codes 4413 * 0 - SCSI host scan is not over yet. 4414 * 1 - SCSI host scan is over. 4415 **/ 4416 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4417 { 4418 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4419 struct lpfc_hba *phba = vport->phba; 4420 int stat = 0; 4421 4422 spin_lock_irq(shost->host_lock); 4423 4424 if (vport->load_flag & FC_UNLOADING) { 4425 stat = 1; 4426 goto finished; 4427 } 4428 if (time >= msecs_to_jiffies(30 * 1000)) { 4429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4430 "0461 Scanning longer than 30 " 4431 "seconds. Continuing initialization\n"); 4432 stat = 1; 4433 goto finished; 4434 } 4435 if (time >= msecs_to_jiffies(15 * 1000) && 4436 phba->link_state <= LPFC_LINK_DOWN) { 4437 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4438 "0465 Link down longer than 15 " 4439 "seconds. 
Continuing initialization\n"); 4440 stat = 1; 4441 goto finished; 4442 } 4443 4444 if (vport->port_state != LPFC_VPORT_READY) 4445 goto finished; 4446 if (vport->num_disc_nodes || vport->fc_prli_sent) 4447 goto finished; 4448 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4449 goto finished; 4450 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4451 goto finished; 4452 4453 stat = 1; 4454 4455 finished: 4456 spin_unlock_irq(shost->host_lock); 4457 return stat; 4458 } 4459 4460 void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4461 { 4462 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4463 struct lpfc_hba *phba = vport->phba; 4464 4465 fc_host_supported_speeds(shost) = 0; 4466 if (phba->lmt & LMT_128Gb) 4467 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4468 if (phba->lmt & LMT_64Gb) 4469 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4470 if (phba->lmt & LMT_32Gb) 4471 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4472 if (phba->lmt & LMT_16Gb) 4473 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4474 if (phba->lmt & LMT_10Gb) 4475 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4476 if (phba->lmt & LMT_8Gb) 4477 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4478 if (phba->lmt & LMT_4Gb) 4479 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4480 if (phba->lmt & LMT_2Gb) 4481 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4482 if (phba->lmt & LMT_1Gb) 4483 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4484 } 4485 4486 /** 4487 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4488 * @shost: pointer to SCSI host data structure. 4489 * 4490 * This routine initializes a given SCSI host attributes on a FC port. The 4491 * SCSI host can be either on top of a physical port or a virtual port. 4492 **/ 4493 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4494 { 4495 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4496 struct lpfc_hba *phba = vport->phba; 4497 /* 4498 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 4499 */ 4500 4501 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4502 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4503 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4504 4505 memset(fc_host_supported_fc4s(shost), 0, 4506 sizeof(fc_host_supported_fc4s(shost))); 4507 fc_host_supported_fc4s(shost)[2] = 1; 4508 fc_host_supported_fc4s(shost)[7] = 1; 4509 4510 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4511 sizeof fc_host_symbolic_name(shost)); 4512 4513 lpfc_host_supported_speeds_set(shost); 4514 4515 fc_host_maxframe_size(shost) = 4516 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4517 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4518 4519 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4520 4521 /* This value is also unchanging */ 4522 memset(fc_host_active_fc4s(shost), 0, 4523 sizeof(fc_host_active_fc4s(shost))); 4524 fc_host_active_fc4s(shost)[2] = 1; 4525 fc_host_active_fc4s(shost)[7] = 1; 4526 4527 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4528 spin_lock_irq(shost->host_lock); 4529 vport->load_flag &= ~FC_LOADING; 4530 spin_unlock_irq(shost->host_lock); 4531 } 4532 4533 /** 4534 * lpfc_stop_port_s3 - Stop SLI3 device port 4535 * @phba: pointer to lpfc hba data structure. 
 *
 * This routine is invoked to stop an SLI3 device port. It stops the device
 * from generating interrupts and stops the driver's timers for the device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port. It stops the device
 * from generating interrupts and stops the driver's timers for the device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via the
 * API jump table function pointer in the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}

/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: pointer to the timer_list that maps to the lpfc_hba data structure.
 *
 * This routine is invoked when the wait for FCF table rediscovery times
 * out. If new FCF record(s) have been discovered during the wait period,
 * a new FCF event is added to the FCoE async event list and the worker
 * thread is woken up to process it from the worker thread context.
4620 **/ 4621 static void 4622 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4623 { 4624 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4625 4626 /* Don't send FCF rediscovery event if timer cancelled */ 4627 spin_lock_irq(&phba->hbalock); 4628 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4629 spin_unlock_irq(&phba->hbalock); 4630 return; 4631 } 4632 /* Clear FCF rediscovery timer pending flag */ 4633 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4634 /* FCF rediscovery event to worker thread */ 4635 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4636 spin_unlock_irq(&phba->hbalock); 4637 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4638 "2776 FCF rediscover quiescent timer expired\n"); 4639 /* wake up worker thread */ 4640 lpfc_worker_wake_up(phba); 4641 } 4642 4643 /** 4644 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4645 * @phba: pointer to lpfc hba data structure. 4646 * @acqe_link: pointer to the async link completion queue entry. 4647 * 4648 * This routine is to parse the SLI4 link-attention link fault code. 4649 **/ 4650 static void 4651 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4652 struct lpfc_acqe_link *acqe_link) 4653 { 4654 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4655 case LPFC_ASYNC_LINK_FAULT_NONE: 4656 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4657 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4658 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 4659 break; 4660 default: 4661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4662 "0398 Unknown link fault code: x%x\n", 4663 bf_get(lpfc_acqe_link_fault, acqe_link)); 4664 break; 4665 } 4666 } 4667 4668 /** 4669 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4670 * @phba: pointer to lpfc hba data structure. 4671 * @acqe_link: pointer to the async link completion queue entry. 4672 * 4673 * This routine is to parse the SLI4 link attention type and translate it 4674 * into the base driver's link attention type coding. 4675 * 4676 * Return: Link attention type in terms of base driver's coding. 4677 **/ 4678 static uint8_t 4679 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4680 struct lpfc_acqe_link *acqe_link) 4681 { 4682 uint8_t att_type; 4683 4684 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4685 case LPFC_ASYNC_LINK_STATUS_DOWN: 4686 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4687 att_type = LPFC_ATT_LINK_DOWN; 4688 break; 4689 case LPFC_ASYNC_LINK_STATUS_UP: 4690 /* Ignore physical link up events - wait for logical link up */ 4691 att_type = LPFC_ATT_RESERVED; 4692 break; 4693 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4694 att_type = LPFC_ATT_LINK_UP; 4695 break; 4696 default: 4697 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4698 "0399 Invalid link attention type: x%x\n", 4699 bf_get(lpfc_acqe_link_status, acqe_link)); 4700 att_type = LPFC_ATT_RESERVED; 4701 break; 4702 } 4703 return att_type; 4704 } 4705 4706 /** 4707 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4708 * @phba: pointer to lpfc hba data structure. 4709 * 4710 * This routine is to get an SLI3 FC port's link speed in Mbps. 4711 * 4712 * Return: link speed in terms of Mbps. 
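 * For example, a port trained at LPFC_LINK_SPEED_8GHZ is reported as 8000.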
4713 **/ 4714 uint32_t 4715 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4716 { 4717 uint32_t link_speed; 4718 4719 if (!lpfc_is_link_up(phba)) 4720 return 0; 4721 4722 if (phba->sli_rev <= LPFC_SLI_REV3) { 4723 switch (phba->fc_linkspeed) { 4724 case LPFC_LINK_SPEED_1GHZ: 4725 link_speed = 1000; 4726 break; 4727 case LPFC_LINK_SPEED_2GHZ: 4728 link_speed = 2000; 4729 break; 4730 case LPFC_LINK_SPEED_4GHZ: 4731 link_speed = 4000; 4732 break; 4733 case LPFC_LINK_SPEED_8GHZ: 4734 link_speed = 8000; 4735 break; 4736 case LPFC_LINK_SPEED_10GHZ: 4737 link_speed = 10000; 4738 break; 4739 case LPFC_LINK_SPEED_16GHZ: 4740 link_speed = 16000; 4741 break; 4742 default: 4743 link_speed = 0; 4744 } 4745 } else { 4746 if (phba->sli4_hba.link_state.logical_speed) 4747 link_speed = 4748 phba->sli4_hba.link_state.logical_speed; 4749 else 4750 link_speed = phba->sli4_hba.link_state.speed; 4751 } 4752 return link_speed; 4753 } 4754 4755 /** 4756 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 4757 * @phba: pointer to lpfc hba data structure. 4758 * @evt_code: asynchronous event code. 4759 * @speed_code: asynchronous event link speed code. 4760 * 4761 * This routine is to parse the giving SLI4 async event link speed code into 4762 * value of Mbps for the link speed. 4763 * 4764 * Return: link speed in terms of Mbps. 4765 **/ 4766 static uint32_t 4767 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4768 uint8_t speed_code) 4769 { 4770 uint32_t port_speed; 4771 4772 switch (evt_code) { 4773 case LPFC_TRAILER_CODE_LINK: 4774 switch (speed_code) { 4775 case LPFC_ASYNC_LINK_SPEED_ZERO: 4776 port_speed = 0; 4777 break; 4778 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4779 port_speed = 10; 4780 break; 4781 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4782 port_speed = 100; 4783 break; 4784 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4785 port_speed = 1000; 4786 break; 4787 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4788 port_speed = 10000; 4789 break; 4790 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4791 port_speed = 20000; 4792 break; 4793 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4794 port_speed = 25000; 4795 break; 4796 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4797 port_speed = 40000; 4798 break; 4799 default: 4800 port_speed = 0; 4801 } 4802 break; 4803 case LPFC_TRAILER_CODE_FC: 4804 switch (speed_code) { 4805 case LPFC_FC_LA_SPEED_UNKNOWN: 4806 port_speed = 0; 4807 break; 4808 case LPFC_FC_LA_SPEED_1G: 4809 port_speed = 1000; 4810 break; 4811 case LPFC_FC_LA_SPEED_2G: 4812 port_speed = 2000; 4813 break; 4814 case LPFC_FC_LA_SPEED_4G: 4815 port_speed = 4000; 4816 break; 4817 case LPFC_FC_LA_SPEED_8G: 4818 port_speed = 8000; 4819 break; 4820 case LPFC_FC_LA_SPEED_10G: 4821 port_speed = 10000; 4822 break; 4823 case LPFC_FC_LA_SPEED_16G: 4824 port_speed = 16000; 4825 break; 4826 case LPFC_FC_LA_SPEED_32G: 4827 port_speed = 32000; 4828 break; 4829 case LPFC_FC_LA_SPEED_64G: 4830 port_speed = 64000; 4831 break; 4832 case LPFC_FC_LA_SPEED_128G: 4833 port_speed = 128000; 4834 break; 4835 default: 4836 port_speed = 0; 4837 } 4838 break; 4839 default: 4840 port_speed = 0; 4841 } 4842 return port_speed; 4843 } 4844 4845 /** 4846 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4847 * @phba: pointer to lpfc hba data structure. 4848 * @acqe_link: pointer to the async link completion queue entry. 4849 * 4850 * This routine is to handle the SLI4 asynchronous FCoE link event. 
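 * For FC mode the READ_TOPOLOGY mailbox command is issued to the port to
 * fetch the topology; for FCoE mode the topology fields are filled in from
 * the ACQE and the READ_TOPOLOGY completion handler is invoked directly.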
4851 **/ 4852 static void 4853 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 4854 struct lpfc_acqe_link *acqe_link) 4855 { 4856 struct lpfc_dmabuf *mp; 4857 LPFC_MBOXQ_t *pmb; 4858 MAILBOX_t *mb; 4859 struct lpfc_mbx_read_top *la; 4860 uint8_t att_type; 4861 int rc; 4862 4863 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 4864 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 4865 return; 4866 phba->fcoe_eventtag = acqe_link->event_tag; 4867 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4868 if (!pmb) { 4869 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4870 "0395 The mboxq allocation failed\n"); 4871 return; 4872 } 4873 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4874 if (!mp) { 4875 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4876 "0396 The lpfc_dmabuf allocation failed\n"); 4877 goto out_free_pmb; 4878 } 4879 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4880 if (!mp->virt) { 4881 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4882 "0397 The mbuf allocation failed\n"); 4883 goto out_free_dmabuf; 4884 } 4885 4886 /* Cleanup any outstanding ELS commands */ 4887 lpfc_els_flush_all_cmd(phba); 4888 4889 /* Block ELS IOCBs until we have done process link event */ 4890 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4891 4892 /* Update link event statistics */ 4893 phba->sli.slistat.link_event++; 4894 4895 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4896 lpfc_read_topology(phba, pmb, mp); 4897 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4898 pmb->vport = phba->pport; 4899 4900 /* Keep the link status for extra SLI4 state machine reference */ 4901 phba->sli4_hba.link_state.speed = 4902 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 4903 bf_get(lpfc_acqe_link_speed, acqe_link)); 4904 phba->sli4_hba.link_state.duplex = 4905 bf_get(lpfc_acqe_link_duplex, acqe_link); 4906 phba->sli4_hba.link_state.status = 4907 bf_get(lpfc_acqe_link_status, acqe_link); 4908 phba->sli4_hba.link_state.type = 4909 bf_get(lpfc_acqe_link_type, acqe_link); 4910 phba->sli4_hba.link_state.number = 4911 bf_get(lpfc_acqe_link_number, acqe_link); 4912 phba->sli4_hba.link_state.fault = 4913 bf_get(lpfc_acqe_link_fault, acqe_link); 4914 phba->sli4_hba.link_state.logical_speed = 4915 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 4916 4917 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4918 "2900 Async FC/FCoE Link event - Speed:%dGBit " 4919 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 4920 "Logical speed:%dMbps Fault:%d\n", 4921 phba->sli4_hba.link_state.speed, 4922 phba->sli4_hba.link_state.topology, 4923 phba->sli4_hba.link_state.status, 4924 phba->sli4_hba.link_state.type, 4925 phba->sli4_hba.link_state.number, 4926 phba->sli4_hba.link_state.logical_speed, 4927 phba->sli4_hba.link_state.fault); 4928 /* 4929 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 4930 * topology info. Note: Optional for non FC-AL ports. 4931 */ 4932 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4933 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4934 if (rc == MBX_NOT_FINISHED) 4935 goto out_free_dmabuf; 4936 return; 4937 } 4938 /* 4939 * For FCoE Mode: fill in all the topology information we need and call 4940 * the READ_TOPOLOGY completion routine to continue without actually 4941 * sending the READ_TOPOLOGY mailbox command to the port. 
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse port fault information field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology.
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * the corresponding Read topology link speed value.
 *
 * Return: link speed in terms of Read topology.
 **/
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:
		port_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_FC_LA_SPEED_2G:
		port_speed = LPFC_LINK_SPEED_2GHZ;
		break;
	case LPFC_FC_LA_SPEED_4G:
		port_speed = LPFC_LINK_SPEED_4GHZ;
		break;
	case LPFC_FC_LA_SPEED_8G:
		port_speed = LPFC_LINK_SPEED_8GHZ;
		break;
	case LPFC_FC_LA_SPEED_16G:
		port_speed = LPFC_LINK_SPEED_16GHZ;
		break;
	case LPFC_FC_LA_SPEED_32G:
		port_speed = LPFC_LINK_SPEED_32GHZ;
		break;
	case LPFC_FC_LA_SPEED_64G:
		port_speed = LPFC_LINK_SPEED_64GHZ;
		break;
	case LPFC_FC_LA_SPEED_128G:
		port_speed = LPFC_LINK_SPEED_128GHZ;
		break;
	case LPFC_FC_LA_SPEED_256G:
		port_speed = LPFC_LINK_SPEED_256GHZ;
		break;
	default:
		port_speed = 0;
		break;
	}

	return port_speed;
}

#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Did port __idx report an error */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ?
"YES" : "NO") : "NA" 5037 5038 static void 5039 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 5040 struct lpfc_acqe_fc_la *acqe_fc) 5041 { 5042 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 5043 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 5044 5045 phba->sli4_hba.link_state.speed = 5046 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5047 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5048 5049 phba->sli4_hba.link_state.logical_speed = 5050 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); 5051 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 5052 phba->fc_linkspeed = 5053 lpfc_async_link_speed_to_read_top( 5054 phba, 5055 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5056 5057 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 5058 phba->trunk_link.link0.state = 5059 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 5060 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5061 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 5062 } 5063 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 5064 phba->trunk_link.link1.state = 5065 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 5066 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5067 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 5068 } 5069 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 5070 phba->trunk_link.link2.state = 5071 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 5072 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5073 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 5074 } 5075 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 5076 phba->trunk_link.link3.state = 5077 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 5078 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5079 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 5080 } 5081 5082 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5083 "2910 Async FC Trunking Event - Speed:%d\n" 5084 "\tLogical speed:%d " 5085 "port0: %s port1: %s port2: %s port3: %s\n", 5086 phba->sli4_hba.link_state.speed, 5087 phba->sli4_hba.link_state.logical_speed, 5088 trunk_link_status(0), trunk_link_status(1), 5089 trunk_link_status(2), trunk_link_status(3)); 5090 5091 if (port_fault) 5092 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5093 "3202 trunk error:0x%x (%s) seen on port0:%s " 5094 /* 5095 * SLI-4: We have only 0xA error codes 5096 * defined as of now. print an appropriate 5097 * message in case driver needs to be updated. 5098 */ 5099 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 5100 "UNDEFINED. update driver." : trunk_errmsg[err], 5101 trunk_port_fault(0), trunk_port_fault(1), 5102 trunk_port_fault(2), trunk_port_fault(3)); 5103 } 5104 5105 5106 /** 5107 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 5108 * @phba: pointer to lpfc hba data structure. 5109 * @acqe_fc: pointer to the async fc completion queue entry. 5110 * 5111 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 5112 * that the event was received and then issue a read_topology mailbox command so 5113 * that the rest of the driver will treat it the same as SLI3. 
5114 **/ 5115 static void 5116 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 5117 { 5118 struct lpfc_dmabuf *mp; 5119 LPFC_MBOXQ_t *pmb; 5120 MAILBOX_t *mb; 5121 struct lpfc_mbx_read_top *la; 5122 int rc; 5123 5124 if (bf_get(lpfc_trailer_type, acqe_fc) != 5125 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 5126 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5127 "2895 Non FC link Event detected.(%d)\n", 5128 bf_get(lpfc_trailer_type, acqe_fc)); 5129 return; 5130 } 5131 5132 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5133 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 5134 lpfc_update_trunk_link_status(phba, acqe_fc); 5135 return; 5136 } 5137 5138 /* Keep the link status for extra SLI4 state machine reference */ 5139 phba->sli4_hba.link_state.speed = 5140 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5141 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5142 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 5143 phba->sli4_hba.link_state.topology = 5144 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 5145 phba->sli4_hba.link_state.status = 5146 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 5147 phba->sli4_hba.link_state.type = 5148 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 5149 phba->sli4_hba.link_state.number = 5150 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 5151 phba->sli4_hba.link_state.fault = 5152 bf_get(lpfc_acqe_link_fault, acqe_fc); 5153 phba->sli4_hba.link_state.logical_speed = 5154 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5155 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5156 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 5157 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 5158 "%dMbps Fault:%d\n", 5159 phba->sli4_hba.link_state.speed, 5160 phba->sli4_hba.link_state.topology, 5161 phba->sli4_hba.link_state.status, 5162 phba->sli4_hba.link_state.type, 5163 phba->sli4_hba.link_state.number, 5164 phba->sli4_hba.link_state.logical_speed, 5165 phba->sli4_hba.link_state.fault); 5166 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5167 if (!pmb) { 5168 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5169 "2897 The mboxq allocation failed\n"); 5170 return; 5171 } 5172 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5173 if (!mp) { 5174 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5175 "2898 The lpfc_dmabuf allocation failed\n"); 5176 goto out_free_pmb; 5177 } 5178 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5179 if (!mp->virt) { 5180 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5181 "2899 The mbuf allocation failed\n"); 5182 goto out_free_dmabuf; 5183 } 5184 5185 /* Cleanup any outstanding ELS commands */ 5186 lpfc_els_flush_all_cmd(phba); 5187 5188 /* Block ELS IOCBs until we have done process link event */ 5189 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5190 5191 /* Update link event statistics */ 5192 phba->sli.slistat.link_event++; 5193 5194 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5195 lpfc_read_topology(phba, pmb, mp); 5196 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5197 pmb->vport = phba->pport; 5198 5199 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 5200 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 5201 5202 switch (phba->sli4_hba.link_state.status) { 5203 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 5204 phba->link_flag |= LS_MDS_LINK_DOWN; 5205 break; 5206 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 5207 phba->link_flag |= LS_MDS_LOOPBACK; 5208 break; 5209 default: 5210 break; 5211 } 5212 5213 /* Initialize completion status */ 5214 mb = &pmb->u.mb; 5215 mb->mbxStatus 
= MBX_SUCCESS; 5216 5217 /* Parse port fault information field */ 5218 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 5219 5220 /* Parse and translate link attention fields */ 5221 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 5222 la->eventTag = acqe_fc->event_tag; 5223 5224 if (phba->sli4_hba.link_state.status == 5225 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 5226 bf_set(lpfc_mbx_read_top_att_type, la, 5227 LPFC_FC_LA_TYPE_UNEXP_WWPN); 5228 } else { 5229 bf_set(lpfc_mbx_read_top_att_type, la, 5230 LPFC_FC_LA_TYPE_LINK_DOWN); 5231 } 5232 /* Invoke the mailbox command callback function */ 5233 lpfc_mbx_cmpl_read_topology(phba, pmb); 5234 5235 return; 5236 } 5237 5238 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5239 if (rc == MBX_NOT_FINISHED) 5240 goto out_free_dmabuf; 5241 return; 5242 5243 out_free_dmabuf: 5244 kfree(mp); 5245 out_free_pmb: 5246 mempool_free(pmb, phba->mbox_mem_pool); 5247 } 5248 5249 /** 5250 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 5251 * @phba: pointer to lpfc hba data structure. 5252 * @acqe_fc: pointer to the async SLI completion queue entry. 5253 * 5254 * This routine is to handle the SLI4 asynchronous SLI events. 5255 **/ 5256 static void 5257 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 5258 { 5259 char port_name; 5260 char message[128]; 5261 uint8_t status; 5262 uint8_t evt_type; 5263 uint8_t operational = 0; 5264 struct temp_event temp_event_data; 5265 struct lpfc_acqe_misconfigured_event *misconfigured; 5266 struct Scsi_Host *shost; 5267 struct lpfc_vport **vports; 5268 int rc, i; 5269 5270 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 5271 5272 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5273 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 5274 "x%08x SLI Event Type:%d\n", 5275 acqe_sli->event_data1, acqe_sli->event_data2, 5276 evt_type); 5277 5278 port_name = phba->Port[0]; 5279 if (port_name == 0x00) 5280 port_name = '?'; /* get port name is empty */ 5281 5282 switch (evt_type) { 5283 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 5284 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5285 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 5286 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5287 5288 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5289 "3190 Over Temperature:%d Celsius- Port Name %c\n", 5290 acqe_sli->event_data1, port_name); 5291 5292 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 5293 shost = lpfc_shost_from_vport(phba->pport); 5294 fc_host_post_vendor_event(shost, fc_get_event_number(), 5295 sizeof(temp_event_data), 5296 (char *)&temp_event_data, 5297 SCSI_NL_VID_TYPE_PCI 5298 | PCI_VENDOR_ID_EMULEX); 5299 break; 5300 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 5301 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5302 temp_event_data.event_code = LPFC_NORMAL_TEMP; 5303 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5304 5305 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5306 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 5307 acqe_sli->event_data1, port_name); 5308 5309 shost = lpfc_shost_from_vport(phba->pport); 5310 fc_host_post_vendor_event(shost, fc_get_event_number(), 5311 sizeof(temp_event_data), 5312 (char *)&temp_event_data, 5313 SCSI_NL_VID_TYPE_PCI 5314 | PCI_VENDOR_ID_EMULEX); 5315 break; 5316 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 5317 misconfigured = (struct lpfc_acqe_misconfigured_event *) 5318 &acqe_sli->event_data1; 5319 5320 /* fetch the status for this port */ 5321 switch (phba->sli4_hba.lnk_info.lnk_no) { 5322 case 
LPFC_LINK_NUMBER_0: 5323 status = bf_get(lpfc_sli_misconfigured_port0_state, 5324 &misconfigured->theEvent); 5325 operational = bf_get(lpfc_sli_misconfigured_port0_op, 5326 &misconfigured->theEvent); 5327 break; 5328 case LPFC_LINK_NUMBER_1: 5329 status = bf_get(lpfc_sli_misconfigured_port1_state, 5330 &misconfigured->theEvent); 5331 operational = bf_get(lpfc_sli_misconfigured_port1_op, 5332 &misconfigured->theEvent); 5333 break; 5334 case LPFC_LINK_NUMBER_2: 5335 status = bf_get(lpfc_sli_misconfigured_port2_state, 5336 &misconfigured->theEvent); 5337 operational = bf_get(lpfc_sli_misconfigured_port2_op, 5338 &misconfigured->theEvent); 5339 break; 5340 case LPFC_LINK_NUMBER_3: 5341 status = bf_get(lpfc_sli_misconfigured_port3_state, 5342 &misconfigured->theEvent); 5343 operational = bf_get(lpfc_sli_misconfigured_port3_op, 5344 &misconfigured->theEvent); 5345 break; 5346 default: 5347 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5348 "3296 " 5349 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 5350 "event: Invalid link %d", 5351 phba->sli4_hba.lnk_info.lnk_no); 5352 return; 5353 } 5354 5355 /* Skip if optic state unchanged */ 5356 if (phba->sli4_hba.lnk_info.optic_state == status) 5357 return; 5358 5359 switch (status) { 5360 case LPFC_SLI_EVENT_STATUS_VALID: 5361 sprintf(message, "Physical Link is functional"); 5362 break; 5363 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 5364 sprintf(message, "Optics faulted/incorrectly " 5365 "installed/not installed - Reseat optics, " 5366 "if issue not resolved, replace."); 5367 break; 5368 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 5369 sprintf(message, 5370 "Optics of two types installed - Remove one " 5371 "optic or install matching pair of optics."); 5372 break; 5373 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 5374 sprintf(message, "Incompatible optics - Replace with " 5375 "compatible optics for card to function."); 5376 break; 5377 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 5378 sprintf(message, "Unqualified optics - Replace with " 5379 "Avago optics for Warranty and Technical " 5380 "Support - Link is%s operational", 5381 (operational) ? " not" : ""); 5382 break; 5383 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 5384 sprintf(message, "Uncertified optics - Replace with " 5385 "Avago-certified optics to enable link " 5386 "operation - Link is%s operational", 5387 (operational) ? 
" not" : ""); 5388 break; 5389 default: 5390 /* firmware is reporting a status we don't know about */ 5391 sprintf(message, "Unknown event status x%02x", status); 5392 break; 5393 } 5394 5395 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 5396 rc = lpfc_sli4_read_config(phba); 5397 if (rc) { 5398 phba->lmt = 0; 5399 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5400 "3194 Unable to retrieve supported " 5401 "speeds, rc = 0x%x\n", rc); 5402 } 5403 vports = lpfc_create_vport_work_array(phba); 5404 if (vports != NULL) { 5405 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5406 i++) { 5407 shost = lpfc_shost_from_vport(vports[i]); 5408 lpfc_host_supported_speeds_set(shost); 5409 } 5410 } 5411 lpfc_destroy_vport_work_array(phba, vports); 5412 5413 phba->sli4_hba.lnk_info.optic_state = status; 5414 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5415 "3176 Port Name %c %s\n", port_name, message); 5416 break; 5417 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 5418 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5419 "3192 Remote DPort Test Initiated - " 5420 "Event Data1:x%08x Event Data2: x%08x\n", 5421 acqe_sli->event_data1, acqe_sli->event_data2); 5422 break; 5423 default: 5424 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5425 "3193 Async SLI event - Event Data1:x%08x Event Data2:" 5426 "x%08x SLI Event Type:%d\n", 5427 acqe_sli->event_data1, acqe_sli->event_data2, 5428 evt_type); 5429 break; 5430 } 5431 } 5432 5433 /** 5434 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 5435 * @vport: pointer to vport data structure. 5436 * 5437 * This routine is to perform Clear Virtual Link (CVL) on a vport in 5438 * response to a CVL event. 5439 * 5440 * Return the pointer to the ndlp with the vport if successful, otherwise 5441 * return NULL. 5442 **/ 5443 static struct lpfc_nodelist * 5444 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 5445 { 5446 struct lpfc_nodelist *ndlp; 5447 struct Scsi_Host *shost; 5448 struct lpfc_hba *phba; 5449 5450 if (!vport) 5451 return NULL; 5452 phba = vport->phba; 5453 if (!phba) 5454 return NULL; 5455 ndlp = lpfc_findnode_did(vport, Fabric_DID); 5456 if (!ndlp) { 5457 /* Cannot find existing Fabric ndlp, so allocate a new one */ 5458 ndlp = lpfc_nlp_init(vport, Fabric_DID); 5459 if (!ndlp) 5460 return 0; 5461 /* Set the node type */ 5462 ndlp->nlp_type |= NLP_FABRIC; 5463 /* Put ndlp onto node list */ 5464 lpfc_enqueue_node(vport, ndlp); 5465 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 5466 /* re-setup ndlp without removing from node list */ 5467 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 5468 if (!ndlp) 5469 return 0; 5470 } 5471 if ((phba->pport->port_state < LPFC_FLOGI) && 5472 (phba->pport->port_state != LPFC_VPORT_FAILED)) 5473 return NULL; 5474 /* If virtual link is not yet instantiated ignore CVL */ 5475 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 5476 && (vport->port_state != LPFC_VPORT_FAILED)) 5477 return NULL; 5478 shost = lpfc_shost_from_vport(vport); 5479 if (!shost) 5480 return NULL; 5481 lpfc_linkdown_port(vport); 5482 lpfc_cleanup_pending_mbox(vport); 5483 spin_lock_irq(shost->host_lock); 5484 vport->fc_flag |= FC_VPORT_CVL_RCVD; 5485 spin_unlock_irq(shost->host_lock); 5486 5487 return ndlp; 5488 } 5489 5490 /** 5491 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 5492 * @vport: pointer to lpfc hba data structure. 5493 * 5494 * This routine is to perform Clear Virtual Link (CVL) on all vports in 5495 * response to a FCF dead event. 
5496 **/ 5497 static void 5498 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 5499 { 5500 struct lpfc_vport **vports; 5501 int i; 5502 5503 vports = lpfc_create_vport_work_array(phba); 5504 if (vports) 5505 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 5506 lpfc_sli4_perform_vport_cvl(vports[i]); 5507 lpfc_destroy_vport_work_array(phba, vports); 5508 } 5509 5510 /** 5511 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 5512 * @phba: pointer to lpfc hba data structure. 5513 * @acqe_link: pointer to the async fcoe completion queue entry. 5514 * 5515 * This routine is to handle the SLI4 asynchronous fcoe event. 5516 **/ 5517 static void 5518 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 5519 struct lpfc_acqe_fip *acqe_fip) 5520 { 5521 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 5522 int rc; 5523 struct lpfc_vport *vport; 5524 struct lpfc_nodelist *ndlp; 5525 struct Scsi_Host *shost; 5526 int active_vlink_present; 5527 struct lpfc_vport **vports; 5528 int i; 5529 5530 phba->fc_eventTag = acqe_fip->event_tag; 5531 phba->fcoe_eventtag = acqe_fip->event_tag; 5532 switch (event_type) { 5533 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5534 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5535 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5536 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5537 LOG_DISCOVERY, 5538 "2546 New FCF event, evt_tag:x%x, " 5539 "index:x%x\n", 5540 acqe_fip->event_tag, 5541 acqe_fip->index); 5542 else 5543 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 5544 LOG_DISCOVERY, 5545 "2788 FCF param modified event, " 5546 "evt_tag:x%x, index:x%x\n", 5547 acqe_fip->event_tag, 5548 acqe_fip->index); 5549 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5550 /* 5551 * During period of FCF discovery, read the FCF 5552 * table record indexed by the event to update 5553 * FCF roundrobin failover eligible FCF bmask. 5554 */ 5555 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5556 LOG_DISCOVERY, 5557 "2779 Read FCF (x%x) for updating " 5558 "roundrobin FCF failover bmask\n", 5559 acqe_fip->index); 5560 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5561 } 5562 5563 /* If the FCF discovery is in progress, do nothing. */ 5564 spin_lock_irq(&phba->hbalock); 5565 if (phba->hba_flag & FCF_TS_INPROG) { 5566 spin_unlock_irq(&phba->hbalock); 5567 break; 5568 } 5569 /* If fast FCF failover rescan event is pending, do nothing */ 5570 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 5571 spin_unlock_irq(&phba->hbalock); 5572 break; 5573 } 5574 5575 /* If the FCF has been in discovered state, do nothing. 
*/ 5576 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5577 spin_unlock_irq(&phba->hbalock); 5578 break; 5579 } 5580 spin_unlock_irq(&phba->hbalock); 5581 5582 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5583 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5584 "2770 Start FCF table scan per async FCF " 5585 "event, evt_tag:x%x, index:x%x\n", 5586 acqe_fip->event_tag, acqe_fip->index); 5587 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5588 LPFC_FCOE_FCF_GET_FIRST); 5589 if (rc) 5590 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5591 "2547 Issue FCF scan read FCF mailbox " 5592 "command failed (x%x)\n", rc); 5593 break; 5594 5595 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5596 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5597 "2548 FCF Table full count 0x%x tag 0x%x\n", 5598 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5599 acqe_fip->event_tag); 5600 break; 5601 5602 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5603 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5604 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5605 "2549 FCF (x%x) disconnected from network, " 5606 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 5607 /* 5608 * If we are in the middle of FCF failover process, clear 5609 * the corresponding FCF bit in the roundrobin bitmap. 5610 */ 5611 spin_lock_irq(&phba->hbalock); 5612 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 5613 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 5614 spin_unlock_irq(&phba->hbalock); 5615 /* Update FLOGI FCF failover eligible FCF bmask */ 5616 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 5617 break; 5618 } 5619 spin_unlock_irq(&phba->hbalock); 5620 5621 /* If the event is not for currently used fcf do nothing */ 5622 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 5623 break; 5624 5625 /* 5626 * Otherwise, request the port to rediscover the entire FCF 5627 * table for a fast recovery from case that the current FCF 5628 * is no longer valid as we are not in the middle of FCF 5629 * failover process already. 5630 */ 5631 spin_lock_irq(&phba->hbalock); 5632 /* Mark the fast failover process in progress */ 5633 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 5634 spin_unlock_irq(&phba->hbalock); 5635 5636 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5637 "2771 Start FCF fast failover process due to " 5638 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 5639 "\n", acqe_fip->event_tag, acqe_fip->index); 5640 rc = lpfc_sli4_redisc_fcf_table(phba); 5641 if (rc) { 5642 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5643 LOG_DISCOVERY, 5644 "2772 Issue FCF rediscover mailbox " 5645 "command failed, fail through to FCF " 5646 "dead event\n"); 5647 spin_lock_irq(&phba->hbalock); 5648 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 5649 spin_unlock_irq(&phba->hbalock); 5650 /* 5651 * Last resort will fail over by treating this 5652 * as a link down to FCF registration. 5653 */ 5654 lpfc_sli4_fcf_dead_failthrough(phba); 5655 } else { 5656 /* Reset FCF roundrobin bmask for new discovery */ 5657 lpfc_sli4_clear_fcf_rr_bmask(phba); 5658 /* 5659 * Handling fast FCF failover to a DEAD FCF event is 5660 * considered equalivant to receiving CVL to all vports. 
5661 */ 5662 lpfc_sli4_perform_all_vport_cvl(phba); 5663 } 5664 break; 5665 case LPFC_FIP_EVENT_TYPE_CVL: 5666 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5667 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5668 "2718 Clear Virtual Link Received for VPI 0x%x" 5669 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5670 5671 vport = lpfc_find_vport_by_vpid(phba, 5672 acqe_fip->index); 5673 ndlp = lpfc_sli4_perform_vport_cvl(vport); 5674 if (!ndlp) 5675 break; 5676 active_vlink_present = 0; 5677 5678 vports = lpfc_create_vport_work_array(phba); 5679 if (vports) { 5680 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5681 i++) { 5682 if ((!(vports[i]->fc_flag & 5683 FC_VPORT_CVL_RCVD)) && 5684 (vports[i]->port_state > LPFC_FDISC)) { 5685 active_vlink_present = 1; 5686 break; 5687 } 5688 } 5689 lpfc_destroy_vport_work_array(phba, vports); 5690 } 5691 5692 /* 5693 * Don't re-instantiate if vport is marked for deletion. 5694 * If we are here first then vport_delete is going to wait 5695 * for discovery to complete. 5696 */ 5697 if (!(vport->load_flag & FC_UNLOADING) && 5698 active_vlink_present) { 5699 /* 5700 * If there are other active VLinks present, 5701 * re-instantiate the Vlink using FDISC. 5702 */ 5703 mod_timer(&ndlp->nlp_delayfunc, 5704 jiffies + msecs_to_jiffies(1000)); 5705 shost = lpfc_shost_from_vport(vport); 5706 spin_lock_irq(shost->host_lock); 5707 ndlp->nlp_flag |= NLP_DELAY_TMO; 5708 spin_unlock_irq(shost->host_lock); 5709 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 5710 vport->port_state = LPFC_FDISC; 5711 } else { 5712 /* 5713 * Otherwise, we request port to rediscover 5714 * the entire FCF table for a fast recovery 5715 * from possible case that the current FCF 5716 * is no longer valid if we are not already 5717 * in the FCF failover process. 5718 */ 5719 spin_lock_irq(&phba->hbalock); 5720 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5721 spin_unlock_irq(&phba->hbalock); 5722 break; 5723 } 5724 /* Mark the fast failover process in progress */ 5725 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 5726 spin_unlock_irq(&phba->hbalock); 5727 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5728 LOG_DISCOVERY, 5729 "2773 Start FCF failover per CVL, " 5730 "evt_tag:x%x\n", acqe_fip->event_tag); 5731 rc = lpfc_sli4_redisc_fcf_table(phba); 5732 if (rc) { 5733 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5734 LOG_DISCOVERY, 5735 "2774 Issue FCF rediscover " 5736 "mailbox command failed, " 5737 "through to CVL event\n"); 5738 spin_lock_irq(&phba->hbalock); 5739 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 5740 spin_unlock_irq(&phba->hbalock); 5741 /* 5742 * Last resort will be re-try on the 5743 * the current registered FCF entry. 5744 */ 5745 lpfc_retry_pport_discovery(phba); 5746 } else 5747 /* 5748 * Reset FCF roundrobin bmask for new 5749 * discovery. 5750 */ 5751 lpfc_sli4_clear_fcf_rr_bmask(phba); 5752 } 5753 break; 5754 default: 5755 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5756 "0288 Unknown FCoE event type 0x%x event tag " 5757 "0x%x\n", event_type, acqe_fip->event_tag); 5758 break; 5759 } 5760 } 5761 5762 /** 5763 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 5764 * @phba: pointer to lpfc hba data structure. 5765 * @acqe_link: pointer to the async dcbx completion queue entry. 5766 * 5767 * This routine is to handle the SLI4 asynchronous dcbx event. 
5768 **/ 5769 static void 5770 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 5771 struct lpfc_acqe_dcbx *acqe_dcbx) 5772 { 5773 phba->fc_eventTag = acqe_dcbx->event_tag; 5774 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5775 "0290 The SLI4 DCBX asynchronous event is not " 5776 "handled yet\n"); 5777 } 5778 5779 /** 5780 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 5781 * @phba: pointer to lpfc hba data structure. 5782 * @acqe_grp5: pointer to the async grp5 completion queue entry. 5783 * 5784 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 5785 * is an asynchronous notification of a logical link speed change. The Port 5786 * reports the logical link speed in units of 10Mbps. 5787 **/ 5788 static void 5789 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 5790 struct lpfc_acqe_grp5 *acqe_grp5) 5791 { 5792 uint16_t prev_ll_spd; 5793 5794 phba->fc_eventTag = acqe_grp5->event_tag; 5795 phba->fcoe_eventtag = acqe_grp5->event_tag; 5796 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 5797 phba->sli4_hba.link_state.logical_speed = 5798 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 5799 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5800 "2789 GRP5 Async Event: Updating logical link speed " 5801 "from %dMbps to %dMbps\n", prev_ll_spd, 5802 phba->sli4_hba.link_state.logical_speed); 5803 } 5804 5805 /** 5806 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events 5807 * @phba: pointer to lpfc hba data structure. 5808 * 5809 * This routine is invoked by the worker thread to process all the pending 5810 * SLI4 asynchronous events. 5811 **/ 5812 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 5813 { 5814 struct lpfc_cq_event *cq_event; 5815 5816 /* First, declare the async event has been handled */ 5817 spin_lock_irq(&phba->hbalock); 5818 phba->hba_flag &= ~ASYNC_EVENT; 5819 spin_unlock_irq(&phba->hbalock); 5820 /* Now, handle all the async events */ 5821 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 5822 /* Get the first event from the head of the event queue */ 5823 spin_lock_irq(&phba->hbalock); 5824 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 5825 cq_event, struct lpfc_cq_event, list); 5826 spin_unlock_irq(&phba->hbalock); 5827 /* Process the asynchronous event */ 5828 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 5829 case LPFC_TRAILER_CODE_LINK: 5830 lpfc_sli4_async_link_evt(phba, 5831 &cq_event->cqe.acqe_link); 5832 break; 5833 case LPFC_TRAILER_CODE_FCOE: 5834 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 5835 break; 5836 case LPFC_TRAILER_CODE_DCBX: 5837 lpfc_sli4_async_dcbx_evt(phba, 5838 &cq_event->cqe.acqe_dcbx); 5839 break; 5840 case LPFC_TRAILER_CODE_GRP5: 5841 lpfc_sli4_async_grp5_evt(phba, 5842 &cq_event->cqe.acqe_grp5); 5843 break; 5844 case LPFC_TRAILER_CODE_FC: 5845 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 5846 break; 5847 case LPFC_TRAILER_CODE_SLI: 5848 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 5849 break; 5850 default: 5851 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5852 "1804 Invalid asynchronous event code: " 5853 "x%x\n", bf_get(lpfc_trailer_code, 5854 &cq_event->cqe.mcqe_cmpl)); 5855 break; 5856 } 5857 /* Free the completion event processed to the free pool */ 5858 lpfc_sli4_cq_event_release(phba, cq_event); 5859 } 5860 } 5861 5862 /** 5863 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 5864 * @phba: pointer to lpfc hba data structure.
5865 * 5866 * This routine is invoked by the worker thread to process FCF table 5867 * rediscovery pending completion event. 5868 **/ 5869 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5870 { 5871 int rc; 5872 5873 spin_lock_irq(&phba->hbalock); 5874 /* Clear FCF rediscovery timeout event */ 5875 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 5876 /* Clear driver fast failover FCF record flag */ 5877 phba->fcf.failover_rec.flag = 0; 5878 /* Set state for FCF fast failover */ 5879 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 5880 spin_unlock_irq(&phba->hbalock); 5881 5882 /* Scan FCF table from the first entry to re-discover SAN */ 5883 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5884 "2777 Start post-quiescent FCF table scan\n"); 5885 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5886 if (rc) 5887 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5888 "2747 Issue FCF scan read FCF mailbox " 5889 "command failed 0x%x\n", rc); 5890 } 5891 5892 /** 5893 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 5894 * @phba: pointer to lpfc hba data structure. 5895 * @dev_grp: The HBA PCI-Device group number. 5896 * 5897 * This routine is invoked to set up the per HBA PCI-Device group function 5898 * API jump table entries. 5899 * 5900 * Return: 0 if success, otherwise -ENODEV 5901 **/ 5902 int 5903 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5904 { 5905 int rc; 5906 5907 /* Set up lpfc PCI-device group */ 5908 phba->pci_dev_grp = dev_grp; 5909 5910 /* The LPFC_PCI_DEV_OC uses SLI4 */ 5911 if (dev_grp == LPFC_PCI_DEV_OC) 5912 phba->sli_rev = LPFC_SLI_REV4; 5913 5914 /* Set up device INIT API function jump table */ 5915 rc = lpfc_init_api_table_setup(phba, dev_grp); 5916 if (rc) 5917 return -ENODEV; 5918 /* Set up SCSI API function jump table */ 5919 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 5920 if (rc) 5921 return -ENODEV; 5922 /* Set up SLI API function jump table */ 5923 rc = lpfc_sli_api_table_setup(phba, dev_grp); 5924 if (rc) 5925 return -ENODEV; 5926 /* Set up MBOX API function jump table */ 5927 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 5928 if (rc) 5929 return -ENODEV; 5930 5931 return 0; 5932 } 5933 5934 /** 5935 * lpfc_log_intr_mode - Log the active interrupt mode 5936 * @phba: pointer to lpfc hba data structure. 5937 * @intr_mode: active interrupt mode adopted. 5938 * 5939 * This routine it invoked to log the currently used active interrupt mode 5940 * to the device. 5941 **/ 5942 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 5943 { 5944 switch (intr_mode) { 5945 case 0: 5946 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5947 "0470 Enable INTx interrupt mode.\n"); 5948 break; 5949 case 1: 5950 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5951 "0481 Enabled MSI interrupt mode.\n"); 5952 break; 5953 case 2: 5954 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5955 "0480 Enabled MSI-X interrupt mode.\n"); 5956 break; 5957 default: 5958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5959 "0482 Illegal interrupt mode.\n"); 5960 break; 5961 } 5962 return; 5963 } 5964 5965 /** 5966 * lpfc_enable_pci_dev - Enable a generic PCI device. 5967 * @phba: pointer to lpfc hba data structure. 5968 * 5969 * This routine is invoked to enable the PCI device that is common to all 5970 * PCI devices. 
5971 * 5972 * Return codes 5973 * 0 - successful 5974 * other values - error 5975 **/ 5976 static int 5977 lpfc_enable_pci_dev(struct lpfc_hba *phba) 5978 { 5979 struct pci_dev *pdev; 5980 5981 /* Obtain PCI device reference */ 5982 if (!phba->pcidev) 5983 goto out_error; 5984 else 5985 pdev = phba->pcidev; 5986 /* Enable PCI device */ 5987 if (pci_enable_device_mem(pdev)) 5988 goto out_error; 5989 /* Request PCI resource for the device */ 5990 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 5991 goto out_disable_device; 5992 /* Set up device as PCI master and save state for EEH */ 5993 pci_set_master(pdev); 5994 pci_try_set_mwi(pdev); 5995 pci_save_state(pdev); 5996 5997 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 5998 if (pci_is_pcie(pdev)) 5999 pdev->needs_freset = 1; 6000 6001 return 0; 6002 6003 out_disable_device: 6004 pci_disable_device(pdev); 6005 out_error: 6006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6007 "1401 Failed to enable pci device\n"); 6008 return -ENODEV; 6009 } 6010 6011 /** 6012 * lpfc_disable_pci_dev - Disable a generic PCI device. 6013 * @phba: pointer to lpfc hba data structure. 6014 * 6015 * This routine is invoked to disable the PCI device that is common to all 6016 * PCI devices. 6017 **/ 6018 static void 6019 lpfc_disable_pci_dev(struct lpfc_hba *phba) 6020 { 6021 struct pci_dev *pdev; 6022 6023 /* Obtain PCI device reference */ 6024 if (!phba->pcidev) 6025 return; 6026 else 6027 pdev = phba->pcidev; 6028 /* Release PCI resource and disable PCI device */ 6029 pci_release_mem_regions(pdev); 6030 pci_disable_device(pdev); 6031 6032 return; 6033 } 6034 6035 /** 6036 * lpfc_reset_hba - Reset a hba 6037 * @phba: pointer to lpfc hba data structure. 6038 * 6039 * This routine is invoked to reset a hba device. It brings the HBA 6040 * offline, performs a board restart, and then brings the board back 6041 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 6042 * on outstanding mailbox commands. 6043 **/ 6044 void 6045 lpfc_reset_hba(struct lpfc_hba *phba) 6046 { 6047 /* If resets are disabled then set error state and return. */ 6048 if (!phba->cfg_enable_hba_reset) { 6049 phba->link_state = LPFC_HBA_ERROR; 6050 return; 6051 } 6052 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 6053 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6054 else 6055 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 6056 lpfc_offline(phba); 6057 lpfc_sli_brdrestart(phba); 6058 lpfc_online(phba); 6059 lpfc_unblock_mgmt_io(phba); 6060 } 6061 6062 /** 6063 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 6064 * @phba: pointer to lpfc hba data structure. 6065 * 6066 * This function enables the PCI SR-IOV virtual functions to a physical 6067 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 6068 * enable the number of virtual functions to the physical function. As 6069 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 6070 * API call does not considered as an error condition for most of the device. 
6071 **/ 6072 uint16_t 6073 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 6074 { 6075 struct pci_dev *pdev = phba->pcidev; 6076 uint16_t nr_virtfn; 6077 int pos; 6078 6079 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 6080 if (pos == 0) 6081 return 0; 6082 6083 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 6084 return nr_virtfn; 6085 } 6086 6087 /** 6088 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 6089 * @phba: pointer to lpfc hba data structure. 6090 * @nr_vfn: number of virtual functions to be enabled. 6091 * 6092 * This function enables the PCI SR-IOV virtual functions for a physical 6093 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 6094 * enable that number of virtual functions on the physical function. As 6095 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 6096 * API call is not considered an error condition for most devices. 6097 **/ 6098 int 6099 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 6100 { 6101 struct pci_dev *pdev = phba->pcidev; 6102 uint16_t max_nr_vfn; 6103 int rc; 6104 6105 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 6106 if (nr_vfn > max_nr_vfn) { 6107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6108 "3057 Requested vfs (%d) greater than " 6109 "supported vfs (%d)", nr_vfn, max_nr_vfn); 6110 return -EINVAL; 6111 } 6112 6113 rc = pci_enable_sriov(pdev, nr_vfn); 6114 if (rc) { 6115 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6116 "2806 Failed to enable sriov on this device " 6117 "with vfn number nr_vf:%d, rc:%d\n", 6118 nr_vfn, rc); 6119 } else 6120 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6121 "2807 Successfully enabled sriov on this device " 6122 "with vfn number nr_vf:%d\n", nr_vfn); 6123 return rc; 6124 } 6125 6126 /** 6127 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. 6128 * @phba: pointer to lpfc hba data structure. 6129 * 6130 * This routine is invoked to set up the driver internal resources before the 6131 * device specific resource setup to support the HBA device it attached to. 6132 * 6133 * Return codes 6134 * 0 - successful 6135 * other values - error 6136 **/ 6137 static int 6138 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 6139 { 6140 struct lpfc_sli *psli = &phba->sli; 6141 6142 /* 6143 * Driver resources common to all SLI revisions 6144 */ 6145 atomic_set(&phba->fast_event_count, 0); 6146 spin_lock_init(&phba->hbalock); 6147 6148 /* Initialize ndlp management spinlock */ 6149 spin_lock_init(&phba->ndlp_lock); 6150 6151 /* Initialize port_list spinlock */ 6152 spin_lock_init(&phba->port_list_lock); 6153 INIT_LIST_HEAD(&phba->port_list); 6154 6155 INIT_LIST_HEAD(&phba->work_list); 6156 init_waitqueue_head(&phba->wait_4_mlo_m_q); 6157 6158 /* Initialize the wait queue head for the kernel thread */ 6159 init_waitqueue_head(&phba->work_waitq); 6160 6161 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6162 "1403 Protocols supported %s %s %s\n", 6163 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 6164 "SCSI" : " "), 6165 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 6166 "NVME" : " "), 6167 (phba->nvmet_support ?
"NVMET" : " ")); 6168 6169 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 6170 spin_lock_init(&phba->scsi_buf_list_get_lock); 6171 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 6172 spin_lock_init(&phba->scsi_buf_list_put_lock); 6173 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 6174 6175 /* Initialize the fabric iocb list */ 6176 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6177 6178 /* Initialize list to save ELS buffers */ 6179 INIT_LIST_HEAD(&phba->elsbuf); 6180 6181 /* Initialize FCF connection rec list */ 6182 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 6183 6184 /* Initialize OAS configuration list */ 6185 spin_lock_init(&phba->devicelock); 6186 INIT_LIST_HEAD(&phba->luns); 6187 6188 /* MBOX heartbeat timer */ 6189 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 6190 /* Fabric block timer */ 6191 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 6192 /* EA polling mode timer */ 6193 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 6194 /* Heartbeat timer */ 6195 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 6196 6197 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 6198 6199 return 0; 6200 } 6201 6202 /** 6203 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 6204 * @phba: pointer to lpfc hba data structure. 6205 * 6206 * This routine is invoked to set up the driver internal resources specific to 6207 * support the SLI-3 HBA device it attached to. 6208 * 6209 * Return codes 6210 * 0 - successful 6211 * other values - error 6212 **/ 6213 static int 6214 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 6215 { 6216 int rc, entry_sz; 6217 6218 /* 6219 * Initialize timers used by driver 6220 */ 6221 6222 /* FCP polling mode timer */ 6223 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 6224 6225 /* Host attention work mask setup */ 6226 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6227 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6228 6229 /* Get all the module params for configuring this host */ 6230 lpfc_get_cfgparam(phba); 6231 /* Set up phase-1 common device driver resources */ 6232 6233 rc = lpfc_setup_driver_resource_phase1(phba); 6234 if (rc) 6235 return -ENODEV; 6236 6237 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 6238 phba->menlo_flag |= HBA_MENLO_SUPPORT; 6239 /* check for menlo minimum sg count */ 6240 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 6241 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 6242 } 6243 6244 if (!phba->sli.sli3_ring) 6245 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 6246 sizeof(struct lpfc_sli_ring), 6247 GFP_KERNEL); 6248 if (!phba->sli.sli3_ring) 6249 return -ENOMEM; 6250 6251 /* 6252 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 6253 * used to create the sg_dma_buf_pool must be dynamically calculated. 6254 */ 6255 6256 /* Initialize the host templates the configured values. */ 6257 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6258 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 6259 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6260 6261 if (phba->sli_rev == LPFC_SLI_REV4) 6262 entry_sz = sizeof(struct sli4_sge); 6263 else 6264 entry_sz = sizeof(struct ulp_bde64); 6265 6266 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 6267 if (phba->cfg_enable_bg) { 6268 /* 6269 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 6270 * the FCP rsp, and a BDE for each. 
Since we have no control 6271 * over how many protection data segments the SCSI Layer 6272 * will hand us (i.e. there could be one for every block 6273 * in the IO), we just allocate enough BDEs to accommodate 6274 * our max amount and we need to limit lpfc_sg_seg_cnt to 6275 * minimize the risk of running out. 6276 */ 6277 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6278 sizeof(struct fcp_rsp) + 6279 (LPFC_MAX_SG_SEG_CNT * entry_sz); 6280 6281 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 6282 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 6283 6284 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 6285 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 6286 } else { 6287 /* 6288 * The scsi_buf for a regular I/O will hold the FCP cmnd, 6289 * the FCP rsp, a BDE for each, and a BDE for up to 6290 * cfg_sg_seg_cnt data segments. 6291 */ 6292 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6293 sizeof(struct fcp_rsp) + 6294 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 6295 6296 /* Total BDEs in BPL for scsi_sg_list */ 6297 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 6298 } 6299 6300 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6301 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 6302 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6303 phba->cfg_total_seg_cnt); 6304 6305 phba->max_vpi = LPFC_MAX_VPI; 6306 /* This will be set to correct value after config_port mbox */ 6307 phba->max_vports = 0; 6308 6309 /* 6310 * Initialize the SLI Layer to run with lpfc HBAs. 6311 */ 6312 lpfc_sli_setup(phba); 6313 lpfc_sli_queue_init(phba); 6314 6315 /* Allocate device driver memory */ 6316 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 6317 return -ENOMEM; 6318 6319 /* 6320 * Enable sr-iov virtual functions if supported and configured 6321 * through the module parameter. 6322 */ 6323 if (phba->cfg_sriov_nr_virtfn > 0) { 6324 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6325 phba->cfg_sriov_nr_virtfn); 6326 if (rc) { 6327 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6328 "2808 Requested number of SR-IOV " 6329 "virtual functions (%d) is not " 6330 "supported\n", 6331 phba->cfg_sriov_nr_virtfn); 6332 phba->cfg_sriov_nr_virtfn = 0; 6333 } 6334 } 6335 6336 return 0; 6337 } 6338 6339 /** 6340 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 6341 * @phba: pointer to lpfc hba data structure. 6342 * 6343 * This routine is invoked to unset the driver internal resources set up 6344 * specific for supporting the SLI-3 HBA device it attached to. 6345 **/ 6346 static void 6347 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 6348 { 6349 /* Free device driver memory allocated */ 6350 lpfc_mem_free_all(phba); 6351 6352 return; 6353 } 6354 6355 /** 6356 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 6357 * @phba: pointer to lpfc hba data structure. 6358 * 6359 * This routine is invoked to set up the driver internal resources specific to 6360 * support the SLI-4 HBA device it attached to.
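 * Beyond the phase-1 common setup, this path also waits for POST completion, creates the bootstrap mailbox, reads the port configuration and SLI4 parameters, sizes the SCSI/NVME scatter-gather DMA buffers, and allocates the FCF roundrobin bmask, the per-EQ handle array and the per-CPU EQ statistics.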
6361 * 6362 * Return codes 6363 * 0 - successful 6364 * other values - error 6365 **/ 6366 static int 6367 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 6368 { 6369 LPFC_MBOXQ_t *mboxq; 6370 MAILBOX_t *mb; 6371 int rc, i, max_buf_size; 6372 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 6373 struct lpfc_mqe *mqe; 6374 int longs; 6375 int extra; 6376 uint64_t wwn; 6377 u32 if_type; 6378 u32 if_fam; 6379 6380 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 6381 phba->sli4_hba.num_possible_cpu = num_possible_cpus(); 6382 phba->sli4_hba.curr_disp_cpu = 0; 6383 6384 /* Get all the module params for configuring this host */ 6385 lpfc_get_cfgparam(phba); 6386 6387 /* Set up phase-1 common device driver resources */ 6388 rc = lpfc_setup_driver_resource_phase1(phba); 6389 if (rc) 6390 return -ENODEV; 6391 6392 /* Before proceeding, wait for POST done and device ready */ 6393 rc = lpfc_sli4_post_status_check(phba); 6394 if (rc) 6395 return -ENODEV; 6396 6397 /* 6398 * Initialize timers used by driver 6399 */ 6400 6401 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 6402 6403 /* FCF rediscover timer */ 6404 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 6405 6406 /* 6407 * Control structure for handling external multi-buffer mailbox 6408 * command pass-through. 6409 */ 6410 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 6411 sizeof(struct lpfc_mbox_ext_buf_ctx)); 6412 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 6413 6414 phba->max_vpi = LPFC_MAX_VPI; 6415 6416 /* This will be set to correct value after the read_config mbox */ 6417 phba->max_vports = 0; 6418 6419 /* Program the default value of vlan_id and fc_map */ 6420 phba->valid_vlan = 0; 6421 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 6422 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 6423 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 6424 6425 /* 6426 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 6427 * we will associate a new ring, for each EQ/CQ/WQ tuple. 6428 * The WQ create will allocate the ring. 6429 */ 6430 6431 /* 6432 * 1 for cmd, 1 for rsp, NVME adds an extra one 6433 * for boundary conditions in its max_sgl_segment template. 6434 */ 6435 extra = 2; 6436 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 6437 extra++; 6438 6439 /* 6440 * It doesn't matter what family our adapter is in, we are 6441 * limited to 2 Pages, 512 SGEs, for our SGL. 6442 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 6443 */ 6444 max_buf_size = (2 * SLI4_PAGE_SIZE); 6445 6446 /* 6447 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 6448 * used to create the sg_dma_buf_pool must be calculated. 6449 */ 6450 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 6451 /* 6452 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 6453 * the FCP rsp, and a SGE. Since we have no control 6454 * over how many protection segments the SCSI Layer 6455 * will hand us (i.e. there could be one for every block 6456 * in the IO), just allocate enough SGEs to accommodate 6457 * our max amount and we need to limit lpfc_sg_seg_cnt 6458 * to minimize the risk of running out. 6459 */ 6460 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6461 sizeof(struct fcp_rsp) + max_buf_size; 6462 6463 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 6464 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 6465 6466 /* 6467 * If supporting DIF, reduce the seg count for scsi to 6468 * allow room for the DIF sges.
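 * (Illustrative sizing only, assuming SLI4_PAGE_SIZE is 4KB: max_buf_size above is 2 * 4096 = 8192, so each T10-DIF DMA buffer is sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 8192 bytes regardless of cfg_sg_seg_cnt, whereas the non-DIF branch below sizes the buffer by (cfg_sg_seg_cnt + extra) SGEs.)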
6469 */ 6470 if (phba->cfg_enable_bg && 6471 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 6472 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 6473 else 6474 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6475 6476 } else { 6477 /* 6478 * The scsi_buf for a regular I/O holds the FCP cmnd, 6479 * the FCP rsp, a SGE for each, and a SGE for up to 6480 * cfg_sg_seg_cnt data segments. 6481 */ 6482 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6483 sizeof(struct fcp_rsp) + 6484 ((phba->cfg_sg_seg_cnt + extra) * 6485 sizeof(struct sli4_sge)); 6486 6487 /* Total SGEs for scsi_sg_list */ 6488 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 6489 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6490 6491 /* 6492 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 6493 * need to post 1 page for the SGL. 6494 */ 6495 } 6496 6497 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 6498 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6499 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 6500 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 6501 "6300 Reducing NVME sg segment " 6502 "cnt to %d\n", 6503 LPFC_MAX_NVME_SEG_CNT); 6504 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 6505 } else 6506 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 6507 } 6508 6509 /* Initialize the host templates with the updated values. */ 6510 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6511 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6512 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt; 6513 6514 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 6515 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 6516 else 6517 phba->cfg_sg_dma_buf_size = 6518 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 6519 6520 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6521 "9087 sg_seg_cnt:%d dmabuf_size:%d " 6522 "total:%d scsi:%d nvme:%d\n", 6523 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6524 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 6525 phba->cfg_nvme_seg_cnt); 6526 6527 /* Initialize buffer queue management fields */ 6528 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6529 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 6530 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 6531 6532 /* 6533 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
6534 */ 6535 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 6536 /* Initialize the Abort scsi buffer list used by driver */ 6537 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 6538 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 6539 } 6540 6541 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6542 /* Initialize the Abort nvme buffer list used by driver */ 6543 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 6544 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6545 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 6546 } 6547 6548 /* This abort list used by worker thread */ 6549 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 6550 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 6551 6552 /* 6553 * Initialize driver internal slow-path work queues 6554 */ 6555 6556 /* Driver internel slow-path CQ Event pool */ 6557 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 6558 /* Response IOCB work queue list */ 6559 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 6560 /* Asynchronous event CQ Event work queue list */ 6561 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 6562 /* Fast-path XRI aborted CQ Event work queue list */ 6563 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 6564 /* Slow-path XRI aborted CQ Event work queue list */ 6565 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 6566 /* Receive queue CQ Event work queue list */ 6567 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 6568 6569 /* Initialize extent block lists. */ 6570 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 6571 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 6572 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6573 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6574 6575 /* Initialize mboxq lists. If the early init routines fail 6576 * these lists need to be correctly initialized. 6577 */ 6578 INIT_LIST_HEAD(&phba->sli.mboxq); 6579 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6580 6581 /* initialize optic_state to 0xFF */ 6582 phba->sli4_hba.lnk_info.optic_state = 0xff; 6583 6584 /* Allocate device driver memory */ 6585 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6586 if (rc) 6587 return -ENOMEM; 6588 6589 /* IF Type 2 ports get initialized now. */ 6590 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 6591 LPFC_SLI_INTF_IF_TYPE_2) { 6592 rc = lpfc_pci_function_reset(phba); 6593 if (unlikely(rc)) { 6594 rc = -ENODEV; 6595 goto out_free_mem; 6596 } 6597 phba->temp_sensor_support = 1; 6598 } 6599 6600 /* Create the bootstrap mailbox command */ 6601 rc = lpfc_create_bootstrap_mbox(phba); 6602 if (unlikely(rc)) 6603 goto out_free_mem; 6604 6605 /* Set up the host's endian order with the device. */ 6606 rc = lpfc_setup_endian_order(phba); 6607 if (unlikely(rc)) 6608 goto out_free_bsmbx; 6609 6610 /* Set up the hba's configuration parameters. */ 6611 rc = lpfc_sli4_read_config(phba); 6612 if (unlikely(rc)) 6613 goto out_free_bsmbx; 6614 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6615 if (unlikely(rc)) 6616 goto out_free_bsmbx; 6617 6618 /* IF Type 0 ports get initialized now. 
*/ 6619 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6620 LPFC_SLI_INTF_IF_TYPE_0) { 6621 rc = lpfc_pci_function_reset(phba); 6622 if (unlikely(rc)) 6623 goto out_free_bsmbx; 6624 } 6625 6626 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6627 GFP_KERNEL); 6628 if (!mboxq) { 6629 rc = -ENOMEM; 6630 goto out_free_bsmbx; 6631 } 6632 6633 /* Check for NVMET being configured */ 6634 phba->nvmet_support = 0; 6635 if (lpfc_enable_nvmet_cnt) { 6636 6637 /* First get WWN of HBA instance */ 6638 lpfc_read_nv(phba, mboxq); 6639 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6640 if (rc != MBX_SUCCESS) { 6641 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6642 "6016 Mailbox failed , mbxCmd x%x " 6643 "READ_NV, mbxStatus x%x\n", 6644 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6645 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6646 mempool_free(mboxq, phba->mbox_mem_pool); 6647 rc = -EIO; 6648 goto out_free_bsmbx; 6649 } 6650 mb = &mboxq->u.mb; 6651 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6652 sizeof(uint64_t)); 6653 wwn = cpu_to_be64(wwn); 6654 phba->sli4_hba.wwnn.u.name = wwn; 6655 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6656 sizeof(uint64_t)); 6657 /* wwn is WWPN of HBA instance */ 6658 wwn = cpu_to_be64(wwn); 6659 phba->sli4_hba.wwpn.u.name = wwn; 6660 6661 /* Check to see if it matches any module parameter */ 6662 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6663 if (wwn == lpfc_enable_nvmet[i]) { 6664 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6665 if (lpfc_nvmet_mem_alloc(phba)) 6666 break; 6667 6668 phba->nvmet_support = 1; /* a match */ 6669 6670 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6671 "6017 NVME Target %016llx\n", 6672 wwn); 6673 #else 6674 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6675 "6021 Can't enable NVME Target." 6676 " NVME_TARGET_FC infrastructure" 6677 " is not in kernel\n"); 6678 #endif 6679 /* Not supported for NVMET */ 6680 phba->cfg_xri_rebalancing = 0; 6681 break; 6682 } 6683 } 6684 } 6685 6686 lpfc_nvme_mod_param_dep(phba); 6687 6688 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 6689 lpfc_supported_pages(mboxq); 6690 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6691 if (!rc) { 6692 mqe = &mboxq->u.mqe; 6693 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 6694 LPFC_MAX_SUPPORTED_PAGES); 6695 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 6696 switch (pn_page[i]) { 6697 case LPFC_SLI4_PARAMETERS: 6698 phba->sli4_hba.pc_sli4_params.supported = 1; 6699 break; 6700 default: 6701 break; 6702 } 6703 } 6704 /* Read the port's SLI4 Parameters capabilities if supported. */ 6705 if (phba->sli4_hba.pc_sli4_params.supported) 6706 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6707 if (rc) { 6708 mempool_free(mboxq, phba->mbox_mem_pool); 6709 rc = -EIO; 6710 goto out_free_bsmbx; 6711 } 6712 } 6713 6714 /* 6715 * Get sli4 parameters that override parameters from Port capabilities. 6716 * If this call fails, it isn't critical unless the SLI4 parameters come 6717 * back in conflict. 
6718 */ 6719 rc = lpfc_get_sli4_parameters(phba, mboxq); 6720 if (rc) { 6721 if_type = bf_get(lpfc_sli_intf_if_type, 6722 &phba->sli4_hba.sli_intf); 6723 if_fam = bf_get(lpfc_sli_intf_sli_family, 6724 &phba->sli4_hba.sli_intf); 6725 if (phba->sli4_hba.extents_in_use && 6726 phba->sli4_hba.rpi_hdrs_in_use) { 6727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6728 "2999 Unsupported SLI4 Parameters " 6729 "Extents and RPI headers enabled.\n"); 6730 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6731 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6732 mempool_free(mboxq, phba->mbox_mem_pool); 6733 rc = -EIO; 6734 goto out_free_bsmbx; 6735 } 6736 } 6737 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6738 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 6739 mempool_free(mboxq, phba->mbox_mem_pool); 6740 rc = -EIO; 6741 goto out_free_bsmbx; 6742 } 6743 } 6744 6745 mempool_free(mboxq, phba->mbox_mem_pool); 6746 6747 /* Verify OAS is supported */ 6748 lpfc_sli4_oas_verify(phba); 6749 6750 /* Verify RAS support on adapter */ 6751 lpfc_sli4_ras_init(phba); 6752 6753 /* Verify all the SLI4 queues */ 6754 rc = lpfc_sli4_queue_verify(phba); 6755 if (rc) 6756 goto out_free_bsmbx; 6757 6758 /* Create driver internal CQE event pool */ 6759 rc = lpfc_sli4_cq_event_pool_create(phba); 6760 if (rc) 6761 goto out_free_bsmbx; 6762 6763 /* Initialize sgl lists per host */ 6764 lpfc_init_sgl_list(phba); 6765 6766 /* Allocate and initialize active sgl array */ 6767 rc = lpfc_init_active_sgl_array(phba); 6768 if (rc) { 6769 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6770 "1430 Failed to initialize sgl list.\n"); 6771 goto out_destroy_cq_event_pool; 6772 } 6773 rc = lpfc_sli4_init_rpi_hdrs(phba); 6774 if (rc) { 6775 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6776 "1432 Failed to initialize rpi headers.\n"); 6777 goto out_free_active_sgl; 6778 } 6779 6780 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6781 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6782 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 6783 GFP_KERNEL); 6784 if (!phba->fcf.fcf_rr_bmask) { 6785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6786 "2759 Failed allocate memory for FCF round " 6787 "robin failover bmask\n"); 6788 rc = -ENOMEM; 6789 goto out_remove_rpi_hdrs; 6790 } 6791 6792 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 6793 sizeof(struct lpfc_hba_eq_hdl), 6794 GFP_KERNEL); 6795 if (!phba->sli4_hba.hba_eq_hdl) { 6796 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6797 "2572 Failed allocate memory for " 6798 "fast-path per-EQ handle array\n"); 6799 rc = -ENOMEM; 6800 goto out_free_fcf_rr_bmask; 6801 } 6802 6803 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 6804 sizeof(struct lpfc_vector_map_info), 6805 GFP_KERNEL); 6806 if (!phba->sli4_hba.cpu_map) { 6807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6808 "3327 Failed allocate memory for msi-x " 6809 "interrupt vector mapping\n"); 6810 rc = -ENOMEM; 6811 goto out_free_hba_eq_hdl; 6812 } 6813 6814 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 6815 if (!phba->sli4_hba.eq_info) { 6816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6817 "3321 Failed allocation for per_cpu stats\n"); 6818 rc = -ENOMEM; 6819 goto out_free_hba_cpu_map; 6820 } 6821 /* 6822 * Enable sr-iov virtual functions if supported and configured 6823 * through the module parameter. 
6824 */ 6825 if (phba->cfg_sriov_nr_virtfn > 0) { 6826 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6827 phba->cfg_sriov_nr_virtfn); 6828 if (rc) { 6829 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6830 "3020 Requested number of SR-IOV " 6831 "virtual functions (%d) is not " 6832 "supported\n", 6833 phba->cfg_sriov_nr_virtfn); 6834 phba->cfg_sriov_nr_virtfn = 0; 6835 } 6836 } 6837 6838 return 0; 6839 6840 out_free_hba_cpu_map: 6841 kfree(phba->sli4_hba.cpu_map); 6842 out_free_hba_eq_hdl: 6843 kfree(phba->sli4_hba.hba_eq_hdl); 6844 out_free_fcf_rr_bmask: 6845 kfree(phba->fcf.fcf_rr_bmask); 6846 out_remove_rpi_hdrs: 6847 lpfc_sli4_remove_rpi_hdrs(phba); 6848 out_free_active_sgl: 6849 lpfc_free_active_sgl(phba); 6850 out_destroy_cq_event_pool: 6851 lpfc_sli4_cq_event_pool_destroy(phba); 6852 out_free_bsmbx: 6853 lpfc_destroy_bootstrap_mbox(phba); 6854 out_free_mem: 6855 lpfc_mem_free(phba); 6856 return rc; 6857 } 6858 6859 /** 6860 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 6861 * @phba: pointer to lpfc hba data structure. 6862 * 6863 * This routine is invoked to unset the driver internal resources set up 6864 * specific for supporting the SLI-4 HBA device it attached to. 6865 **/ 6866 static void 6867 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 6868 { 6869 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 6870 6871 free_percpu(phba->sli4_hba.eq_info); 6872 6873 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 6874 kfree(phba->sli4_hba.cpu_map); 6875 phba->sli4_hba.num_possible_cpu = 0; 6876 phba->sli4_hba.num_present_cpu = 0; 6877 phba->sli4_hba.curr_disp_cpu = 0; 6878 6879 /* Free memory allocated for fast-path work queue handles */ 6880 kfree(phba->sli4_hba.hba_eq_hdl); 6881 6882 /* Free the allocated rpi headers. */ 6883 lpfc_sli4_remove_rpi_hdrs(phba); 6884 lpfc_sli4_remove_rpis(phba); 6885 6886 /* Free eligible FCF index bmask */ 6887 kfree(phba->fcf.fcf_rr_bmask); 6888 6889 /* Free the ELS sgl list */ 6890 lpfc_free_active_sgl(phba); 6891 lpfc_free_els_sgl_list(phba); 6892 lpfc_free_nvmet_sgl_list(phba); 6893 6894 /* Free the completion queue EQ event pool */ 6895 lpfc_sli4_cq_event_release_all(phba); 6896 lpfc_sli4_cq_event_pool_destroy(phba); 6897 6898 /* Release resource identifiers. */ 6899 lpfc_sli4_dealloc_resource_identifiers(phba); 6900 6901 /* Free the bsmbx region. */ 6902 lpfc_destroy_bootstrap_mbox(phba); 6903 6904 /* Free the SLI Layer memory with SLI4 HBAs */ 6905 lpfc_mem_free_all(phba); 6906 6907 /* Free the current connect table */ 6908 list_for_each_entry_safe(conn_entry, next_conn_entry, 6909 &phba->fcf_conn_rec_list, list) { 6910 list_del_init(&conn_entry->list); 6911 kfree(conn_entry); 6912 } 6913 6914 return; 6915 } 6916 6917 /** 6918 * lpfc_init_api_table_setup - Set up init api function jump table 6919 * @phba: The hba struct for which this call is being executed. 6920 * @dev_grp: The HBA PCI-Device group number. 6921 * 6922 * This routine sets up the device INIT interface API function jump table 6923 * in @phba struct. 6924 * 6925 * Returns: 0 - success, -ENODEV - failure. 
6926 **/ 6927 int 6928 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6929 { 6930 phba->lpfc_hba_init_link = lpfc_hba_init_link; 6931 phba->lpfc_hba_down_link = lpfc_hba_down_link; 6932 phba->lpfc_selective_reset = lpfc_selective_reset; 6933 switch (dev_grp) { 6934 case LPFC_PCI_DEV_LP: 6935 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 6936 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 6937 phba->lpfc_stop_port = lpfc_stop_port_s3; 6938 break; 6939 case LPFC_PCI_DEV_OC: 6940 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 6941 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 6942 phba->lpfc_stop_port = lpfc_stop_port_s4; 6943 break; 6944 default: 6945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6946 "1431 Invalid HBA PCI-device group: 0x%x\n", 6947 dev_grp); 6948 return -ENODEV; 6949 break; 6950 } 6951 return 0; 6952 } 6953 6954 /** 6955 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 6956 * @phba: pointer to lpfc hba data structure. 6957 * 6958 * This routine is invoked to set up the driver internal resources after the 6959 * device specific resource setup to support the HBA device it attached to. 6960 * 6961 * Return codes 6962 * 0 - successful 6963 * other values - error 6964 **/ 6965 static int 6966 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 6967 { 6968 int error; 6969 6970 /* Startup the kernel thread for this host adapter. */ 6971 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6972 "lpfc_worker_%d", phba->brd_no); 6973 if (IS_ERR(phba->worker_thread)) { 6974 error = PTR_ERR(phba->worker_thread); 6975 return error; 6976 } 6977 6978 /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */ 6979 if (phba->sli_rev == LPFC_SLI_REV4) 6980 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6981 else 6982 phba->wq = NULL; 6983 6984 return 0; 6985 } 6986 6987 /** 6988 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 6989 * @phba: pointer to lpfc hba data structure. 6990 * 6991 * This routine is invoked to unset the driver internal resources set up after 6992 * the device specific resource setup for supporting the HBA device it 6993 * attached to. 6994 **/ 6995 static void 6996 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 6997 { 6998 if (phba->wq) { 6999 flush_workqueue(phba->wq); 7000 destroy_workqueue(phba->wq); 7001 phba->wq = NULL; 7002 } 7003 7004 /* Stop kernel worker thread */ 7005 if (phba->worker_thread) 7006 kthread_stop(phba->worker_thread); 7007 } 7008 7009 /** 7010 * lpfc_free_iocb_list - Free iocb list. 7011 * @phba: pointer to lpfc hba data structure. 7012 * 7013 * This routine is invoked to free the driver's IOCB list and memory. 7014 **/ 7015 void 7016 lpfc_free_iocb_list(struct lpfc_hba *phba) 7017 { 7018 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 7019 7020 spin_lock_irq(&phba->hbalock); 7021 list_for_each_entry_safe(iocbq_entry, iocbq_next, 7022 &phba->lpfc_iocb_list, list) { 7023 list_del(&iocbq_entry->list); 7024 kfree(iocbq_entry); 7025 phba->total_iocbq_bufs--; 7026 } 7027 spin_unlock_irq(&phba->hbalock); 7028 7029 return; 7030 } 7031 7032 /** 7033 * lpfc_init_iocb_list - Allocate and initialize iocb list. 7034 * @phba: pointer to lpfc hba data structure. 7035 * 7036 * This routine is invoked to allocate and initizlize the driver's IOCB 7037 * list and set up the IOCB tag array accordingly. 
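 * Each pre-allocated IOCB receives an iotag from lpfc_sli_next_iotag() and is linked onto phba->lpfc_iocb_list under hbalock; lpfc_free_iocb_list() above tears the list down again on failure or unload.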
7038 * 7039 * Return codes 7040 * 0 - successful 7041 * other values - error 7042 **/ 7043 int 7044 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 7045 { 7046 struct lpfc_iocbq *iocbq_entry = NULL; 7047 uint16_t iotag; 7048 int i; 7049 7050 /* Initialize and populate the iocb list per host. */ 7051 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 7052 for (i = 0; i < iocb_count; i++) { 7053 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 7054 if (iocbq_entry == NULL) { 7055 printk(KERN_ERR "%s: only allocated %d iocbs of " 7056 "expected %d count. Unloading driver.\n", 7057 __func__, i, iocb_count); 7058 goto out_free_iocbq; 7059 } 7060 7061 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 7062 if (iotag == 0) { 7063 kfree(iocbq_entry); 7064 printk(KERN_ERR "%s: failed to allocate IOTAG. " 7065 "Unloading driver.\n", __func__); 7066 goto out_free_iocbq; 7067 } 7068 iocbq_entry->sli4_lxritag = NO_XRI; 7069 iocbq_entry->sli4_xritag = NO_XRI; 7070 7071 spin_lock_irq(&phba->hbalock); 7072 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 7073 phba->total_iocbq_bufs++; 7074 spin_unlock_irq(&phba->hbalock); 7075 } 7076 7077 return 0; 7078 7079 out_free_iocbq: 7080 lpfc_free_iocb_list(phba); 7081 7082 return -ENOMEM; 7083 } 7084 7085 /** 7086 * lpfc_free_sgl_list - Free a given sgl list. 7087 * @phba: pointer to lpfc hba data structure. 7088 * @sglq_list: pointer to the head of sgl list. 7089 * 7090 * This routine is invoked to free a given sgl list and memory. 7091 **/ 7092 void 7093 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 7094 { 7095 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7096 7097 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 7098 list_del(&sglq_entry->list); 7099 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 7100 kfree(sglq_entry); 7101 } 7102 } 7103 7104 /** 7105 * lpfc_free_els_sgl_list - Free els sgl list. 7106 * @phba: pointer to lpfc hba data structure. 7107 * 7108 * This routine is invoked to free the driver's els sgl list and memory. 7109 **/ 7110 static void 7111 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 7112 { 7113 LIST_HEAD(sglq_list); 7114 7115 /* Retrieve all els sgls from driver list */ 7116 spin_lock_irq(&phba->hbalock); 7117 spin_lock(&phba->sli4_hba.sgl_list_lock); 7118 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 7119 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7120 spin_unlock_irq(&phba->hbalock); 7121 7122 /* Now free the sgl list */ 7123 lpfc_free_sgl_list(phba, &sglq_list); 7124 } 7125 7126 /** 7127 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 7128 * @phba: pointer to lpfc hba data structure. 7129 * 7130 * This routine is invoked to free the driver's nvmet sgl list and memory.
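 * Unlike lpfc_free_sgl_list() above, which returns buffers with lpfc_mbuf_free(), the nvmet entries are released with lpfc_nvmet_buf_free() and nvmet_xri_cnt is cleared so a later initialization cycle can allocate the sgls again.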
7131 **/ 7132 static void 7133 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 7134 { 7135 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7136 LIST_HEAD(sglq_list); 7137 7138 /* Retrieve all nvmet sgls from driver list */ 7139 spin_lock_irq(&phba->hbalock); 7140 spin_lock(&phba->sli4_hba.sgl_list_lock); 7141 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 7142 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7143 spin_unlock_irq(&phba->hbalock); 7144 7145 /* Now free the sgl list */ 7146 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 7147 list_del(&sglq_entry->list); 7148 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 7149 kfree(sglq_entry); 7150 } 7151 7152 /* Update the nvmet_xri_cnt to reflect no current sgls. 7153 * The next initialization cycle sets the count and allocates 7154 * the sgls over again. 7155 */ 7156 phba->sli4_hba.nvmet_xri_cnt = 0; 7157 } 7158 7159 /** 7160 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 7161 * @phba: pointer to lpfc hba data structure. 7162 * 7163 * This routine is invoked to allocate the driver's active sgl memory. 7164 * This array will hold the sglq_entry's for active IOs. 7165 **/ 7166 static int 7167 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 7168 { 7169 int size; 7170 size = sizeof(struct lpfc_sglq *); 7171 size *= phba->sli4_hba.max_cfg_param.max_xri; 7172 7173 phba->sli4_hba.lpfc_sglq_active_list = 7174 kzalloc(size, GFP_KERNEL); 7175 if (!phba->sli4_hba.lpfc_sglq_active_list) 7176 return -ENOMEM; 7177 return 0; 7178 } 7179 7180 /** 7181 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 7182 * @phba: pointer to lpfc hba data structure. 7183 * 7184 * This routine is invoked to walk through the array of active sglq entries 7185 * and free all of the resources. 7186 * This is just a place holder for now. 7187 **/ 7188 static void 7189 lpfc_free_active_sgl(struct lpfc_hba *phba) 7190 { 7191 kfree(phba->sli4_hba.lpfc_sglq_active_list); 7192 } 7193 7194 /** 7195 * lpfc_init_sgl_list - Allocate and initialize sgl list. 7196 * @phba: pointer to lpfc hba data structure. 7197 * 7198 * This routine is invoked to allocate and initizlize the driver's sgl 7199 * list and set up the sgl xritag tag array accordingly. 7200 * 7201 **/ 7202 static void 7203 lpfc_init_sgl_list(struct lpfc_hba *phba) 7204 { 7205 /* Initialize and populate the sglq list per host/VF. */ 7206 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 7207 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7208 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 7209 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7210 7211 /* els xri-sgl book keeping */ 7212 phba->sli4_hba.els_xri_cnt = 0; 7213 7214 /* nvme xri-buffer book keeping */ 7215 phba->sli4_hba.io_xri_cnt = 0; 7216 } 7217 7218 /** 7219 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 7220 * @phba: pointer to lpfc hba data structure. 7221 * 7222 * This routine is invoked to post rpi header templates to the 7223 * port for those SLI4 ports that do not support extents. This routine 7224 * posts a PAGE_SIZE memory region to the port to hold up to 7225 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 7226 * and should be called only when interrupts are disabled. 7227 * 7228 * Return codes 7229 * 0 - successful 7230 * -ERROR - otherwise. 
7231 **/ 7232 int 7233 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 7234 { 7235 int rc = 0; 7236 struct lpfc_rpi_hdr *rpi_hdr; 7237 7238 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 7239 if (!phba->sli4_hba.rpi_hdrs_in_use) 7240 return rc; 7241 if (phba->sli4_hba.extents_in_use) 7242 return -EIO; 7243 7244 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 7245 if (!rpi_hdr) { 7246 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7247 "0391 Error during rpi post operation\n"); 7248 lpfc_sli4_remove_rpis(phba); 7249 rc = -ENODEV; 7250 } 7251 7252 return rc; 7253 } 7254 7255 /** 7256 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 7257 * @phba: pointer to lpfc hba data structure. 7258 * 7259 * This routine is invoked to allocate a single 4KB memory region to 7260 * support rpis and stores them in the phba. This single region 7261 * provides support for up to 64 rpis. The region is used globally 7262 * by the device. 7263 * 7264 * Returns: 7265 * A valid rpi hdr on success. 7266 * A NULL pointer on any failure. 7267 **/ 7268 struct lpfc_rpi_hdr * 7269 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 7270 { 7271 uint16_t rpi_limit, curr_rpi_range; 7272 struct lpfc_dmabuf *dmabuf; 7273 struct lpfc_rpi_hdr *rpi_hdr; 7274 7275 /* 7276 * If the SLI4 port supports extents, posting the rpi header isn't 7277 * required. Set the expected maximum count and let the actual value 7278 * get set when extents are fully allocated. 7279 */ 7280 if (!phba->sli4_hba.rpi_hdrs_in_use) 7281 return NULL; 7282 if (phba->sli4_hba.extents_in_use) 7283 return NULL; 7284 7285 /* The limit on the logical index is just the max_rpi count. */ 7286 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 7287 7288 spin_lock_irq(&phba->hbalock); 7289 /* 7290 * Establish the starting RPI in this header block. The starting 7291 * rpi is normalized to a zero base because the physical rpi is 7292 * port based. 7293 */ 7294 curr_rpi_range = phba->sli4_hba.next_rpi; 7295 spin_unlock_irq(&phba->hbalock); 7296 7297 /* Reached full RPI range */ 7298 if (curr_rpi_range == rpi_limit) 7299 return NULL; 7300 7301 /* 7302 * First allocate the protocol header region for the port. The 7303 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 7304 */ 7305 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 7306 if (!dmabuf) 7307 return NULL; 7308 7309 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 7310 LPFC_HDR_TEMPLATE_SIZE, 7311 &dmabuf->phys, GFP_KERNEL); 7312 if (!dmabuf->virt) { 7313 rpi_hdr = NULL; 7314 goto err_free_dmabuf; 7315 } 7316 7317 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 7318 rpi_hdr = NULL; 7319 goto err_free_coherent; 7320 } 7321 7322 /* Save the rpi header data for cleanup later. */ 7323 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 7324 if (!rpi_hdr) 7325 goto err_free_coherent; 7326 7327 rpi_hdr->dmabuf = dmabuf; 7328 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 7329 rpi_hdr->page_count = 1; 7330 spin_lock_irq(&phba->hbalock); 7331 7332 /* The rpi_hdr stores the logical index only. 
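	 * The physical rpi the port uses on the wire is derived from this
	 * logical index later (logical index plus the rpi_base reported by
	 * READ_CONFIG, as currently understood).  Illustrative sketch only,
	 * with assumed values rather than data from any real adapter:
	 *
	 *	start_rpi = 64;				(logical)
	 *	physical rpi range = rpi_base + 64 ... rpi_base + 127
	 *
	 * since each header block covers LPFC_RPI_HDR_COUNT (64) rpis.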
*/ 7333 rpi_hdr->start_rpi = curr_rpi_range; 7334 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 7335 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 7336 7337 spin_unlock_irq(&phba->hbalock); 7338 return rpi_hdr; 7339 7340 err_free_coherent: 7341 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 7342 dmabuf->virt, dmabuf->phys); 7343 err_free_dmabuf: 7344 kfree(dmabuf); 7345 return NULL; 7346 } 7347 7348 /** 7349 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 7350 * @phba: pointer to lpfc hba data structure. 7351 * 7352 * This routine is invoked to remove all memory resources allocated 7353 * to support rpis for SLI4 ports not supporting extents. This routine 7354 * presumes the caller has released all rpis consumed by fabric or port 7355 * logins and is prepared to have the header pages removed. 7356 **/ 7357 void 7358 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 7359 { 7360 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 7361 7362 if (!phba->sli4_hba.rpi_hdrs_in_use) 7363 goto exit; 7364 7365 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 7366 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 7367 list_del(&rpi_hdr->list); 7368 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 7369 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 7370 kfree(rpi_hdr->dmabuf); 7371 kfree(rpi_hdr); 7372 } 7373 exit: 7374 /* There are no rpis available to the port now. */ 7375 phba->sli4_hba.next_rpi = 0; 7376 } 7377 7378 /** 7379 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 7380 * @pdev: pointer to pci device data structure. 7381 * 7382 * This routine is invoked to allocate the driver hba data structure for an 7383 * HBA device. If the allocation is successful, the phba reference to the 7384 * PCI device data structure is set. 7385 * 7386 * Return codes 7387 * pointer to @phba - successful 7388 * NULL - error 7389 **/ 7390 static struct lpfc_hba * 7391 lpfc_hba_alloc(struct pci_dev *pdev) 7392 { 7393 struct lpfc_hba *phba; 7394 7395 /* Allocate memory for HBA structure */ 7396 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 7397 if (!phba) { 7398 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 7399 return NULL; 7400 } 7401 7402 /* Set reference to PCI device in HBA structure */ 7403 phba->pcidev = pdev; 7404 7405 /* Assign an unused board number */ 7406 phba->brd_no = lpfc_get_instance(); 7407 if (phba->brd_no < 0) { 7408 kfree(phba); 7409 return NULL; 7410 } 7411 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 7412 7413 spin_lock_init(&phba->ct_ev_lock); 7414 INIT_LIST_HEAD(&phba->ct_ev_waiters); 7415 7416 return phba; 7417 } 7418 7419 /** 7420 * lpfc_hba_free - Free driver hba data structure with a device. 7421 * @phba: pointer to lpfc hba data structure. 7422 * 7423 * This routine is invoked to free the driver hba data structure with an 7424 * HBA device. 7425 **/ 7426 static void 7427 lpfc_hba_free(struct lpfc_hba *phba) 7428 { 7429 if (phba->sli_rev == LPFC_SLI_REV4) 7430 kfree(phba->sli4_hba.hdwq); 7431 7432 /* Release the driver assigned board number */ 7433 idr_remove(&lpfc_hba_index, phba->brd_no); 7434 7435 /* Free memory allocated with sli3 rings */ 7436 kfree(phba->sli.sli3_ring); 7437 phba->sli.sli3_ring = NULL; 7438 7439 kfree(phba); 7440 return; 7441 } 7442 7443 /** 7444 * lpfc_create_shost - Create hba physical port with associated scsi host. 7445 * @phba: pointer to lpfc hba data structure. 
7446 * 7447 * This routine is invoked to create HBA physical port and associate a SCSI 7448 * host with it. 7449 * 7450 * Return codes 7451 * 0 - successful 7452 * other values - error 7453 **/ 7454 static int 7455 lpfc_create_shost(struct lpfc_hba *phba) 7456 { 7457 struct lpfc_vport *vport; 7458 struct Scsi_Host *shost; 7459 7460 /* Initialize HBA FC structure */ 7461 phba->fc_edtov = FF_DEF_EDTOV; 7462 phba->fc_ratov = FF_DEF_RATOV; 7463 phba->fc_altov = FF_DEF_ALTOV; 7464 phba->fc_arbtov = FF_DEF_ARBTOV; 7465 7466 atomic_set(&phba->sdev_cnt, 0); 7467 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 7468 if (!vport) 7469 return -ENODEV; 7470 7471 shost = lpfc_shost_from_vport(vport); 7472 phba->pport = vport; 7473 7474 if (phba->nvmet_support) { 7475 /* Only 1 vport (pport) will support NVME target */ 7476 if (phba->txrdy_payload_pool == NULL) { 7477 phba->txrdy_payload_pool = dma_pool_create( 7478 "txrdy_pool", &phba->pcidev->dev, 7479 TXRDY_PAYLOAD_LEN, 16, 0); 7480 if (phba->txrdy_payload_pool) { 7481 phba->targetport = NULL; 7482 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 7483 lpfc_printf_log(phba, KERN_INFO, 7484 LOG_INIT | LOG_NVME_DISC, 7485 "6076 NVME Target Found\n"); 7486 } 7487 } 7488 } 7489 7490 lpfc_debugfs_initialize(vport); 7491 /* Put reference to SCSI host to driver's device private data */ 7492 pci_set_drvdata(phba->pcidev, shost); 7493 7494 /* 7495 * At this point we are fully registered with PSA. In addition, 7496 * any initial discovery should be completed. 7497 */ 7498 vport->load_flag |= FC_ALLOW_FDMI; 7499 if (phba->cfg_enable_SmartSAN || 7500 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 7501 7502 /* Setup appropriate attribute masks */ 7503 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 7504 if (phba->cfg_enable_SmartSAN) 7505 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 7506 else 7507 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 7508 } 7509 return 0; 7510 } 7511 7512 /** 7513 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 7514 * @phba: pointer to lpfc hba data structure. 7515 * 7516 * This routine is invoked to destroy HBA physical port and the associated 7517 * SCSI host. 7518 **/ 7519 static void 7520 lpfc_destroy_shost(struct lpfc_hba *phba) 7521 { 7522 struct lpfc_vport *vport = phba->pport; 7523 7524 /* Destroy physical port that associated with the SCSI host */ 7525 destroy_port(vport); 7526 7527 return; 7528 } 7529 7530 /** 7531 * lpfc_setup_bg - Setup Block guard structures and debug areas. 7532 * @phba: pointer to lpfc hba data structure. 7533 * @shost: the shost to be used to detect Block guard settings. 7534 * 7535 * This routine sets up the local Block guard protocol settings for @shost. 7536 * This routine also allocates memory for debugging bg buffers. 
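 *
 * A hedged illustration of the filtering performed below (the constants
 * are the standard SCSI midlayer flags; the chosen value is an example
 * only): if only DIX Type 1 is requested, full end-to-end Type 1
 * protection is registered:
 *
 *	mask = SHOST_DIX_TYPE1_PROTECTION;
 *	if (mask == SHOST_DIX_TYPE1_PROTECTION)
 *		mask |= SHOST_DIF_TYPE1_PROTECTION;
 *	scsi_host_set_prot(shost, mask);
 *	scsi_host_set_guard(shost, phba->cfg_prot_guard);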
7537 **/ 7538 static void 7539 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 7540 { 7541 uint32_t old_mask; 7542 uint32_t old_guard; 7543 7544 int pagecnt = 10; 7545 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7546 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7547 "1478 Registering BlockGuard with the " 7548 "SCSI layer\n"); 7549 7550 old_mask = phba->cfg_prot_mask; 7551 old_guard = phba->cfg_prot_guard; 7552 7553 /* Only allow supported values */ 7554 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 7555 SHOST_DIX_TYPE0_PROTECTION | 7556 SHOST_DIX_TYPE1_PROTECTION); 7557 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 7558 SHOST_DIX_GUARD_CRC); 7559 7560 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 7561 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 7562 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 7563 7564 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7565 if ((old_mask != phba->cfg_prot_mask) || 7566 (old_guard != phba->cfg_prot_guard)) 7567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7568 "1475 Registering BlockGuard with the " 7569 "SCSI layer: mask %d guard %d\n", 7570 phba->cfg_prot_mask, 7571 phba->cfg_prot_guard); 7572 7573 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7574 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7575 } else 7576 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7577 "1479 Not Registering BlockGuard with the SCSI " 7578 "layer, Bad protection parameters: %d %d\n", 7579 old_mask, old_guard); 7580 } 7581 7582 if (!_dump_buf_data) { 7583 while (pagecnt) { 7584 spin_lock_init(&_dump_buf_lock); 7585 _dump_buf_data = 7586 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 7587 if (_dump_buf_data) { 7588 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7589 "9043 BLKGRD: allocated %d pages for " 7590 "_dump_buf_data at 0x%p\n", 7591 (1 << pagecnt), _dump_buf_data); 7592 _dump_buf_data_order = pagecnt; 7593 memset(_dump_buf_data, 0, 7594 ((1 << PAGE_SHIFT) << pagecnt)); 7595 break; 7596 } else 7597 --pagecnt; 7598 } 7599 if (!_dump_buf_data_order) 7600 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7601 "9044 BLKGRD: ERROR unable to allocate " 7602 "memory for hexdump\n"); 7603 } else 7604 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7605 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 7606 "\n", _dump_buf_data); 7607 if (!_dump_buf_dif) { 7608 while (pagecnt) { 7609 _dump_buf_dif = 7610 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 7611 if (_dump_buf_dif) { 7612 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7613 "9046 BLKGRD: allocated %d pages for " 7614 "_dump_buf_dif at 0x%p\n", 7615 (1 << pagecnt), _dump_buf_dif); 7616 _dump_buf_dif_order = pagecnt; 7617 memset(_dump_buf_dif, 0, 7618 ((1 << PAGE_SHIFT) << pagecnt)); 7619 break; 7620 } else 7621 --pagecnt; 7622 } 7623 if (!_dump_buf_dif_order) 7624 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7625 "9047 BLKGRD: ERROR unable to allocate " 7626 "memory for hexdump\n"); 7627 } else 7628 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7629 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 7630 _dump_buf_dif); 7631 } 7632 7633 /** 7634 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7635 * @phba: pointer to lpfc hba data structure. 7636 * 7637 * This routine is invoked to perform all the necessary post initialization 7638 * setup for the device. 
7639 **/ 7640 static void 7641 lpfc_post_init_setup(struct lpfc_hba *phba) 7642 { 7643 struct Scsi_Host *shost; 7644 struct lpfc_adapter_event_header adapter_event; 7645 7646 /* Get the default values for Model Name and Description */ 7647 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7648 7649 /* 7650 * hba setup may have changed the hba_queue_depth so we need to 7651 * adjust the value of can_queue. 7652 */ 7653 shost = pci_get_drvdata(phba->pcidev); 7654 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7655 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 7656 lpfc_setup_bg(phba, shost); 7657 7658 lpfc_host_attrib_init(shost); 7659 7660 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7661 spin_lock_irq(shost->host_lock); 7662 lpfc_poll_start_timer(phba); 7663 spin_unlock_irq(shost->host_lock); 7664 } 7665 7666 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7667 "0428 Perform SCSI scan\n"); 7668 /* Send board arrival event to upper layer */ 7669 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7670 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7671 fc_host_post_vendor_event(shost, fc_get_event_number(), 7672 sizeof(adapter_event), 7673 (char *) &adapter_event, 7674 LPFC_NL_VENDOR_ID); 7675 return; 7676 } 7677 7678 /** 7679 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7680 * @phba: pointer to lpfc hba data structure. 7681 * 7682 * This routine is invoked to set up the PCI device memory space for device 7683 * with SLI-3 interface spec. 7684 * 7685 * Return codes 7686 * 0 - successful 7687 * other values - error 7688 **/ 7689 static int 7690 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7691 { 7692 struct pci_dev *pdev = phba->pcidev; 7693 unsigned long bar0map_len, bar2map_len; 7694 int i, hbq_count; 7695 void *ptr; 7696 int error; 7697 7698 if (!pdev) 7699 return -ENODEV; 7700 7701 /* Set the device DMA mask size */ 7702 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7703 if (error) 7704 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7705 if (error) 7706 return error; 7707 error = -ENODEV; 7708 7709 /* Get the bus address of Bar0 and Bar2 and the number of bytes 7710 * required by each mapping. 7711 */ 7712 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7713 bar0map_len = pci_resource_len(pdev, 0); 7714 7715 phba->pci_bar2_map = pci_resource_start(pdev, 2); 7716 bar2map_len = pci_resource_len(pdev, 2); 7717 7718 /* Map HBA SLIM to a kernel virtual address. */ 7719 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 7720 if (!phba->slim_memmap_p) { 7721 dev_printk(KERN_ERR, &pdev->dev, 7722 "ioremap failed for SLIM memory.\n"); 7723 goto out; 7724 } 7725 7726 /* Map HBA Control Registers to a kernel virtual address. 
*/ 7727 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 7728 if (!phba->ctrl_regs_memmap_p) { 7729 dev_printk(KERN_ERR, &pdev->dev, 7730 "ioremap failed for HBA control registers.\n"); 7731 goto out_iounmap_slim; 7732 } 7733 7734 /* Allocate memory for SLI-2 structures */ 7735 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7736 &phba->slim2p.phys, GFP_KERNEL); 7737 if (!phba->slim2p.virt) 7738 goto out_iounmap; 7739 7740 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7741 phba->mbox_ext = (phba->slim2p.virt + 7742 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7743 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7744 phba->IOCBs = (phba->slim2p.virt + 7745 offsetof(struct lpfc_sli2_slim, IOCBs)); 7746 7747 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7748 lpfc_sli_hbq_size(), 7749 &phba->hbqslimp.phys, 7750 GFP_KERNEL); 7751 if (!phba->hbqslimp.virt) 7752 goto out_free_slim; 7753 7754 hbq_count = lpfc_sli_hbq_count(); 7755 ptr = phba->hbqslimp.virt; 7756 for (i = 0; i < hbq_count; ++i) { 7757 phba->hbqs[i].hbq_virt = ptr; 7758 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7759 ptr += (lpfc_hbq_defs[i]->entry_count * 7760 sizeof(struct lpfc_hbq_entry)); 7761 } 7762 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7763 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7764 7765 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7766 7767 phba->MBslimaddr = phba->slim_memmap_p; 7768 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7769 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7770 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7771 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7772 7773 return 0; 7774 7775 out_free_slim: 7776 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7777 phba->slim2p.virt, phba->slim2p.phys); 7778 out_iounmap: 7779 iounmap(phba->ctrl_regs_memmap_p); 7780 out_iounmap_slim: 7781 iounmap(phba->slim_memmap_p); 7782 out: 7783 return error; 7784 } 7785 7786 /** 7787 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7788 * @phba: pointer to lpfc hba data structure. 7789 * 7790 * This routine is invoked to unset the PCI device memory space for device 7791 * with SLI-3 interface spec. 7792 **/ 7793 static void 7794 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 7795 { 7796 struct pci_dev *pdev; 7797 7798 /* Obtain PCI device reference */ 7799 if (!phba->pcidev) 7800 return; 7801 else 7802 pdev = phba->pcidev; 7803 7804 /* Free coherent DMA memory allocated */ 7805 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7806 phba->hbqslimp.virt, phba->hbqslimp.phys); 7807 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7808 phba->slim2p.virt, phba->slim2p.phys); 7809 7810 /* I/O memory unmap */ 7811 iounmap(phba->ctrl_regs_memmap_p); 7812 iounmap(phba->slim_memmap_p); 7813 7814 return; 7815 } 7816 7817 /** 7818 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 7819 * @phba: pointer to lpfc hba data structure. 7820 * 7821 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 7822 * done and check status. 7823 * 7824 * Return 0 if successful, otherwise -ENODEV. 
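 *
 * The 30 second bound comes from the polling loop in the function body,
 * which is essentially (sketch only, register details omitted):
 *
 *	for (i = 0; i < 3000; i++) {
 *		read the port semaphore register (SMPHR);
 *		if (POST error or port ready)
 *			break;
 *		msleep(10);
 *	}
 *
 * i.e. roughly 3000 iterations of 10 ms each, plus scheduling slack.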
7825 **/ 7826 int 7827 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 7828 { 7829 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 7830 struct lpfc_register reg_data; 7831 int i, port_error = 0; 7832 uint32_t if_type; 7833 7834 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 7835 memset(&reg_data, 0, sizeof(reg_data)); 7836 if (!phba->sli4_hba.PSMPHRregaddr) 7837 return -ENODEV; 7838 7839 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 7840 for (i = 0; i < 3000; i++) { 7841 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 7842 &portsmphr_reg.word0) || 7843 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 7844 /* Port has a fatal POST error, break out */ 7845 port_error = -ENODEV; 7846 break; 7847 } 7848 if (LPFC_POST_STAGE_PORT_READY == 7849 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 7850 break; 7851 msleep(10); 7852 } 7853 7854 /* 7855 * If there was a port error during POST, then don't proceed with 7856 * other register reads as the data may not be valid. Just exit. 7857 */ 7858 if (port_error) { 7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7860 "1408 Port Failed POST - portsmphr=0x%x, " 7861 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 7862 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 7863 portsmphr_reg.word0, 7864 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 7865 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 7866 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 7867 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 7868 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 7869 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 7870 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 7871 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 7872 } else { 7873 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7874 "2534 Device Info: SLIFamily=0x%x, " 7875 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 7876 "SLIHint_2=0x%x, FT=0x%x\n", 7877 bf_get(lpfc_sli_intf_sli_family, 7878 &phba->sli4_hba.sli_intf), 7879 bf_get(lpfc_sli_intf_slirev, 7880 &phba->sli4_hba.sli_intf), 7881 bf_get(lpfc_sli_intf_if_type, 7882 &phba->sli4_hba.sli_intf), 7883 bf_get(lpfc_sli_intf_sli_hint1, 7884 &phba->sli4_hba.sli_intf), 7885 bf_get(lpfc_sli_intf_sli_hint2, 7886 &phba->sli4_hba.sli_intf), 7887 bf_get(lpfc_sli_intf_func_type, 7888 &phba->sli4_hba.sli_intf)); 7889 /* 7890 * Check for other Port errors during the initialization 7891 * process. Fail the load if the port did not come up 7892 * correctly. 7893 */ 7894 if_type = bf_get(lpfc_sli_intf_if_type, 7895 &phba->sli4_hba.sli_intf); 7896 switch (if_type) { 7897 case LPFC_SLI_INTF_IF_TYPE_0: 7898 phba->sli4_hba.ue_mask_lo = 7899 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 7900 phba->sli4_hba.ue_mask_hi = 7901 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 7902 uerrlo_reg.word0 = 7903 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 7904 uerrhi_reg.word0 = 7905 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 7906 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 7907 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 7908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7909 "1422 Unrecoverable Error " 7910 "Detected during POST " 7911 "uerr_lo_reg=0x%x, " 7912 "uerr_hi_reg=0x%x, " 7913 "ue_mask_lo_reg=0x%x, " 7914 "ue_mask_hi_reg=0x%x\n", 7915 uerrlo_reg.word0, 7916 uerrhi_reg.word0, 7917 phba->sli4_hba.ue_mask_lo, 7918 phba->sli4_hba.ue_mask_hi); 7919 port_error = -ENODEV; 7920 } 7921 break; 7922 case LPFC_SLI_INTF_IF_TYPE_2: 7923 case LPFC_SLI_INTF_IF_TYPE_6: 7924 /* Final checks. The port status should be clean.
*/ 7925 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 7926 &reg_data.word0) || 7927 (bf_get(lpfc_sliport_status_err, &reg_data) && 7928 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 7929 phba->work_status[0] = 7930 readl(phba->sli4_hba.u.if_type2. 7931 ERR1regaddr); 7932 phba->work_status[1] = 7933 readl(phba->sli4_hba.u.if_type2. 7934 ERR2regaddr); 7935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7936 "2888 Unrecoverable port error " 7937 "following POST: port status reg " 7938 "0x%x, port_smphr reg 0x%x, " 7939 "error 1=0x%x, error 2=0x%x\n", 7940 reg_data.word0, 7941 portsmphr_reg.word0, 7942 phba->work_status[0], 7943 phba->work_status[1]); 7944 port_error = -ENODEV; 7945 } 7946 break; 7947 case LPFC_SLI_INTF_IF_TYPE_1: 7948 default: 7949 break; 7950 } 7951 } 7952 return port_error; 7953 } 7954 7955 /** 7956 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 7957 * @phba: pointer to lpfc hba data structure. 7958 * @if_type: The SLI4 interface type getting configured. 7959 * 7960 * This routine is invoked to set up SLI4 BAR0 PCI config space register 7961 * memory map. 7962 **/ 7963 static void 7964 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 7965 { 7966 switch (if_type) { 7967 case LPFC_SLI_INTF_IF_TYPE_0: 7968 phba->sli4_hba.u.if_type0.UERRLOregaddr = 7969 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 7970 phba->sli4_hba.u.if_type0.UERRHIregaddr = 7971 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 7972 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 7973 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 7974 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 7975 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 7976 phba->sli4_hba.SLIINTFregaddr = 7977 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 7978 break; 7979 case LPFC_SLI_INTF_IF_TYPE_2: 7980 phba->sli4_hba.u.if_type2.EQDregaddr = 7981 phba->sli4_hba.conf_regs_memmap_p + 7982 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 7983 phba->sli4_hba.u.if_type2.ERR1regaddr = 7984 phba->sli4_hba.conf_regs_memmap_p + 7985 LPFC_CTL_PORT_ER1_OFFSET; 7986 phba->sli4_hba.u.if_type2.ERR2regaddr = 7987 phba->sli4_hba.conf_regs_memmap_p + 7988 LPFC_CTL_PORT_ER2_OFFSET; 7989 phba->sli4_hba.u.if_type2.CTRLregaddr = 7990 phba->sli4_hba.conf_regs_memmap_p + 7991 LPFC_CTL_PORT_CTL_OFFSET; 7992 phba->sli4_hba.u.if_type2.STATUSregaddr = 7993 phba->sli4_hba.conf_regs_memmap_p + 7994 LPFC_CTL_PORT_STA_OFFSET; 7995 phba->sli4_hba.SLIINTFregaddr = 7996 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 7997 phba->sli4_hba.PSMPHRregaddr = 7998 phba->sli4_hba.conf_regs_memmap_p + 7999 LPFC_CTL_PORT_SEM_OFFSET; 8000 phba->sli4_hba.RQDBregaddr = 8001 phba->sli4_hba.conf_regs_memmap_p + 8002 LPFC_ULP0_RQ_DOORBELL; 8003 phba->sli4_hba.WQDBregaddr = 8004 phba->sli4_hba.conf_regs_memmap_p + 8005 LPFC_ULP0_WQ_DOORBELL; 8006 phba->sli4_hba.CQDBregaddr = 8007 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 8008 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8009 phba->sli4_hba.MQDBregaddr = 8010 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 8011 phba->sli4_hba.BMBXregaddr = 8012 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8013 break; 8014 case LPFC_SLI_INTF_IF_TYPE_6: 8015 phba->sli4_hba.u.if_type2.EQDregaddr = 8016 phba->sli4_hba.conf_regs_memmap_p + 8017 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8018 phba->sli4_hba.u.if_type2.ERR1regaddr = 8019 phba->sli4_hba.conf_regs_memmap_p + 8020 LPFC_CTL_PORT_ER1_OFFSET; 8021 phba->sli4_hba.u.if_type2.ERR2regaddr = 8022 phba->sli4_hba.conf_regs_memmap_p
+ 8023 LPFC_CTL_PORT_ER2_OFFSET; 8024 phba->sli4_hba.u.if_type2.CTRLregaddr = 8025 phba->sli4_hba.conf_regs_memmap_p + 8026 LPFC_CTL_PORT_CTL_OFFSET; 8027 phba->sli4_hba.u.if_type2.STATUSregaddr = 8028 phba->sli4_hba.conf_regs_memmap_p + 8029 LPFC_CTL_PORT_STA_OFFSET; 8030 phba->sli4_hba.PSMPHRregaddr = 8031 phba->sli4_hba.conf_regs_memmap_p + 8032 LPFC_CTL_PORT_SEM_OFFSET; 8033 phba->sli4_hba.BMBXregaddr = 8034 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8035 break; 8036 case LPFC_SLI_INTF_IF_TYPE_1: 8037 default: 8038 dev_printk(KERN_ERR, &phba->pcidev->dev, 8039 "FATAL - unsupported SLI4 interface type - %d\n", 8040 if_type); 8041 break; 8042 } 8043 } 8044 8045 /** 8046 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 8047 * @phba: pointer to lpfc hba data structure. * @if_type: The SLI4 interface type getting configured. 8048 * 8049 * This routine is invoked to set up SLI4 BAR1 register memory map. 8050 **/ 8051 static void 8052 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 8053 { 8054 switch (if_type) { 8055 case LPFC_SLI_INTF_IF_TYPE_0: 8056 phba->sli4_hba.PSMPHRregaddr = 8057 phba->sli4_hba.ctrl_regs_memmap_p + 8058 LPFC_SLIPORT_IF0_SMPHR; 8059 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8060 LPFC_HST_ISR0; 8061 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8062 LPFC_HST_IMR0; 8063 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8064 LPFC_HST_ISCR0; 8065 break; 8066 case LPFC_SLI_INTF_IF_TYPE_6: 8067 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8068 LPFC_IF6_RQ_DOORBELL; 8069 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8070 LPFC_IF6_WQ_DOORBELL; 8071 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8072 LPFC_IF6_CQ_DOORBELL; 8073 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8074 LPFC_IF6_EQ_DOORBELL; 8075 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8076 LPFC_IF6_MQ_DOORBELL; 8077 break; 8078 case LPFC_SLI_INTF_IF_TYPE_2: 8079 case LPFC_SLI_INTF_IF_TYPE_1: 8080 default: 8081 dev_err(&phba->pcidev->dev, 8082 "FATAL - unsupported SLI4 interface type - %d\n", 8083 if_type); 8084 break; 8085 } 8086 } 8087 8088 /** 8089 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 8090 * @phba: pointer to lpfc hba data structure. 8091 * @vf: virtual function number 8092 * 8093 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 8094 * based on the given virtual function number, @vf. 8095 * 8096 * Return 0 if successful, otherwise -ENODEV.
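 *
 * Every doorbell address below is formed the same way: one
 * LPFC_VFR_PAGE_SIZE page of doorbells per virtual function inside BAR2.
 * A worked example with an assumed @vf value of 2:
 *
 *	RQDBregaddr = drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE +
 *		      LPFC_ULP0_RQ_DOORBELL;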
8097 **/ 8098 static int 8099 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 8100 { 8101 if (vf > LPFC_VIR_FUNC_MAX) 8102 return -ENODEV; 8103 8104 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8105 vf * LPFC_VFR_PAGE_SIZE + 8106 LPFC_ULP0_RQ_DOORBELL); 8107 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8108 vf * LPFC_VFR_PAGE_SIZE + 8109 LPFC_ULP0_WQ_DOORBELL); 8110 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8111 vf * LPFC_VFR_PAGE_SIZE + 8112 LPFC_EQCQ_DOORBELL); 8113 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8114 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8115 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 8116 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8117 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 8118 return 0; 8119 } 8120 8121 /** 8122 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 8123 * @phba: pointer to lpfc hba data structure. 8124 * 8125 * This routine is invoked to create the bootstrap mailbox 8126 * region consistent with the SLI-4 interface spec. This 8127 * routine allocates all memory necessary to communicate 8128 * mailbox commands to the port and sets up all alignment 8129 * needs. No locks are expected to be held when calling 8130 * this routine. 8131 * 8132 * Return codes 8133 * 0 - successful 8134 * -ENOMEM - could not allocate memory. 8135 **/ 8136 static int 8137 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 8138 { 8139 uint32_t bmbx_size; 8140 struct lpfc_dmabuf *dmabuf; 8141 struct dma_address *dma_address; 8142 uint32_t pa_addr; 8143 uint64_t phys_addr; 8144 8145 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8146 if (!dmabuf) 8147 return -ENOMEM; 8148 8149 /* 8150 * The bootstrap mailbox region consists of 2 parts 8151 * plus an alignment restriction of 16 bytes. 8152 */ 8153 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 8154 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 8155 &dmabuf->phys, GFP_KERNEL); 8156 if (!dmabuf->virt) { 8157 kfree(dmabuf); 8158 return -ENOMEM; 8159 } 8160 8161 /* 8162 * Initialize the bootstrap mailbox pointers now so that the register 8163 * operations are simple later. The mailbox dma address is required 8164 * to be 16-byte aligned. Also align the virtual memory as each 8165 * mailbox is copied into the bmbx mailbox region before issuing the 8166 * command to the port. 8167 */ 8168 phba->sli4_hba.bmbx.dmabuf = dmabuf; 8169 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 8170 8171 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 8172 LPFC_ALIGN_16_BYTE); 8173 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 8174 LPFC_ALIGN_16_BYTE); 8175 8176 /* 8177 * Set the high and low physical addresses now. The SLI4 alignment 8178 * requirement is 16 bytes and the mailbox is posted to the port 8179 * as two 30-bit addresses. The other data is a bit marking whether 8180 * the 30-bit address is the high or low address. 8181 * Upcast bmbx aphys to 64bits so shift instruction compiles 8182 * clean on 32 bit machines.
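	 * Schematically, as a sketch of the arithmetic that follows (not an
	 * additional hardware requirement): bits 63:34 of the aligned
	 * physical address form addr_hi and bits 33:4 form addr_lo, each
	 * placed in bits 31:2 of its register with the BIT1 marker in the
	 * low bits:
	 *
	 *	addr_hi = ((aphys >> 34) & 0x3fffffff) << 2 | LPFC_BMBX_BIT1_ADDR_HI
	 *	addr_lo = ((aphys >>  4) & 0x3fffffff) << 2 | LPFC_BMBX_BIT1_ADDR_LO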
8183 */ 8184 dma_address = &phba->sli4_hba.bmbx.dma_address; 8185 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 8186 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 8187 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 8188 LPFC_BMBX_BIT1_ADDR_HI); 8189 8190 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 8191 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 8192 LPFC_BMBX_BIT1_ADDR_LO); 8193 return 0; 8194 } 8195 8196 /** 8197 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 8198 * @phba: pointer to lpfc hba data structure. 8199 * 8200 * This routine is invoked to tear down the bootstrap mailbox 8201 * region and release all host resources. This routine requires 8202 * the caller to ensure all mailbox commands have been recovered, no 8203 * additional mailbox commands are sent, and interrupts are disabled 8204 * before calling this routine. 8205 * 8206 **/ 8207 static void 8208 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 8209 { 8210 dma_free_coherent(&phba->pcidev->dev, 8211 phba->sli4_hba.bmbx.bmbx_size, 8212 phba->sli4_hba.bmbx.dmabuf->virt, 8213 phba->sli4_hba.bmbx.dmabuf->phys); 8214 8215 kfree(phba->sli4_hba.bmbx.dmabuf); 8216 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 8217 } 8218 8219 /** 8220 * lpfc_sli4_read_config - Get the config parameters. 8221 * @phba: pointer to lpfc hba data structure. 8222 * 8223 * This routine is invoked to read the configuration parameters from the HBA. 8224 * The configuration parameters are used to set the base and maximum values 8225 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource 8226 * allocation for the port. 8227 * 8228 * Return codes 8229 * 0 - successful 8230 * -ENOMEM - No available memory 8231 * -EIO - The mailbox failed to complete successfully.
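 *
 * Besides capturing the raw limits, the routine also budgets the
 * fast-path queues.  A worked example with assumed READ_CONFIG values
 * (not taken from any particular adapter): max_wq = 128, max_cq = 128,
 * max_eq = 64 gives
 *
 *	qmin = min(128, 128, 64) - 4 = 60;	(ELS, NVME LS, MBOX + 1)
 *	qmin = 60 / 2 = 30;			(NVME enabled, not NVMET)
 *
 * after which cfg_irq_chann and cfg_hdw_queue are clamped to qmin.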
8232 **/ 8233 int 8234 lpfc_sli4_read_config(struct lpfc_hba *phba) 8235 { 8236 LPFC_MBOXQ_t *pmb; 8237 struct lpfc_mbx_read_config *rd_config; 8238 union lpfc_sli4_cfg_shdr *shdr; 8239 uint32_t shdr_status, shdr_add_status; 8240 struct lpfc_mbx_get_func_cfg *get_func_cfg; 8241 struct lpfc_rsrc_desc_fcfcoe *desc; 8242 char *pdesc_0; 8243 uint16_t forced_link_speed; 8244 uint32_t if_type, qmin; 8245 int length, i, rc = 0, rc2; 8246 8247 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8248 if (!pmb) { 8249 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8250 "2011 Unable to allocate memory for issuing " 8251 "SLI_CONFIG_SPECIAL mailbox command\n"); 8252 return -ENOMEM; 8253 } 8254 8255 lpfc_read_config(phba, pmb); 8256 8257 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8258 if (rc != MBX_SUCCESS) { 8259 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8260 "2012 Mailbox failed , mbxCmd x%x " 8261 "READ_CONFIG, mbxStatus x%x\n", 8262 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8263 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8264 rc = -EIO; 8265 } else { 8266 rd_config = &pmb->u.mqe.un.rd_config; 8267 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 8268 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 8269 phba->sli4_hba.lnk_info.lnk_tp = 8270 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 8271 phba->sli4_hba.lnk_info.lnk_no = 8272 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 8273 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8274 "3081 lnk_type:%d, lnk_numb:%d\n", 8275 phba->sli4_hba.lnk_info.lnk_tp, 8276 phba->sli4_hba.lnk_info.lnk_no); 8277 } else 8278 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8279 "3082 Mailbox (x%x) returned ldv:x0\n", 8280 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 8281 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 8282 phba->bbcredit_support = 1; 8283 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 8284 } 8285 8286 phba->sli4_hba.conf_trunk = 8287 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 8288 phba->sli4_hba.extents_in_use = 8289 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 8290 phba->sli4_hba.max_cfg_param.max_xri = 8291 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 8292 phba->sli4_hba.max_cfg_param.xri_base = 8293 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 8294 phba->sli4_hba.max_cfg_param.max_vpi = 8295 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 8296 /* Limit the max we support */ 8297 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 8298 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 8299 phba->sli4_hba.max_cfg_param.vpi_base = 8300 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 8301 phba->sli4_hba.max_cfg_param.max_rpi = 8302 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 8303 phba->sli4_hba.max_cfg_param.rpi_base = 8304 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 8305 phba->sli4_hba.max_cfg_param.max_vfi = 8306 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 8307 phba->sli4_hba.max_cfg_param.vfi_base = 8308 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 8309 phba->sli4_hba.max_cfg_param.max_fcfi = 8310 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 8311 phba->sli4_hba.max_cfg_param.max_eq = 8312 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 8313 phba->sli4_hba.max_cfg_param.max_rq = 8314 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 8315 phba->sli4_hba.max_cfg_param.max_wq = 8316 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 8317 phba->sli4_hba.max_cfg_param.max_cq = 8318 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 8319 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 8320 phba->sli4_hba.next_xri = 
phba->sli4_hba.max_cfg_param.xri_base; 8321 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 8322 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 8323 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 8324 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 8325 phba->max_vports = phba->max_vpi; 8326 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8327 "2003 cfg params Extents? %d " 8328 "XRI(B:%d M:%d), " 8329 "VPI(B:%d M:%d) " 8330 "VFI(B:%d M:%d) " 8331 "RPI(B:%d M:%d) " 8332 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", 8333 phba->sli4_hba.extents_in_use, 8334 phba->sli4_hba.max_cfg_param.xri_base, 8335 phba->sli4_hba.max_cfg_param.max_xri, 8336 phba->sli4_hba.max_cfg_param.vpi_base, 8337 phba->sli4_hba.max_cfg_param.max_vpi, 8338 phba->sli4_hba.max_cfg_param.vfi_base, 8339 phba->sli4_hba.max_cfg_param.max_vfi, 8340 phba->sli4_hba.max_cfg_param.rpi_base, 8341 phba->sli4_hba.max_cfg_param.max_rpi, 8342 phba->sli4_hba.max_cfg_param.max_fcfi, 8343 phba->sli4_hba.max_cfg_param.max_eq, 8344 phba->sli4_hba.max_cfg_param.max_cq, 8345 phba->sli4_hba.max_cfg_param.max_wq, 8346 phba->sli4_hba.max_cfg_param.max_rq); 8347 8348 /* 8349 * Calculate queue resources based on how 8350 * many WQ/CQ/EQs are available. 8351 */ 8352 qmin = phba->sli4_hba.max_cfg_param.max_wq; 8353 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 8354 qmin = phba->sli4_hba.max_cfg_param.max_cq; 8355 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 8356 qmin = phba->sli4_hba.max_cfg_param.max_eq; 8357 /* 8358 * Whats left after this can go toward NVME / FCP. 8359 * The minus 4 accounts for ELS, NVME LS, MBOX 8360 * plus one extra. When configured for 8361 * NVMET, FCP io channel WQs are not created. 8362 */ 8363 qmin -= 4; 8364 8365 /* If NVME is configured, double the number of CQ/WQs needed */ 8366 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 8367 !phba->nvmet_support) 8368 qmin /= 2; 8369 8370 /* Check to see if there is enough for NVME */ 8371 if ((phba->cfg_irq_chann > qmin) || 8372 (phba->cfg_hdw_queue > qmin)) { 8373 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8374 "2005 Reducing Queues: " 8375 "WQ %d CQ %d EQ %d: min %d: " 8376 "IRQ %d HDWQ %d\n", 8377 phba->sli4_hba.max_cfg_param.max_wq, 8378 phba->sli4_hba.max_cfg_param.max_cq, 8379 phba->sli4_hba.max_cfg_param.max_eq, 8380 qmin, phba->cfg_irq_chann, 8381 phba->cfg_hdw_queue); 8382 8383 if (phba->cfg_irq_chann > qmin) 8384 phba->cfg_irq_chann = qmin; 8385 if (phba->cfg_hdw_queue > qmin) 8386 phba->cfg_hdw_queue = qmin; 8387 } 8388 } 8389 8390 if (rc) 8391 goto read_cfg_out; 8392 8393 /* Update link speed if forced link speed is supported */ 8394 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8395 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 8396 forced_link_speed = 8397 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 8398 if (forced_link_speed) { 8399 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 8400 8401 switch (forced_link_speed) { 8402 case LINK_SPEED_1G: 8403 phba->cfg_link_speed = 8404 LPFC_USER_LINK_SPEED_1G; 8405 break; 8406 case LINK_SPEED_2G: 8407 phba->cfg_link_speed = 8408 LPFC_USER_LINK_SPEED_2G; 8409 break; 8410 case LINK_SPEED_4G: 8411 phba->cfg_link_speed = 8412 LPFC_USER_LINK_SPEED_4G; 8413 break; 8414 case LINK_SPEED_8G: 8415 phba->cfg_link_speed = 8416 LPFC_USER_LINK_SPEED_8G; 8417 break; 8418 case LINK_SPEED_10G: 8419 phba->cfg_link_speed = 8420 LPFC_USER_LINK_SPEED_10G; 8421 break; 8422 case LINK_SPEED_16G: 8423 phba->cfg_link_speed = 8424 LPFC_USER_LINK_SPEED_16G; 8425 break; 8426 case LINK_SPEED_32G: 8427 
phba->cfg_link_speed = 8428 LPFC_USER_LINK_SPEED_32G; 8429 break; 8430 case LINK_SPEED_64G: 8431 phba->cfg_link_speed = 8432 LPFC_USER_LINK_SPEED_64G; 8433 break; 8434 case 0xffff: 8435 phba->cfg_link_speed = 8436 LPFC_USER_LINK_SPEED_AUTO; 8437 break; 8438 default: 8439 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8440 "0047 Unrecognized link " 8441 "speed : %d\n", 8442 forced_link_speed); 8443 phba->cfg_link_speed = 8444 LPFC_USER_LINK_SPEED_AUTO; 8445 } 8446 } 8447 } 8448 8449 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 8450 length = phba->sli4_hba.max_cfg_param.max_xri - 8451 lpfc_sli4_get_els_iocb_cnt(phba); 8452 if (phba->cfg_hba_queue_depth > length) { 8453 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8454 "3361 HBA queue depth changed from %d to %d\n", 8455 phba->cfg_hba_queue_depth, length); 8456 phba->cfg_hba_queue_depth = length; 8457 } 8458 8459 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 8460 LPFC_SLI_INTF_IF_TYPE_2) 8461 goto read_cfg_out; 8462 8463 /* get the pf# and vf# for SLI4 if_type 2 port */ 8464 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 8465 sizeof(struct lpfc_sli4_cfg_mhdr)); 8466 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 8467 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 8468 length, LPFC_SLI4_MBX_EMBED); 8469 8470 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8471 shdr = (union lpfc_sli4_cfg_shdr *) 8472 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 8473 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8474 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8475 if (rc2 || shdr_status || shdr_add_status) { 8476 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8477 "3026 Mailbox failed , mbxCmd x%x " 8478 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 8479 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8480 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8481 goto read_cfg_out; 8482 } 8483 8484 /* search for fc_fcoe resrouce descriptor */ 8485 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 8486 8487 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 8488 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 8489 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 8490 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 8491 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 8492 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 8493 goto read_cfg_out; 8494 8495 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 8496 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 8497 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 8498 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 8499 phba->sli4_hba.iov.pf_number = 8500 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 8501 phba->sli4_hba.iov.vf_number = 8502 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 8503 break; 8504 } 8505 } 8506 8507 if (i < LPFC_RSRC_DESC_MAX_NUM) 8508 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8509 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 8510 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 8511 phba->sli4_hba.iov.vf_number); 8512 else 8513 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8514 "3028 GET_FUNCTION_CONFIG: failed to find " 8515 "Resource Descriptor:x%x\n", 8516 LPFC_RSRC_DESC_TYPE_FCFCOE); 8517 8518 read_cfg_out: 8519 mempool_free(pmb, phba->mbox_mem_pool); 8520 return rc; 8521 } 8522 8523 /** 8524 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 8525 * @phba: pointer to lpfc hba data structure. 8526 * 8527 * This routine is invoked to setup the port-side endian order when 8528 * the port if_type is 0. This routine has no function for other 8529 * if_types. 
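 *
 * The mailbox payload is nothing more than the two well-known words used
 * in the function body (shown here as a sketch of what is sent):
 *
 *	uint32_t endian_mb_data[2] = { HOST_ENDIAN_LOW_WORD0,
 *				       HOST_ENDIAN_HIGH_WORD1 };
 *
 * which the port uses to detect the host's byte ordering.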
8530 * 8531 * Return codes 8532 * 0 - successful 8533 * -ENOMEM - No available memory 8534 * -EIO - The mailbox failed to complete successfully. 8535 **/ 8536 static int 8537 lpfc_setup_endian_order(struct lpfc_hba *phba) 8538 { 8539 LPFC_MBOXQ_t *mboxq; 8540 uint32_t if_type, rc = 0; 8541 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 8542 HOST_ENDIAN_HIGH_WORD1}; 8543 8544 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8545 switch (if_type) { 8546 case LPFC_SLI_INTF_IF_TYPE_0: 8547 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8548 GFP_KERNEL); 8549 if (!mboxq) { 8550 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8551 "0492 Unable to allocate memory for " 8552 "issuing SLI_CONFIG_SPECIAL mailbox " 8553 "command\n"); 8554 return -ENOMEM; 8555 } 8556 8557 /* 8558 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 8559 * two words to contain special data values and no other data. 8560 */ 8561 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 8562 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8563 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8564 if (rc != MBX_SUCCESS) { 8565 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8566 "0493 SLI_CONFIG_SPECIAL mailbox " 8567 "failed with status x%x\n", 8568 rc); 8569 rc = -EIO; 8570 } 8571 mempool_free(mboxq, phba->mbox_mem_pool); 8572 break; 8573 case LPFC_SLI_INTF_IF_TYPE_6: 8574 case LPFC_SLI_INTF_IF_TYPE_2: 8575 case LPFC_SLI_INTF_IF_TYPE_1: 8576 default: 8577 break; 8578 } 8579 return rc; 8580 } 8581 8582 /** 8583 * lpfc_sli4_queue_verify - Verify and update EQ counts 8584 * @phba: pointer to lpfc hba data structure. 8585 * 8586 * This routine is invoked to check the user settable queue counts for EQs. 8587 * After this routine is called the counts will be set to valid values that 8588 * adhere to the constraints of the system's interrupt vectors and the port's 8589 * queue resources. 
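 *
 * An assumed example of the clamping below (values illustrative only):
 * with nvmet_support set, cfg_irq_chann = 4 and cfg_nvmet_mrq = 16, the
 * MRQ count is reduced to 4; it is always capped at LPFC_NVMET_MRQ_MAX.
 * The EQ and CQ entry size/count defaults are then filled in
 * unconditionally.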
8590 * 8591 * Return codes 8592 * 0 - successful 8593 * -ENOMEM - No available memory 8594 **/ 8595 static int 8596 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 8597 { 8598 /* 8599 * Sanity check for configured queue parameters against the run-time 8600 * device parameters 8601 */ 8602 8603 if (phba->nvmet_support) { 8604 if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq) 8605 phba->cfg_nvmet_mrq = phba->cfg_irq_chann; 8606 } 8607 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 8608 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 8609 8610 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8611 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 8612 phba->cfg_hdw_queue, phba->cfg_irq_chann, 8613 phba->cfg_nvmet_mrq); 8614 8615 /* Get EQ depth from module parameter, fake the default for now */ 8616 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8617 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8618 8619 /* Get CQ depth from module parameter, fake the default for now */ 8620 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8621 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8622 return 0; 8623 } 8624 8625 static int 8626 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) 8627 { 8628 struct lpfc_queue *qdesc; 8629 8630 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8631 phba->sli4_hba.cq_esize, 8632 LPFC_CQE_EXP_COUNT); 8633 if (!qdesc) { 8634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8635 "0508 Failed allocate fast-path NVME CQ (%d)\n", 8636 wqidx); 8637 return 1; 8638 } 8639 qdesc->qe_valid = 1; 8640 qdesc->hdwq = wqidx; 8641 qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); 8642 phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc; 8643 8644 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8645 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT); 8646 if (!qdesc) { 8647 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8648 "0509 Failed allocate fast-path NVME WQ (%d)\n", 8649 wqidx); 8650 return 1; 8651 } 8652 qdesc->hdwq = wqidx; 8653 qdesc->chann = wqidx; 8654 phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc; 8655 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8656 return 0; 8657 } 8658 8659 static int 8660 lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) 8661 { 8662 struct lpfc_queue *qdesc; 8663 uint32_t wqesize; 8664 8665 /* Create Fast Path FCP CQs */ 8666 if (phba->enab_exp_wqcq_pages) 8667 /* Increase the CQ size when WQEs contain an embedded cdb */ 8668 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8669 phba->sli4_hba.cq_esize, 8670 LPFC_CQE_EXP_COUNT); 8671 8672 else 8673 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8674 phba->sli4_hba.cq_esize, 8675 phba->sli4_hba.cq_ecount); 8676 if (!qdesc) { 8677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8678 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); 8679 return 1; 8680 } 8681 qdesc->qe_valid = 1; 8682 qdesc->hdwq = wqidx; 8683 qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); 8684 phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc; 8685 8686 /* Create Fast Path FCP WQs */ 8687 if (phba->enab_exp_wqcq_pages) { 8688 /* Increase the WQ size when WQEs contain an embedded cdb */ 8689 wqesize = (phba->fcp_embed_io) ? 
8690 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 8691 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8692 wqesize, 8693 LPFC_WQE_EXP_COUNT); 8694 } else 8695 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8696 phba->sli4_hba.wq_esize, 8697 phba->sli4_hba.wq_ecount); 8698 8699 if (!qdesc) { 8700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8701 "0503 Failed allocate fast-path FCP WQ (%d)\n", 8702 wqidx); 8703 return 1; 8704 } 8705 qdesc->hdwq = wqidx; 8706 qdesc->chann = wqidx; 8707 phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc; 8708 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8709 return 0; 8710 } 8711 8712 /** 8713 * lpfc_sli4_queue_create - Create all the SLI4 queues 8714 * @phba: pointer to lpfc hba data structure. 8715 * 8716 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 8717 * operation. For each SLI4 queue type, the parameters such as queue entry 8718 * count (queue depth) shall be taken from the module parameter. For now, 8719 * we just use some constant number as place holder. 8720 * 8721 * Return codes 8722 * 0 - successful 8723 * -ENOMEM - No availble memory 8724 * -EIO - The mailbox failed to complete successfully. 8725 **/ 8726 int 8727 lpfc_sli4_queue_create(struct lpfc_hba *phba) 8728 { 8729 struct lpfc_queue *qdesc; 8730 int idx, eqidx; 8731 struct lpfc_sli4_hdw_queue *qp; 8732 struct lpfc_eq_intr_info *eqi; 8733 8734 /* 8735 * Create HBA Record arrays. 8736 * Both NVME and FCP will share that same vectors / EQs 8737 */ 8738 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 8739 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 8740 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 8741 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 8742 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 8743 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 8744 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8745 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8746 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8747 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8748 8749 if (!phba->sli4_hba.hdwq) { 8750 phba->sli4_hba.hdwq = kcalloc( 8751 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 8752 GFP_KERNEL); 8753 if (!phba->sli4_hba.hdwq) { 8754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8755 "6427 Failed allocate memory for " 8756 "fast-path Hardware Queue array\n"); 8757 goto out_error; 8758 } 8759 /* Prepare hardware queues to take IO buffers */ 8760 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8761 qp = &phba->sli4_hba.hdwq[idx]; 8762 spin_lock_init(&qp->io_buf_list_get_lock); 8763 spin_lock_init(&qp->io_buf_list_put_lock); 8764 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 8765 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 8766 qp->get_io_bufs = 0; 8767 qp->put_io_bufs = 0; 8768 qp->total_io_bufs = 0; 8769 spin_lock_init(&qp->abts_scsi_buf_list_lock); 8770 INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list); 8771 qp->abts_scsi_io_bufs = 0; 8772 spin_lock_init(&qp->abts_nvme_buf_list_lock); 8773 INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list); 8774 qp->abts_nvme_io_bufs = 0; 8775 } 8776 } 8777 8778 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8779 if (phba->nvmet_support) { 8780 phba->sli4_hba.nvmet_cqset = kcalloc( 8781 phba->cfg_nvmet_mrq, 8782 sizeof(struct lpfc_queue *), 8783 GFP_KERNEL); 8784 if (!phba->sli4_hba.nvmet_cqset) { 8785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8786 "3121 Fail allocate memory for " 8787 "fast-path CQ set array\n"); 8788 goto out_error; 8789 } 8790 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 8791 phba->cfg_nvmet_mrq, 8792 sizeof(struct 
lpfc_queue *), 8793 GFP_KERNEL); 8794 if (!phba->sli4_hba.nvmet_mrq_hdr) { 8795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8796 "3122 Fail allocate memory for " 8797 "fast-path RQ set hdr array\n"); 8798 goto out_error; 8799 } 8800 phba->sli4_hba.nvmet_mrq_data = kcalloc( 8801 phba->cfg_nvmet_mrq, 8802 sizeof(struct lpfc_queue *), 8803 GFP_KERNEL); 8804 if (!phba->sli4_hba.nvmet_mrq_data) { 8805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8806 "3124 Fail allocate memory for " 8807 "fast-path RQ set data array\n"); 8808 goto out_error; 8809 } 8810 } 8811 } 8812 8813 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8814 8815 /* Create HBA Event Queues (EQs) */ 8816 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8817 /* 8818 * If there are more Hardware Queues than available 8819 * CQs, multiple Hardware Queues may share a common EQ. 8820 */ 8821 if (idx >= phba->cfg_irq_chann) { 8822 /* Share an existing EQ */ 8823 eqidx = lpfc_find_eq_handle(phba, idx); 8824 phba->sli4_hba.hdwq[idx].hba_eq = 8825 phba->sli4_hba.hdwq[eqidx].hba_eq; 8826 continue; 8827 } 8828 /* Create an EQ */ 8829 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8830 phba->sli4_hba.eq_esize, 8831 phba->sli4_hba.eq_ecount); 8832 if (!qdesc) { 8833 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8834 "0497 Failed allocate EQ (%d)\n", idx); 8835 goto out_error; 8836 } 8837 qdesc->qe_valid = 1; 8838 qdesc->hdwq = idx; 8839 8840 /* Save the CPU this EQ is affinitised to */ 8841 eqidx = lpfc_find_eq_handle(phba, idx); 8842 qdesc->chann = lpfc_find_cpu_handle(phba, eqidx, 8843 LPFC_FIND_BY_EQ); 8844 phba->sli4_hba.hdwq[idx].hba_eq = qdesc; 8845 qdesc->last_cpu = qdesc->chann; 8846 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 8847 list_add(&qdesc->cpu_list, &eqi->list); 8848 } 8849 8850 8851 /* Allocate SCSI SLI4 CQ/WQs */ 8852 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8853 if (lpfc_alloc_fcp_wq_cq(phba, idx)) 8854 goto out_error; 8855 } 8856 8857 /* Allocate NVME SLI4 CQ/WQs */ 8858 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8859 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8860 if (lpfc_alloc_nvme_wq_cq(phba, idx)) 8861 goto out_error; 8862 } 8863 8864 if (phba->nvmet_support) { 8865 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8866 qdesc = lpfc_sli4_queue_alloc( 8867 phba, 8868 LPFC_DEFAULT_PAGE_SIZE, 8869 phba->sli4_hba.cq_esize, 8870 phba->sli4_hba.cq_ecount); 8871 if (!qdesc) { 8872 lpfc_printf_log( 8873 phba, KERN_ERR, LOG_INIT, 8874 "3142 Failed allocate NVME " 8875 "CQ Set (%d)\n", idx); 8876 goto out_error; 8877 } 8878 qdesc->qe_valid = 1; 8879 qdesc->hdwq = idx; 8880 qdesc->chann = idx; 8881 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 8882 } 8883 } 8884 } 8885 8886 /* 8887 * Create Slow Path Completion Queues (CQs) 8888 */ 8889 8890 /* Create slow-path Mailbox Command Complete Queue */ 8891 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8892 phba->sli4_hba.cq_esize, 8893 phba->sli4_hba.cq_ecount); 8894 if (!qdesc) { 8895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8896 "0500 Failed allocate slow-path mailbox CQ\n"); 8897 goto out_error; 8898 } 8899 qdesc->qe_valid = 1; 8900 phba->sli4_hba.mbx_cq = qdesc; 8901 8902 /* Create slow-path ELS Complete Queue */ 8903 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8904 phba->sli4_hba.cq_esize, 8905 phba->sli4_hba.cq_ecount); 8906 if (!qdesc) { 8907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8908 "0501 Failed allocate slow-path ELS CQ\n"); 8909 goto out_error; 8910 } 8911 qdesc->qe_valid = 1; 8912 qdesc->chann = 0; 
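	/*
	 * The slow-path queues created from here on (ELS CQ/WQ, mailbox MQ,
	 * NVME LS CQ/WQ) all use chann 0; unlike the fast-path queues above,
	 * their chann is not derived from lpfc_find_cpu_handle().
	 */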
8913 phba->sli4_hba.els_cq = qdesc; 8914 8915 8916 /* 8917 * Create Slow Path Work Queues (WQs) 8918 */ 8919 8920 /* Create Mailbox Command Queue */ 8921 8922 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8923 phba->sli4_hba.mq_esize, 8924 phba->sli4_hba.mq_ecount); 8925 if (!qdesc) { 8926 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8927 "0505 Failed allocate slow-path MQ\n"); 8928 goto out_error; 8929 } 8930 qdesc->chann = 0; 8931 phba->sli4_hba.mbx_wq = qdesc; 8932 8933 /* 8934 * Create ELS Work Queues 8935 */ 8936 8937 /* Create slow-path ELS Work Queue */ 8938 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8939 phba->sli4_hba.wq_esize, 8940 phba->sli4_hba.wq_ecount); 8941 if (!qdesc) { 8942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8943 "0504 Failed allocate slow-path ELS WQ\n"); 8944 goto out_error; 8945 } 8946 qdesc->chann = 0; 8947 phba->sli4_hba.els_wq = qdesc; 8948 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8949 8950 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8951 /* Create NVME LS Complete Queue */ 8952 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8953 phba->sli4_hba.cq_esize, 8954 phba->sli4_hba.cq_ecount); 8955 if (!qdesc) { 8956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8957 "6079 Failed allocate NVME LS CQ\n"); 8958 goto out_error; 8959 } 8960 qdesc->chann = 0; 8961 qdesc->qe_valid = 1; 8962 phba->sli4_hba.nvmels_cq = qdesc; 8963 8964 /* Create NVME LS Work Queue */ 8965 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8966 phba->sli4_hba.wq_esize, 8967 phba->sli4_hba.wq_ecount); 8968 if (!qdesc) { 8969 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8970 "6080 Failed allocate NVME LS WQ\n"); 8971 goto out_error; 8972 } 8973 qdesc->chann = 0; 8974 phba->sli4_hba.nvmels_wq = qdesc; 8975 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8976 } 8977 8978 /* 8979 * Create Receive Queue (RQ) 8980 */ 8981 8982 /* Create Receive Queue for header */ 8983 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8984 phba->sli4_hba.rq_esize, 8985 phba->sli4_hba.rq_ecount); 8986 if (!qdesc) { 8987 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8988 "0506 Failed allocate receive HRQ\n"); 8989 goto out_error; 8990 } 8991 phba->sli4_hba.hdr_rq = qdesc; 8992 8993 /* Create Receive Queue for data */ 8994 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8995 phba->sli4_hba.rq_esize, 8996 phba->sli4_hba.rq_ecount); 8997 if (!qdesc) { 8998 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8999 "0507 Failed allocate receive DRQ\n"); 9000 goto out_error; 9001 } 9002 phba->sli4_hba.dat_rq = qdesc; 9003 9004 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 9005 phba->nvmet_support) { 9006 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9007 /* Create NVMET Receive Queue for header */ 9008 qdesc = lpfc_sli4_queue_alloc(phba, 9009 LPFC_DEFAULT_PAGE_SIZE, 9010 phba->sli4_hba.rq_esize, 9011 LPFC_NVMET_RQE_DEF_COUNT); 9012 if (!qdesc) { 9013 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9014 "3146 Failed allocate " 9015 "receive HRQ\n"); 9016 goto out_error; 9017 } 9018 qdesc->hdwq = idx; 9019 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 9020 9021 /* Only needed for header of RQ pair */ 9022 qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb), 9023 GFP_KERNEL); 9024 if (qdesc->rqbp == NULL) { 9025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9026 "6131 Failed allocate " 9027 "Header RQBP\n"); 9028 goto out_error; 9029 } 9030 9031 /* Put list in known state in case driver load fails. 
*/ 9032 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 9033 9034 /* Create NVMET Receive Queue for data */ 9035 qdesc = lpfc_sli4_queue_alloc(phba, 9036 LPFC_DEFAULT_PAGE_SIZE, 9037 phba->sli4_hba.rq_esize, 9038 LPFC_NVMET_RQE_DEF_COUNT); 9039 if (!qdesc) { 9040 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9041 "3156 Failed allocate " 9042 "receive DRQ\n"); 9043 goto out_error; 9044 } 9045 qdesc->hdwq = idx; 9046 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 9047 } 9048 } 9049 9050 #if defined(BUILD_NVME) 9051 /* Clear NVME stats */ 9052 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9053 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9054 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 9055 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 9056 } 9057 } 9058 #endif 9059 9060 /* Clear SCSI stats */ 9061 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 9062 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9063 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 9064 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 9065 } 9066 } 9067 9068 return 0; 9069 9070 out_error: 9071 lpfc_sli4_queue_destroy(phba); 9072 return -ENOMEM; 9073 } 9074 9075 static inline void 9076 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 9077 { 9078 if (*qp != NULL) { 9079 lpfc_sli4_queue_free(*qp); 9080 *qp = NULL; 9081 } 9082 } 9083 9084 static inline void 9085 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 9086 { 9087 int idx; 9088 9089 if (*qs == NULL) 9090 return; 9091 9092 for (idx = 0; idx < max; idx++) 9093 __lpfc_sli4_release_queue(&(*qs)[idx]); 9094 9095 kfree(*qs); 9096 *qs = NULL; 9097 } 9098 9099 static inline void 9100 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 9101 { 9102 struct lpfc_sli4_hdw_queue *hdwq; 9103 uint32_t idx; 9104 9105 hdwq = phba->sli4_hba.hdwq; 9106 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9107 if (idx < phba->cfg_irq_chann) 9108 lpfc_sli4_queue_free(hdwq[idx].hba_eq); 9109 hdwq[idx].hba_eq = NULL; 9110 9111 lpfc_sli4_queue_free(hdwq[idx].fcp_cq); 9112 lpfc_sli4_queue_free(hdwq[idx].nvme_cq); 9113 lpfc_sli4_queue_free(hdwq[idx].fcp_wq); 9114 lpfc_sli4_queue_free(hdwq[idx].nvme_wq); 9115 hdwq[idx].fcp_cq = NULL; 9116 hdwq[idx].nvme_cq = NULL; 9117 hdwq[idx].fcp_wq = NULL; 9118 hdwq[idx].nvme_wq = NULL; 9119 } 9120 } 9121 9122 /** 9123 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 9124 * @phba: pointer to lpfc hba data structure. 9125 * 9126 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 9127 * operation. 9128 * 9129 * Return codes 9130 * 0 - successful 9131 * -ENOMEM - No available memory 9132 * -EIO - The mailbox failed to complete successfully. 
9133 **/ 9134 void 9135 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 9136 { 9137 /* Release HBA eqs */ 9138 if (phba->sli4_hba.hdwq) 9139 lpfc_sli4_release_hdwq(phba); 9140 9141 if (phba->nvmet_support) { 9142 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 9143 phba->cfg_nvmet_mrq); 9144 9145 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 9146 phba->cfg_nvmet_mrq); 9147 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 9148 phba->cfg_nvmet_mrq); 9149 } 9150 9151 /* Release mailbox command work queue */ 9152 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 9153 9154 /* Release ELS work queue */ 9155 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 9156 9157 /* Release NVME LS work queue */ 9158 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 9159 9160 /* Release unsolicited receive queue */ 9161 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 9162 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 9163 9164 /* Release ELS complete queue */ 9165 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 9166 9167 /* Release NVME LS complete queue */ 9168 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 9169 9170 /* Release mailbox command complete queue */ 9171 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 9172 9173 /* Everything on this list has been freed */ 9174 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 9175 } 9176 9177 int 9178 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 9179 { 9180 struct lpfc_rqb *rqbp; 9181 struct lpfc_dmabuf *h_buf; 9182 struct rqb_dmabuf *rqb_buffer; 9183 9184 rqbp = rq->rqbp; 9185 while (!list_empty(&rqbp->rqb_buffer_list)) { 9186 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 9187 struct lpfc_dmabuf, list); 9188 9189 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 9190 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 9191 rqbp->buffer_count--; 9192 } 9193 return 1; 9194 } 9195 9196 static int 9197 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 9198 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 9199 int qidx, uint32_t qtype) 9200 { 9201 struct lpfc_sli_ring *pring; 9202 int rc; 9203 9204 if (!eq || !cq || !wq) { 9205 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9206 "6085 Fast-path %s (%d) not allocated\n", 9207 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 9208 return -ENOMEM; 9209 } 9210 9211 /* Create the CQ first */ 9212 rc = lpfc_cq_create(phba, cq, eq, 9213 (qtype == LPFC_MBOX) ?
LPFC_MCQ : LPFC_WCQ, qtype); 9214 if (rc) { 9215 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9216 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9217 qidx, (uint32_t)rc); 9218 return rc; 9219 } 9220 9221 if (qtype != LPFC_MBOX) { 9222 /* Setup cq_map for fast lookup */ 9223 if (cq_map) 9224 *cq_map = cq->queue_id; 9225 9226 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9227 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 9228 qidx, cq->queue_id, qidx, eq->queue_id); 9229 9230 /* create the wq */ 9231 rc = lpfc_wq_create(phba, wq, cq, qtype); 9232 if (rc) { 9233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9234 "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n", 9235 qidx, (uint32_t)rc); 9236 /* no need to tear down cq - caller will do so */ 9237 return rc; 9238 } 9239 9240 /* Bind this CQ/WQ to the NVME ring */ 9241 pring = wq->pring; 9242 pring->sli.sli4.wqp = (void *)wq; 9243 cq->pring = pring; 9244 9245 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9246 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 9247 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 9248 } else { 9249 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 9250 if (rc) { 9251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9252 "0539 Failed setup of slow-path MQ: " 9253 "rc = 0x%x\n", rc); 9254 /* no need to tear down cq - caller will do so */ 9255 return rc; 9256 } 9257 9258 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9259 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 9260 phba->sli4_hba.mbx_wq->queue_id, 9261 phba->sli4_hba.mbx_cq->queue_id); 9262 } 9263 9264 return 0; 9265 } 9266 9267 /** 9268 * lpfc_setup_cq_lookup - Setup the CQ lookup table 9269 * @phba: pointer to lpfc hba data structure. 9270 * 9271 * This routine will populate the cq_lookup table by all 9272 * available CQ queue_id's. 9273 **/ 9274 void 9275 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 9276 { 9277 struct lpfc_queue *eq, *childq; 9278 struct lpfc_sli4_hdw_queue *qp; 9279 int qidx; 9280 9281 qp = phba->sli4_hba.hdwq; 9282 memset(phba->sli4_hba.cq_lookup, 0, 9283 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 9284 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9285 eq = qp[qidx].hba_eq; 9286 if (!eq) 9287 continue; 9288 list_for_each_entry(childq, &eq->child_list, list) { 9289 if (childq->queue_id > phba->sli4_hba.cq_max) 9290 continue; 9291 if ((childq->subtype == LPFC_FCP) || 9292 (childq->subtype == LPFC_NVME)) 9293 phba->sli4_hba.cq_lookup[childq->queue_id] = 9294 childq; 9295 } 9296 } 9297 } 9298 9299 /** 9300 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 9301 * @phba: pointer to lpfc hba data structure. 9302 * 9303 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 9304 * operation. 9305 * 9306 * Return codes 9307 * 0 - successful 9308 * -ENOMEM - No available memory 9309 * -EIO - The mailbox failed to complete successfully. 
9310 **/ 9311 int 9312 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 9313 { 9314 uint32_t shdr_status, shdr_add_status; 9315 union lpfc_sli4_cfg_shdr *shdr; 9316 struct lpfc_sli4_hdw_queue *qp; 9317 LPFC_MBOXQ_t *mboxq; 9318 int qidx; 9319 uint32_t length, usdelay; 9320 int rc = -ENOMEM; 9321 9322 /* Check for dual-ULP support */ 9323 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9324 if (!mboxq) { 9325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9326 "3249 Unable to allocate memory for " 9327 "QUERY_FW_CFG mailbox command\n"); 9328 return -ENOMEM; 9329 } 9330 length = (sizeof(struct lpfc_mbx_query_fw_config) - 9331 sizeof(struct lpfc_sli4_cfg_mhdr)); 9332 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9333 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 9334 length, LPFC_SLI4_MBX_EMBED); 9335 9336 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9337 9338 shdr = (union lpfc_sli4_cfg_shdr *) 9339 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9340 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9341 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9342 if (shdr_status || shdr_add_status || rc) { 9343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9344 "3250 QUERY_FW_CFG mailbox failed with status " 9345 "x%x add_status x%x, mbx status x%x\n", 9346 shdr_status, shdr_add_status, rc); 9347 if (rc != MBX_TIMEOUT) 9348 mempool_free(mboxq, phba->mbox_mem_pool); 9349 rc = -ENXIO; 9350 goto out_error; 9351 } 9352 9353 phba->sli4_hba.fw_func_mode = 9354 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 9355 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 9356 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 9357 phba->sli4_hba.physical_port = 9358 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 9359 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9360 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 9361 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 9362 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 9363 9364 if (rc != MBX_TIMEOUT) 9365 mempool_free(mboxq, phba->mbox_mem_pool); 9366 9367 /* 9368 * Set up HBA Event Queues (EQs) 9369 */ 9370 qp = phba->sli4_hba.hdwq; 9371 9372 /* Set up HBA event queue */ 9373 if (!qp) { 9374 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9375 "3147 Fast-path EQs not allocated\n"); 9376 rc = -ENOMEM; 9377 goto out_error; 9378 } 9379 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9380 if (!qp[qidx].hba_eq) { 9381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9382 "0522 Fast-path EQ (%d) not " 9383 "allocated\n", qidx); 9384 rc = -ENOMEM; 9385 goto out_destroy; 9386 } 9387 rc = lpfc_eq_create(phba, qp[qidx].hba_eq, 9388 phba->cfg_fcp_imax); 9389 if (rc) { 9390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9391 "0523 Failed setup of fast-path EQ " 9392 "(%d), rc = 0x%x\n", qidx, 9393 (uint32_t)rc); 9394 goto out_destroy; 9395 } 9396 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9397 "2584 HBA EQ setup: queue[%d]-id=%d\n", qidx, 9398 qp[qidx].hba_eq->queue_id); 9399 } 9400 9401 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9402 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9403 rc = lpfc_create_wq_cq(phba, 9404 qp[qidx].hba_eq, 9405 qp[qidx].nvme_cq, 9406 qp[qidx].nvme_wq, 9407 &phba->sli4_hba.hdwq[qidx].nvme_cq_map, 9408 qidx, LPFC_NVME); 9409 if (rc) { 9410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9411 "6123 Failed to setup fastpath " 9412 "NVME WQ/CQ (%d), rc = 0x%x\n", 9413 qidx, (uint32_t)rc); 9414 goto out_destroy; 9415 } 9416 } 9417 } 9418 9419 for (qidx = 0; qidx < 
phba->cfg_hdw_queue; qidx++) { 9420 rc = lpfc_create_wq_cq(phba, 9421 qp[qidx].hba_eq, 9422 qp[qidx].fcp_cq, 9423 qp[qidx].fcp_wq, 9424 &phba->sli4_hba.hdwq[qidx].fcp_cq_map, 9425 qidx, LPFC_FCP); 9426 if (rc) { 9427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9428 "0535 Failed to setup fastpath " 9429 "FCP WQ/CQ (%d), rc = 0x%x\n", 9430 qidx, (uint32_t)rc); 9431 goto out_destroy; 9432 } 9433 } 9434 9435 /* 9436 * Set up Slow Path Complete Queues (CQs) 9437 */ 9438 9439 /* Set up slow-path MBOX CQ/MQ */ 9440 9441 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 9442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9443 "0528 %s not allocated\n", 9444 phba->sli4_hba.mbx_cq ? 9445 "Mailbox WQ" : "Mailbox CQ"); 9446 rc = -ENOMEM; 9447 goto out_destroy; 9448 } 9449 9450 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9451 phba->sli4_hba.mbx_cq, 9452 phba->sli4_hba.mbx_wq, 9453 NULL, 0, LPFC_MBOX); 9454 if (rc) { 9455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9456 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 9457 (uint32_t)rc); 9458 goto out_destroy; 9459 } 9460 if (phba->nvmet_support) { 9461 if (!phba->sli4_hba.nvmet_cqset) { 9462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9463 "3165 Fast-path NVME CQ Set " 9464 "array not allocated\n"); 9465 rc = -ENOMEM; 9466 goto out_destroy; 9467 } 9468 if (phba->cfg_nvmet_mrq > 1) { 9469 rc = lpfc_cq_create_set(phba, 9470 phba->sli4_hba.nvmet_cqset, 9471 qp, 9472 LPFC_WCQ, LPFC_NVMET); 9473 if (rc) { 9474 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9475 "3164 Failed setup of NVME CQ " 9476 "Set, rc = 0x%x\n", 9477 (uint32_t)rc); 9478 goto out_destroy; 9479 } 9480 } else { 9481 /* Set up NVMET Receive Complete Queue */ 9482 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 9483 qp[0].hba_eq, 9484 LPFC_WCQ, LPFC_NVMET); 9485 if (rc) { 9486 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9487 "6089 Failed setup NVMET CQ: " 9488 "rc = 0x%x\n", (uint32_t)rc); 9489 goto out_destroy; 9490 } 9491 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 9492 9493 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9494 "6090 NVMET CQ setup: cq-id=%d, " 9495 "parent eq-id=%d\n", 9496 phba->sli4_hba.nvmet_cqset[0]->queue_id, 9497 qp[0].hba_eq->queue_id); 9498 } 9499 } 9500 9501 /* Set up slow-path ELS WQ/CQ */ 9502 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 9503 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9504 "0530 ELS %s not allocated\n", 9505 phba->sli4_hba.els_cq ? "WQ" : "CQ"); 9506 rc = -ENOMEM; 9507 goto out_destroy; 9508 } 9509 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9510 phba->sli4_hba.els_cq, 9511 phba->sli4_hba.els_wq, 9512 NULL, 0, LPFC_ELS); 9513 if (rc) { 9514 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9515 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 9516 (uint32_t)rc); 9517 goto out_destroy; 9518 } 9519 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9520 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 9521 phba->sli4_hba.els_wq->queue_id, 9522 phba->sli4_hba.els_cq->queue_id); 9523 9524 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9525 /* Set up NVME LS Complete Queue */ 9526 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9528 "6091 LS %s not allocated\n", 9529 phba->sli4_hba.nvmels_cq ? 
"WQ" : "CQ"); 9530 rc = -ENOMEM; 9531 goto out_destroy; 9532 } 9533 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9534 phba->sli4_hba.nvmels_cq, 9535 phba->sli4_hba.nvmels_wq, 9536 NULL, 0, LPFC_NVME_LS); 9537 if (rc) { 9538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9539 "0526 Failed setup of NVVME LS WQ/CQ: " 9540 "rc = 0x%x\n", (uint32_t)rc); 9541 goto out_destroy; 9542 } 9543 9544 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9545 "6096 ELS WQ setup: wq-id=%d, " 9546 "parent cq-id=%d\n", 9547 phba->sli4_hba.nvmels_wq->queue_id, 9548 phba->sli4_hba.nvmels_cq->queue_id); 9549 } 9550 9551 /* 9552 * Create NVMET Receive Queue (RQ) 9553 */ 9554 if (phba->nvmet_support) { 9555 if ((!phba->sli4_hba.nvmet_cqset) || 9556 (!phba->sli4_hba.nvmet_mrq_hdr) || 9557 (!phba->sli4_hba.nvmet_mrq_data)) { 9558 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9559 "6130 MRQ CQ Queues not " 9560 "allocated\n"); 9561 rc = -ENOMEM; 9562 goto out_destroy; 9563 } 9564 if (phba->cfg_nvmet_mrq > 1) { 9565 rc = lpfc_mrq_create(phba, 9566 phba->sli4_hba.nvmet_mrq_hdr, 9567 phba->sli4_hba.nvmet_mrq_data, 9568 phba->sli4_hba.nvmet_cqset, 9569 LPFC_NVMET); 9570 if (rc) { 9571 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9572 "6098 Failed setup of NVMET " 9573 "MRQ: rc = 0x%x\n", 9574 (uint32_t)rc); 9575 goto out_destroy; 9576 } 9577 9578 } else { 9579 rc = lpfc_rq_create(phba, 9580 phba->sli4_hba.nvmet_mrq_hdr[0], 9581 phba->sli4_hba.nvmet_mrq_data[0], 9582 phba->sli4_hba.nvmet_cqset[0], 9583 LPFC_NVMET); 9584 if (rc) { 9585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9586 "6057 Failed setup of NVMET " 9587 "Receive Queue: rc = 0x%x\n", 9588 (uint32_t)rc); 9589 goto out_destroy; 9590 } 9591 9592 lpfc_printf_log( 9593 phba, KERN_INFO, LOG_INIT, 9594 "6099 NVMET RQ setup: hdr-rq-id=%d, " 9595 "dat-rq-id=%d parent cq-id=%d\n", 9596 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 9597 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 9598 phba->sli4_hba.nvmet_cqset[0]->queue_id); 9599 9600 } 9601 } 9602 9603 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 9604 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9605 "0540 Receive Queue not allocated\n"); 9606 rc = -ENOMEM; 9607 goto out_destroy; 9608 } 9609 9610 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 9611 phba->sli4_hba.els_cq, LPFC_USOL); 9612 if (rc) { 9613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9614 "0541 Failed setup of Receive Queue: " 9615 "rc = 0x%x\n", (uint32_t)rc); 9616 goto out_destroy; 9617 } 9618 9619 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9620 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 9621 "parent cq-id=%d\n", 9622 phba->sli4_hba.hdr_rq->queue_id, 9623 phba->sli4_hba.dat_rq->queue_id, 9624 phba->sli4_hba.els_cq->queue_id); 9625 9626 if (phba->cfg_fcp_imax) 9627 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 9628 else 9629 usdelay = 0; 9630 9631 for (qidx = 0; qidx < phba->cfg_irq_chann; 9632 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 9633 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 9634 usdelay); 9635 9636 if (phba->sli4_hba.cq_max) { 9637 kfree(phba->sli4_hba.cq_lookup); 9638 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 9639 sizeof(struct lpfc_queue *), GFP_KERNEL); 9640 if (!phba->sli4_hba.cq_lookup) { 9641 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9642 "0549 Failed setup of CQ Lookup table: " 9643 "size 0x%x\n", phba->sli4_hba.cq_max); 9644 rc = -ENOMEM; 9645 goto out_destroy; 9646 } 9647 lpfc_setup_cq_lookup(phba); 9648 } 9649 return 0; 9650 9651 out_destroy: 9652 lpfc_sli4_queue_unset(phba); 9653 
9653 out_error: 9654 return rc; 9655 } 9656 9657 /** 9658 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 9659 * @phba: pointer to lpfc hba data structure. 9660 * 9661 * This routine is invoked to unset (tear down) all the SLI4 queues created 9662 * for the FCoE HBA operation. 9668 **/ 9669 void 9670 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 9671 { 9672 struct lpfc_sli4_hdw_queue *qp; 9673 int qidx; 9674 9675 /* Unset mailbox command work queue */ 9676 if (phba->sli4_hba.mbx_wq) 9677 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 9678 9679 /* Unset NVME LS work queue */ 9680 if (phba->sli4_hba.nvmels_wq) 9681 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 9682 9683 /* Unset ELS work queue */ 9684 if (phba->sli4_hba.els_wq) 9685 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 9686 9687 /* Unset unsolicited receive queue */ 9688 if (phba->sli4_hba.hdr_rq) 9689 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 9690 phba->sli4_hba.dat_rq); 9691 9692 /* Unset mailbox command complete queue */ 9693 if (phba->sli4_hba.mbx_cq) 9694 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 9695 9696 /* Unset ELS complete queue */ 9697 if (phba->sli4_hba.els_cq) 9698 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 9699 9700 /* Unset NVME LS complete queue */ 9701 if (phba->sli4_hba.nvmels_cq) 9702 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 9703 9704 if (phba->nvmet_support) { 9705 /* Unset NVMET MRQ queue */ 9706 if (phba->sli4_hba.nvmet_mrq_hdr) { 9707 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9708 lpfc_rq_destroy( 9709 phba, 9710 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9711 phba->sli4_hba.nvmet_mrq_data[qidx]); 9712 } 9713 9714 /* Unset NVMET CQ Set complete queue */ 9715 if (phba->sli4_hba.nvmet_cqset) { 9716 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9717 lpfc_cq_destroy( 9718 phba, phba->sli4_hba.nvmet_cqset[qidx]); 9719 } 9720 } 9721 9722 /* Unset fast-path SLI4 queues */ 9723 if (phba->sli4_hba.hdwq) { 9724 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9725 qp = &phba->sli4_hba.hdwq[qidx]; 9726 lpfc_wq_destroy(phba, qp->fcp_wq); 9727 lpfc_wq_destroy(phba, qp->nvme_wq); 9728 lpfc_cq_destroy(phba, qp->fcp_cq); 9729 lpfc_cq_destroy(phba, qp->nvme_cq); 9730 if (qidx < phba->cfg_irq_chann) 9731 lpfc_eq_destroy(phba, qp->hba_eq); 9732 } 9733 } 9734 9735 kfree(phba->sli4_hba.cq_lookup); 9736 phba->sli4_hba.cq_lookup = NULL; 9737 phba->sli4_hba.cq_max = 0; 9738 } 9739 9740 /** 9741 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 9742 * @phba: pointer to lpfc hba data structure. 9743 * 9744 * This routine is invoked to allocate and set up a pool of completion queue 9745 * events. The body of the completion queue event is a completion queue entry 9746 * (CQE). For now, this pool is used for the interrupt service routine to queue 9747 * the following HBA completion queue events for the worker thread to process: 9748 * - Mailbox asynchronous events 9749 * - Receive queue completion unsolicited events 9750 * Later, this can be used for all the slow-path events.
9751 * 9752 * Return codes 9753 * 0 - successful 9754 * -ENOMEM - No available memory 9755 **/ 9756 static int 9757 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 9758 { 9759 struct lpfc_cq_event *cq_event; 9760 int i; 9761 9762 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 9763 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 9764 if (!cq_event) 9765 goto out_pool_create_fail; 9766 list_add_tail(&cq_event->list, 9767 &phba->sli4_hba.sp_cqe_event_pool); 9768 } 9769 return 0; 9770 9771 out_pool_create_fail: 9772 lpfc_sli4_cq_event_pool_destroy(phba); 9773 return -ENOMEM; 9774 } 9775 9776 /** 9777 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 9778 * @phba: pointer to lpfc hba data structure. 9779 * 9780 * This routine is invoked to free the pool of completion queue events at 9781 * driver unload time. Note that, it is the responsibility of the driver 9782 * cleanup routine to free all the outstanding completion-queue events 9783 * allocated from this pool back into the pool before invoking this routine 9784 * to destroy the pool. 9785 **/ 9786 static void 9787 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 9788 { 9789 struct lpfc_cq_event *cq_event, *next_cq_event; 9790 9791 list_for_each_entry_safe(cq_event, next_cq_event, 9792 &phba->sli4_hba.sp_cqe_event_pool, list) { 9793 list_del(&cq_event->list); 9794 kfree(cq_event); 9795 } 9796 } 9797 9798 /** 9799 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9800 * @phba: pointer to lpfc hba data structure. 9801 * 9802 * This routine is the lock free version of the API invoked to allocate a 9803 * completion-queue event from the free pool. 9804 * 9805 * Return: Pointer to the newly allocated completion-queue event if successful 9806 * NULL otherwise. 9807 **/ 9808 struct lpfc_cq_event * 9809 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9810 { 9811 struct lpfc_cq_event *cq_event = NULL; 9812 9813 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 9814 struct lpfc_cq_event, list); 9815 return cq_event; 9816 } 9817 9818 /** 9819 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9820 * @phba: pointer to lpfc hba data structure. 9821 * 9822 * This routine is the lock version of the API invoked to allocate a 9823 * completion-queue event from the free pool. 9824 * 9825 * Return: Pointer to the newly allocated completion-queue event if successful 9826 * NULL otherwise. 9827 **/ 9828 struct lpfc_cq_event * 9829 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9830 { 9831 struct lpfc_cq_event *cq_event; 9832 unsigned long iflags; 9833 9834 spin_lock_irqsave(&phba->hbalock, iflags); 9835 cq_event = __lpfc_sli4_cq_event_alloc(phba); 9836 spin_unlock_irqrestore(&phba->hbalock, iflags); 9837 return cq_event; 9838 } 9839 9840 /** 9841 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 9842 * @phba: pointer to lpfc hba data structure. 9843 * @cq_event: pointer to the completion queue event to be freed. 9844 * 9845 * This routine is the lock free version of the API invoked to release a 9846 * completion-queue event back into the free pool. 9847 **/ 9848 void 9849 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 9850 struct lpfc_cq_event *cq_event) 9851 { 9852 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 9853 } 9854 9855 /** 9856 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 9857 * @phba: pointer to lpfc hba data structure. 
9858 * @cq_event: pointer to the completion queue event to be freed. 9859 * 9860 * This routine is the lock version of the API invoked to release a 9861 * completion-queue event back into the free pool. 9862 **/ 9863 void 9864 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 9865 struct lpfc_cq_event *cq_event) 9866 { 9867 unsigned long iflags; 9868 spin_lock_irqsave(&phba->hbalock, iflags); 9869 __lpfc_sli4_cq_event_release(phba, cq_event); 9870 spin_unlock_irqrestore(&phba->hbalock, iflags); 9871 } 9872 9873 /** 9874 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 9875 * @phba: pointer to lpfc hba data structure. 9876 * 9877 * This routine frees all the pending completion-queue events back into the 9878 * free pool in preparation for a device reset. 9879 **/ 9880 static void 9881 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 9882 { 9883 LIST_HEAD(cqelist); 9884 struct lpfc_cq_event *cqe; 9885 unsigned long iflags; 9886 9887 /* Retrieve all the pending WCQEs from pending WCQE lists */ 9888 spin_lock_irqsave(&phba->hbalock, iflags); 9889 /* Pending FCP XRI abort events */ 9890 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 9891 &cqelist); 9892 /* Pending ELS XRI abort events */ 9893 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 9894 &cqelist); 9895 /* Pending async events */ 9896 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 9897 &cqelist); 9898 spin_unlock_irqrestore(&phba->hbalock, iflags); 9899 9900 while (!list_empty(&cqelist)) { 9901 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 9902 lpfc_sli4_cq_event_release(phba, cqe); 9903 } 9904 } 9905 9906 /** 9907 * lpfc_pci_function_reset - Reset pci function. 9908 * @phba: pointer to lpfc hba data structure. 9909 * 9910 * This routine is invoked to request a PCI function reset. It destroys 9911 * all resources assigned to the PCI function that originates this request. 9912 * 9913 * Return codes 9914 * 0 - successful 9915 * -ENOMEM - No available memory 9916 * -EIO - The mailbox failed to complete successfully.
9917 **/ 9918 int 9919 lpfc_pci_function_reset(struct lpfc_hba *phba) 9920 { 9921 LPFC_MBOXQ_t *mboxq; 9922 uint32_t rc = 0, if_type; 9923 uint32_t shdr_status, shdr_add_status; 9924 uint32_t rdy_chk; 9925 uint32_t port_reset = 0; 9926 union lpfc_sli4_cfg_shdr *shdr; 9927 struct lpfc_register reg_data; 9928 uint16_t devid; 9929 9930 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9931 switch (if_type) { 9932 case LPFC_SLI_INTF_IF_TYPE_0: 9933 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 9934 GFP_KERNEL); 9935 if (!mboxq) { 9936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9937 "0494 Unable to allocate memory for " 9938 "issuing SLI_FUNCTION_RESET mailbox " 9939 "command\n"); 9940 return -ENOMEM; 9941 } 9942 9943 /* Setup PCI function reset mailbox-ioctl command */ 9944 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9945 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 9946 LPFC_SLI4_MBX_EMBED); 9947 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9948 shdr = (union lpfc_sli4_cfg_shdr *) 9949 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9950 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9951 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 9952 &shdr->response); 9953 if (rc != MBX_TIMEOUT) 9954 mempool_free(mboxq, phba->mbox_mem_pool); 9955 if (shdr_status || shdr_add_status || rc) { 9956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9957 "0495 SLI_FUNCTION_RESET mailbox " 9958 "failed with status x%x add_status x%x," 9959 " mbx status x%x\n", 9960 shdr_status, shdr_add_status, rc); 9961 rc = -ENXIO; 9962 } 9963 break; 9964 case LPFC_SLI_INTF_IF_TYPE_2: 9965 case LPFC_SLI_INTF_IF_TYPE_6: 9966 wait: 9967 /* 9968 * Poll the Port Status Register and wait for RDY for 9969 * up to 30 seconds. If the port doesn't respond, treat 9970 * it as an error. 9971 */ 9972 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 9973 if (lpfc_readl(phba->sli4_hba.u.if_type2. 9974 STATUSregaddr, ®_data.word0)) { 9975 rc = -ENODEV; 9976 goto out; 9977 } 9978 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 9979 break; 9980 msleep(20); 9981 } 9982 9983 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 9984 phba->work_status[0] = readl( 9985 phba->sli4_hba.u.if_type2.ERR1regaddr); 9986 phba->work_status[1] = readl( 9987 phba->sli4_hba.u.if_type2.ERR2regaddr); 9988 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9989 "2890 Port not ready, port status reg " 9990 "0x%x error 1=0x%x, error 2=0x%x\n", 9991 reg_data.word0, 9992 phba->work_status[0], 9993 phba->work_status[1]); 9994 rc = -ENODEV; 9995 goto out; 9996 } 9997 9998 if (!port_reset) { 9999 /* 10000 * Reset the port now 10001 */ 10002 reg_data.word0 = 0; 10003 bf_set(lpfc_sliport_ctrl_end, ®_data, 10004 LPFC_SLIPORT_LITTLE_ENDIAN); 10005 bf_set(lpfc_sliport_ctrl_ip, ®_data, 10006 LPFC_SLIPORT_INIT_PORT); 10007 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 10008 CTRLregaddr); 10009 /* flush */ 10010 pci_read_config_word(phba->pcidev, 10011 PCI_DEVICE_ID, &devid); 10012 10013 port_reset = 1; 10014 msleep(20); 10015 goto wait; 10016 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 10017 rc = -ENODEV; 10018 goto out; 10019 } 10020 break; 10021 10022 case LPFC_SLI_INTF_IF_TYPE_1: 10023 default: 10024 break; 10025 } 10026 10027 out: 10028 /* Catch the not-ready port failure after a port reset. 
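A nonzero rc at this point typically means the port never reported ready within the 30 second poll window above, the reset-needed flag remained set after the init-port request, or (for if_type 0) the SLI_FUNCTION_RESET mailbox failed.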
*/ 10029 if (rc) { 10030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10031 "3317 HBA not functional: IP Reset Failed " 10032 "try: echo fw_reset > board_mode\n"); 10033 rc = -ENODEV; 10034 } 10035 10036 return rc; 10037 } 10038 10039 /** 10040 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 10041 * @phba: pointer to lpfc hba data structure. 10042 * 10043 * This routine is invoked to set up the PCI device memory space for device 10044 * with SLI-4 interface spec. 10045 * 10046 * Return codes 10047 * 0 - successful 10048 * other values - error 10049 **/ 10050 static int 10051 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 10052 { 10053 struct pci_dev *pdev = phba->pcidev; 10054 unsigned long bar0map_len, bar1map_len, bar2map_len; 10055 int error; 10056 uint32_t if_type; 10057 10058 if (!pdev) 10059 return -ENODEV; 10060 10061 /* Set the device DMA mask size */ 10062 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10063 if (error) 10064 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10065 if (error) 10066 return error; 10067 10068 /* 10069 * The BARs and register set definitions and offset locations are 10070 * dependent on the if_type. 10071 */ 10072 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 10073 &phba->sli4_hba.sli_intf.word0)) { 10074 return -ENODEV; 10075 } 10076 10077 /* There is no SLI3 failback for SLI4 devices. */ 10078 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 10079 LPFC_SLI_INTF_VALID) { 10080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10081 "2894 SLI_INTF reg contents invalid " 10082 "sli_intf reg 0x%x\n", 10083 phba->sli4_hba.sli_intf.word0); 10084 return -ENODEV; 10085 } 10086 10087 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10088 /* 10089 * Get the bus address of SLI4 device Bar regions and the 10090 * number of bytes required by each mapping. The mapping of the 10091 * particular PCI BARs regions is dependent on the type of 10092 * SLI4 device. 
10093 */ 10094 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 10095 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 10096 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 10097 10098 /* 10099 * Map SLI4 PCI Config Space Register base to a kernel virtual 10100 * addr 10101 */ 10102 phba->sli4_hba.conf_regs_memmap_p = 10103 ioremap(phba->pci_bar0_map, bar0map_len); 10104 if (!phba->sli4_hba.conf_regs_memmap_p) { 10105 dev_printk(KERN_ERR, &pdev->dev, 10106 "ioremap failed for SLI4 PCI config " 10107 "registers.\n"); 10108 return -ENODEV; 10109 } 10110 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 10111 /* Set up BAR0 PCI config space register memory map */ 10112 lpfc_sli4_bar0_register_memmap(phba, if_type); 10113 } else { 10114 phba->pci_bar0_map = pci_resource_start(pdev, 1); 10115 bar0map_len = pci_resource_len(pdev, 1); 10116 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10117 dev_printk(KERN_ERR, &pdev->dev, 10118 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 10119 return -ENODEV; 10120 } 10121 phba->sli4_hba.conf_regs_memmap_p = 10122 ioremap(phba->pci_bar0_map, bar0map_len); 10123 if (!phba->sli4_hba.conf_regs_memmap_p) { 10124 dev_printk(KERN_ERR, &pdev->dev, 10125 "ioremap failed for SLI4 PCI config " 10126 "registers.\n"); 10127 return -ENODEV; 10128 } 10129 lpfc_sli4_bar0_register_memmap(phba, if_type); 10130 } 10131 10132 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10133 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 10134 /* 10135 * Map SLI4 if type 0 HBA Control Register base to a 10136 * kernel virtual address and setup the registers. 10137 */ 10138 phba->pci_bar1_map = pci_resource_start(pdev, 10139 PCI_64BIT_BAR2); 10140 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10141 phba->sli4_hba.ctrl_regs_memmap_p = 10142 ioremap(phba->pci_bar1_map, 10143 bar1map_len); 10144 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 10145 dev_err(&pdev->dev, 10146 "ioremap failed for SLI4 HBA " 10147 "control registers.\n"); 10148 error = -ENOMEM; 10149 goto out_iounmap_conf; 10150 } 10151 phba->pci_bar2_memmap_p = 10152 phba->sli4_hba.ctrl_regs_memmap_p; 10153 lpfc_sli4_bar1_register_memmap(phba, if_type); 10154 } else { 10155 error = -ENOMEM; 10156 goto out_iounmap_conf; 10157 } 10158 } 10159 10160 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 10161 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 10162 /* 10163 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 10164 * virtual address and setup the registers. 10165 */ 10166 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 10167 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10168 phba->sli4_hba.drbl_regs_memmap_p = 10169 ioremap(phba->pci_bar1_map, bar1map_len); 10170 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10171 dev_err(&pdev->dev, 10172 "ioremap failed for SLI4 HBA doorbell registers.\n"); 10173 error = -ENOMEM; 10174 goto out_iounmap_conf; 10175 } 10176 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 10177 lpfc_sli4_bar1_register_memmap(phba, if_type); 10178 } 10179 10180 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10181 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10182 /* 10183 * Map SLI4 if type 0 HBA Doorbell Register base to 10184 * a kernel virtual address and setup the registers. 
10185 */ 10186 phba->pci_bar2_map = pci_resource_start(pdev, 10187 PCI_64BIT_BAR4); 10188 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10189 phba->sli4_hba.drbl_regs_memmap_p = 10190 ioremap(phba->pci_bar2_map, 10191 bar2map_len); 10192 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10193 dev_err(&pdev->dev, 10194 "ioremap failed for SLI4 HBA" 10195 " doorbell registers.\n"); 10196 error = -ENOMEM; 10197 goto out_iounmap_ctrl; 10198 } 10199 phba->pci_bar4_memmap_p = 10200 phba->sli4_hba.drbl_regs_memmap_p; 10201 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 10202 if (error) 10203 goto out_iounmap_all; 10204 } else { 10205 error = -ENOMEM; 10206 goto out_iounmap_all; 10207 } 10208 } 10209 10210 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 10211 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10212 /* 10213 * Map SLI4 if type 6 HBA DPP Register base to a kernel 10214 * virtual address and set up the registers. 10215 */ 10216 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 10217 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10218 phba->sli4_hba.dpp_regs_memmap_p = 10219 ioremap(phba->pci_bar2_map, bar2map_len); 10220 if (!phba->sli4_hba.dpp_regs_memmap_p) { 10221 dev_err(&pdev->dev, 10222 "ioremap failed for SLI4 HBA dpp registers.\n"); 10223 error = -ENOMEM; 10224 goto out_iounmap_ctrl; 10225 } 10226 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 10227 } 10228 10229 /* Set up the EQ/CQ register handling functions now */ 10230 switch (if_type) { 10231 case LPFC_SLI_INTF_IF_TYPE_0: 10232 case LPFC_SLI_INTF_IF_TYPE_2: 10233 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 10234 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 10235 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 10236 break; 10237 case LPFC_SLI_INTF_IF_TYPE_6: 10238 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 10239 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 10240 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 10241 break; 10242 default: 10243 break; 10244 } 10245 10246 return 0; 10247 10248 out_iounmap_all: 10249 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10250 out_iounmap_ctrl: 10251 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10252 out_iounmap_conf: 10253 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10254 10255 return error; 10256 } 10257 10258 /** 10259 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 10260 * @phba: pointer to lpfc hba data structure. 10261 * 10262 * This routine is invoked to unset the PCI device memory space for device 10263 * with SLI-4 interface spec.
10264 **/ 10265 static void 10266 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 10267 { 10268 uint32_t if_type; 10269 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10270 10271 switch (if_type) { 10272 case LPFC_SLI_INTF_IF_TYPE_0: 10273 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10274 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10275 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10276 break; 10277 case LPFC_SLI_INTF_IF_TYPE_2: 10278 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10279 break; 10280 case LPFC_SLI_INTF_IF_TYPE_6: 10281 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10282 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10283 break; 10284 case LPFC_SLI_INTF_IF_TYPE_1: 10285 default: 10286 dev_printk(KERN_ERR, &phba->pcidev->dev, 10287 "FATAL - unsupported SLI4 interface type - %d\n", 10288 if_type); 10289 break; 10290 } 10291 } 10292 10293 /** 10294 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 10295 * @phba: pointer to lpfc hba data structure. 10296 * 10297 * This routine is invoked to enable the MSI-X interrupt vectors to device 10298 * with SLI-3 interface specs. 10299 * 10300 * Return codes 10301 * 0 - successful 10302 * other values - error 10303 **/ 10304 static int 10305 lpfc_sli_enable_msix(struct lpfc_hba *phba) 10306 { 10307 int rc; 10308 LPFC_MBOXQ_t *pmb; 10309 10310 /* Set up MSI-X multi-message vectors */ 10311 rc = pci_alloc_irq_vectors(phba->pcidev, 10312 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 10313 if (rc < 0) { 10314 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10315 "0420 PCI enable MSI-X failed (%d)\n", rc); 10316 goto vec_fail_out; 10317 } 10318 10319 /* 10320 * Assign MSI-X vectors to interrupt handlers 10321 */ 10322 10323 /* vector-0 is associated to slow-path handler */ 10324 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 10325 &lpfc_sli_sp_intr_handler, 0, 10326 LPFC_SP_DRIVER_HANDLER_NAME, phba); 10327 if (rc) { 10328 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10329 "0421 MSI-X slow-path request_irq failed " 10330 "(%d)\n", rc); 10331 goto msi_fail_out; 10332 } 10333 10334 /* vector-1 is associated to fast-path handler */ 10335 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 10336 &lpfc_sli_fp_intr_handler, 0, 10337 LPFC_FP_DRIVER_HANDLER_NAME, phba); 10338 10339 if (rc) { 10340 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10341 "0429 MSI-X fast-path request_irq failed " 10342 "(%d)\n", rc); 10343 goto irq_fail_out; 10344 } 10345 10346 /* 10347 * Configure HBA MSI-X attention conditions to messages 10348 */ 10349 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10350 10351 if (!pmb) { 10352 rc = -ENOMEM; 10353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10354 "0474 Unable to allocate memory for issuing " 10355 "MBOX_CONFIG_MSI command\n"); 10356 goto mem_fail_out; 10357 } 10358 rc = lpfc_config_msi(phba, pmb); 10359 if (rc) 10360 goto mbx_fail_out; 10361 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10362 if (rc != MBX_SUCCESS) { 10363 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 10364 "0351 Config MSI mailbox command failed, " 10365 "mbxCmd x%x, mbxStatus x%x\n", 10366 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 10367 goto mbx_fail_out; 10368 } 10369 10370 /* Free memory allocated for mailbox command */ 10371 mempool_free(pmb, phba->mbox_mem_pool); 10372 return rc; 10373 10374 mbx_fail_out: 10375 /* Free memory allocated for mailbox command */ 10376 mempool_free(pmb, phba->mbox_mem_pool); 10377 10378 mem_fail_out: 10379 /* free the irq already requested */ 10380 
free_irq(pci_irq_vector(phba->pcidev, 1), phba); 10381 10382 irq_fail_out: 10383 /* free the irq already requested */ 10384 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 10385 10386 msi_fail_out: 10387 /* Unconfigure MSI-X capability structure */ 10388 pci_free_irq_vectors(phba->pcidev); 10389 10390 vec_fail_out: 10391 return rc; 10392 } 10393 10394 /** 10395 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 10396 * @phba: pointer to lpfc hba data structure. 10397 * 10398 * This routine is invoked to enable the MSI interrupt mode to device with 10399 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 10400 * enable the MSI vector. The device driver is responsible for calling the 10401 * request_irq() to register the MSI vector with an interrupt handler, which 10402 * is done in this function. 10403 * 10404 * Return codes 10405 * 0 - successful 10406 * other values - error 10407 */ 10408 static int 10409 lpfc_sli_enable_msi(struct lpfc_hba *phba) 10410 { 10411 int rc; 10412 10413 rc = pci_enable_msi(phba->pcidev); 10414 if (!rc) 10415 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10416 "0462 PCI enable MSI mode success.\n"); 10417 else { 10418 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10419 "0471 PCI enable MSI mode failed (%d)\n", rc); 10420 return rc; 10421 } 10422 10423 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10424 0, LPFC_DRIVER_NAME, phba); 10425 if (rc) { 10426 pci_disable_msi(phba->pcidev); 10427 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10428 "0478 MSI request_irq failed (%d)\n", rc); 10429 } 10430 return rc; 10431 } 10432 10433 /** 10434 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 10435 * @phba: pointer to lpfc hba data structure. 10436 * 10437 * This routine is invoked to enable device interrupt and associate the 10438 * driver's interrupt handler(s) to interrupt vector(s) of a device with SLI-3 10439 * interface spec. Depending on the interrupt mode configured for the driver, 10440 * it will try to fall back from the configured interrupt mode to an interrupt 10441 * mode which is supported by the platform, kernel, and device in the order 10442 * of: 10443 * MSI-X -> MSI -> IRQ.
10444 * 10445 * Return codes 10446 * 0 - successful 10447 * other values - error 10448 **/ 10449 static uint32_t 10450 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10451 { 10452 uint32_t intr_mode = LPFC_INTR_ERROR; 10453 int retval; 10454 10455 if (cfg_mode == 2) { 10456 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 10457 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 10458 if (!retval) { 10459 /* Now, try to enable MSI-X interrupt mode */ 10460 retval = lpfc_sli_enable_msix(phba); 10461 if (!retval) { 10462 /* Indicate initialization to MSI-X mode */ 10463 phba->intr_type = MSIX; 10464 intr_mode = 2; 10465 } 10466 } 10467 } 10468 10469 /* Fallback to MSI if MSI-X initialization failed */ 10470 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10471 retval = lpfc_sli_enable_msi(phba); 10472 if (!retval) { 10473 /* Indicate initialization to MSI mode */ 10474 phba->intr_type = MSI; 10475 intr_mode = 1; 10476 } 10477 } 10478 10479 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 10480 if (phba->intr_type == NONE) { 10481 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10482 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10483 if (!retval) { 10484 /* Indicate initialization to INTx mode */ 10485 phba->intr_type = INTx; 10486 intr_mode = 0; 10487 } 10488 } 10489 return intr_mode; 10490 } 10491 10492 /** 10493 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 10494 * @phba: pointer to lpfc hba data structure. 10495 * 10496 * This routine is invoked to disable device interrupt and disassociate the 10497 * driver's interrupt handler(s) from interrupt vector(s) to device with 10498 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 10499 * release the interrupt vector(s) for the message signaled interrupt. 10500 **/ 10501 static void 10502 lpfc_sli_disable_intr(struct lpfc_hba *phba) 10503 { 10504 int nr_irqs, i; 10505 10506 if (phba->intr_type == MSIX) 10507 nr_irqs = LPFC_MSIX_VECTORS; 10508 else 10509 nr_irqs = 1; 10510 10511 for (i = 0; i < nr_irqs; i++) 10512 free_irq(pci_irq_vector(phba->pcidev, i), phba); 10513 pci_free_irq_vectors(phba->pcidev); 10514 10515 /* Reset interrupt management states */ 10516 phba->intr_type = NONE; 10517 phba->sli.slistat.sli_intr = 0; 10518 } 10519 10520 /** 10521 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified EQ 10522 * @phba: pointer to lpfc hba data structure. 10523 * @id: EQ vector index or Hardware Queue index 10524 * @match: LPFC_FIND_BY_EQ = match by EQ 10525 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 10526 */ 10527 static uint16_t 10528 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 10529 { 10530 struct lpfc_vector_map_info *cpup; 10531 int cpu; 10532 10533 /* Find the desired phys_id for the specified EQ */ 10534 for_each_present_cpu(cpu) { 10535 cpup = &phba->sli4_hba.cpu_map[cpu]; 10536 if ((match == LPFC_FIND_BY_EQ) && 10537 (cpup->irq != LPFC_VECTOR_MAP_EMPTY) && 10538 (cpup->eq == id)) 10539 return cpu; 10540 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 10541 return cpu; 10542 } 10543 return 0; 10544 } 10545 10546 /** 10547 * lpfc_find_eq_handle - Find the EQ that corresponds to the specified 10548 * Hardware Queue 10549 * @phba: pointer to lpfc hba data structure. 
10550 * @hdwq: Hardware Queue index 10551 */ 10552 static uint16_t 10553 lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq) 10554 { 10555 struct lpfc_vector_map_info *cpup; 10556 int cpu; 10557 10558 /* Find the desired phys_id for the specified EQ */ 10559 for_each_present_cpu(cpu) { 10560 cpup = &phba->sli4_hba.cpu_map[cpu]; 10561 if (cpup->hdwq == hdwq) 10562 return cpup->eq; 10563 } 10564 return 0; 10565 } 10566 10567 #ifdef CONFIG_X86 10568 /** 10569 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 10570 * @phba: pointer to lpfc hba data structure. 10571 * @cpu: CPU map index 10572 * @phys_id: CPU package physical id 10573 * @core_id: CPU core id 10574 */ 10575 static int 10576 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 10577 uint16_t phys_id, uint16_t core_id) 10578 { 10579 struct lpfc_vector_map_info *cpup; 10580 int idx; 10581 10582 for_each_present_cpu(idx) { 10583 cpup = &phba->sli4_hba.cpu_map[idx]; 10584 /* Does the cpup match the one we are looking for */ 10585 if ((cpup->phys_id == phys_id) && 10586 (cpup->core_id == core_id) && 10587 (cpu != idx)) 10588 return 1; 10589 } 10590 return 0; 10591 } 10592 #endif 10593 10594 /** 10595 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 10596 * @phba: pointer to lpfc hba data structure. 10597 * @vectors: number of msix vectors allocated. 10598 * 10599 * The routine will figure out the CPU affinity assignment for every 10600 * MSI-X vector allocated for the HBA. 10601 * In addition, the CPU to IO channel mapping will be calculated 10602 * and the phba->sli4_hba.cpu_map array will reflect this. 10603 */ 10604 static void 10605 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 10606 { 10607 int i, cpu, idx; 10608 int max_phys_id, min_phys_id; 10609 int max_core_id, min_core_id; 10610 struct lpfc_vector_map_info *cpup; 10611 const struct cpumask *maskp; 10612 #ifdef CONFIG_X86 10613 struct cpuinfo_x86 *cpuinfo; 10614 #endif 10615 10616 /* Init cpu_map array */ 10617 memset(phba->sli4_hba.cpu_map, 0xff, 10618 (sizeof(struct lpfc_vector_map_info) * 10619 phba->sli4_hba.num_possible_cpu)); 10620 10621 max_phys_id = 0; 10622 min_phys_id = 0xffff; 10623 max_core_id = 0; 10624 min_core_id = 0xffff; 10625 10626 /* Update CPU map with physical id and core id of each CPU */ 10627 for_each_present_cpu(cpu) { 10628 cpup = &phba->sli4_hba.cpu_map[cpu]; 10629 #ifdef CONFIG_X86 10630 cpuinfo = &cpu_data(cpu); 10631 cpup->phys_id = cpuinfo->phys_proc_id; 10632 cpup->core_id = cpuinfo->cpu_core_id; 10633 cpup->hyper = lpfc_find_hyper(phba, cpu, 10634 cpup->phys_id, cpup->core_id); 10635 #else 10636 /* No distinction between CPUs for other platforms */ 10637 cpup->phys_id = 0; 10638 cpup->core_id = cpu; 10639 cpup->hyper = 0; 10640 #endif 10641 10642 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10643 "3328 CPU physid %d coreid %d\n", 10644 cpup->phys_id, cpup->core_id); 10645 10646 if (cpup->phys_id > max_phys_id) 10647 max_phys_id = cpup->phys_id; 10648 if (cpup->phys_id < min_phys_id) 10649 min_phys_id = cpup->phys_id; 10650 10651 if (cpup->core_id > max_core_id) 10652 max_core_id = cpup->core_id; 10653 if (cpup->core_id < min_core_id) 10654 min_core_id = cpup->core_id; 10655 } 10656 10657 for_each_possible_cpu(i) { 10658 struct lpfc_eq_intr_info *eqi = 10659 per_cpu_ptr(phba->sli4_hba.eq_info, i); 10660 10661 INIT_LIST_HEAD(&eqi->list); 10662 eqi->icnt = 0; 10663 } 10664 10665 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10666 maskp = pci_irq_get_affinity(phba->pcidev, idx); 10667 if (!maskp) 
10668 continue; 10669 10670 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 10671 cpup = &phba->sli4_hba.cpu_map[cpu]; 10672 cpup->eq = idx; 10673 cpup->hdwq = idx; 10674 cpup->irq = pci_irq_vector(phba->pcidev, idx); 10675 10676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10677 "3336 Set Affinity: CPU %d " 10678 "hdwq %d irq %d\n", 10679 cpu, cpup->hdwq, cpup->irq); 10680 } 10681 } 10682 return; 10683 } 10684 10685 /** 10686 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 10687 * @phba: pointer to lpfc hba data structure. 10688 * 10689 * This routine is invoked to enable the MSI-X interrupt vectors to device 10690 * with SLI-4 interface spec. 10691 * 10692 * Return codes 10693 * 0 - successful 10694 * other values - error 10695 **/ 10696 static int 10697 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 10698 { 10699 int vectors, rc, index; 10700 char *name; 10701 10702 /* Set up MSI-X multi-message vectors */ 10703 vectors = phba->cfg_irq_chann; 10704 10705 rc = pci_alloc_irq_vectors(phba->pcidev, 10706 1, 10707 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 10708 if (rc < 0) { 10709 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10710 "0484 PCI enable MSI-X failed (%d)\n", rc); 10711 goto vec_fail_out; 10712 } 10713 vectors = rc; 10714 10715 /* Assign MSI-X vectors to interrupt handlers */ 10716 for (index = 0; index < vectors; index++) { 10717 name = phba->sli4_hba.hba_eq_hdl[index].handler_name; 10718 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 10719 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 10720 LPFC_DRIVER_HANDLER_NAME"%d", index); 10721 10722 phba->sli4_hba.hba_eq_hdl[index].idx = index; 10723 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 10724 rc = request_irq(pci_irq_vector(phba->pcidev, index), 10725 &lpfc_sli4_hba_intr_handler, 0, 10726 name, 10727 &phba->sli4_hba.hba_eq_hdl[index]); 10728 if (rc) { 10729 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10730 "0486 MSI-X fast-path (%d) " 10731 "request_irq failed (%d)\n", index, rc); 10732 goto cfg_fail_out; 10733 } 10734 } 10735 10736 if (vectors != phba->cfg_irq_chann) { 10737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10738 "3238 Reducing IO channels to match number of " 10739 "MSI-X vectors, requested %d got %d\n", 10740 phba->cfg_irq_chann, vectors); 10741 if (phba->cfg_irq_chann > vectors) 10742 phba->cfg_irq_chann = vectors; 10743 if (phba->cfg_nvmet_mrq > vectors) 10744 phba->cfg_nvmet_mrq = vectors; 10745 } 10746 10747 return rc; 10748 10749 cfg_fail_out: 10750 /* free the irq already requested */ 10751 for (--index; index >= 0; index--) 10752 free_irq(pci_irq_vector(phba->pcidev, index), 10753 &phba->sli4_hba.hba_eq_hdl[index]); 10754 10755 /* Unconfigure MSI-X capability structure */ 10756 pci_free_irq_vectors(phba->pcidev); 10757 10758 vec_fail_out: 10759 return rc; 10760 } 10761 10762 /** 10763 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 10764 * @phba: pointer to lpfc hba data structure. 10765 * 10766 * This routine is invoked to enable the MSI interrupt mode to device with 10767 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 10768 * to enable the MSI vector. The device driver is responsible for calling 10769 * the request_irq() to register MSI vector with a interrupt the handler, 10770 * which is done in this function. 
10771 * 10772 * Return codes 10773 * 0 - successful 10774 * other values - error 10775 **/ 10776 static int 10777 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 10778 { 10779 int rc, index; 10780 10781 rc = pci_enable_msi(phba->pcidev); 10782 if (!rc) 10783 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10784 "0487 PCI enable MSI mode success.\n"); 10785 else { 10786 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10787 "0488 PCI enable MSI mode failed (%d)\n", rc); 10788 return rc; 10789 } 10790 10791 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 10792 0, LPFC_DRIVER_NAME, phba); 10793 if (rc) { 10794 pci_disable_msi(phba->pcidev); 10795 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10796 "0490 MSI request_irq failed (%d)\n", rc); 10797 return rc; 10798 } 10799 10800 for (index = 0; index < phba->cfg_irq_chann; index++) { 10801 phba->sli4_hba.hba_eq_hdl[index].idx = index; 10802 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 10803 } 10804 10805 return 0; 10806 } 10807 10808 /** 10809 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 10810 * @phba: pointer to lpfc hba data structure. 10811 * 10812 * This routine is invoked to enable device interrupt and associate driver's 10813 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 10814 * interface spec. Depends on the interrupt mode configured to the driver, 10815 * the driver will try to fallback from the configured interrupt mode to an 10816 * interrupt mode which is supported by the platform, kernel, and device in 10817 * the order of: 10818 * MSI-X -> MSI -> IRQ. 10819 * 10820 * Return codes 10821 * 0 - successful 10822 * other values - error 10823 **/ 10824 static uint32_t 10825 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10826 { 10827 uint32_t intr_mode = LPFC_INTR_ERROR; 10828 int retval, idx; 10829 10830 if (cfg_mode == 2) { 10831 /* Preparation before conf_msi mbox cmd */ 10832 retval = 0; 10833 if (!retval) { 10834 /* Now, try to enable MSI-X interrupt mode */ 10835 retval = lpfc_sli4_enable_msix(phba); 10836 if (!retval) { 10837 /* Indicate initialization to MSI-X mode */ 10838 phba->intr_type = MSIX; 10839 intr_mode = 2; 10840 } 10841 } 10842 } 10843 10844 /* Fallback to MSI if MSI-X initialization failed */ 10845 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10846 retval = lpfc_sli4_enable_msi(phba); 10847 if (!retval) { 10848 /* Indicate initialization to MSI mode */ 10849 phba->intr_type = MSI; 10850 intr_mode = 1; 10851 } 10852 } 10853 10854 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 10855 if (phba->intr_type == NONE) { 10856 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 10857 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10858 if (!retval) { 10859 struct lpfc_hba_eq_hdl *eqhdl; 10860 10861 /* Indicate initialization to INTx mode */ 10862 phba->intr_type = INTx; 10863 intr_mode = 0; 10864 10865 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10866 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; 10867 eqhdl->idx = idx; 10868 eqhdl->phba = phba; 10869 } 10870 } 10871 } 10872 return intr_mode; 10873 } 10874 10875 /** 10876 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 10877 * @phba: pointer to lpfc hba data structure. 10878 * 10879 * This routine is invoked to disable device interrupt and disassociate 10880 * the driver's interrupt handler(s) from interrupt vector(s) to device 10881 * with SLI-4 interface spec. 
Depending on the interrupt mode, the driver 10882 * will release the interrupt vector(s) for the message signaled interrupt. 10883 **/ 10884 static void 10885 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 10886 { 10887 /* Disable the currently initialized interrupt mode */ 10888 if (phba->intr_type == MSIX) { 10889 int index; 10890 10891 /* Free up MSI-X multi-message vectors */ 10892 for (index = 0; index < phba->cfg_irq_chann; index++) { 10893 irq_set_affinity_hint( 10894 pci_irq_vector(phba->pcidev, index), 10895 NULL); 10896 free_irq(pci_irq_vector(phba->pcidev, index), 10897 &phba->sli4_hba.hba_eq_hdl[index]); 10898 } 10899 } else { 10900 free_irq(phba->pcidev->irq, phba); 10901 } 10902 10903 pci_free_irq_vectors(phba->pcidev); 10904 10905 /* Reset interrupt management states */ 10906 phba->intr_type = NONE; 10907 phba->sli.slistat.sli_intr = 0; 10908 } 10909 10910 /** 10911 * lpfc_unset_hba - Unset SLI3 hba device initialization 10912 * @phba: pointer to lpfc hba data structure. 10913 * 10914 * This routine is invoked to unset the HBA device initialization steps to 10915 * a device with SLI-3 interface spec. 10916 **/ 10917 static void 10918 lpfc_unset_hba(struct lpfc_hba *phba) 10919 { 10920 struct lpfc_vport *vport = phba->pport; 10921 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10922 10923 spin_lock_irq(shost->host_lock); 10924 vport->load_flag |= FC_UNLOADING; 10925 spin_unlock_irq(shost->host_lock); 10926 10927 kfree(phba->vpi_bmask); 10928 kfree(phba->vpi_ids); 10929 10930 lpfc_stop_hba_timers(phba); 10931 10932 phba->pport->work_port_events = 0; 10933 10934 lpfc_sli_hba_down(phba); 10935 10936 lpfc_sli_brdrestart(phba); 10937 10938 lpfc_sli_disable_intr(phba); 10939 10940 return; 10941 } 10942 10943 /** 10944 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 10945 * @phba: Pointer to HBA context object. 10946 * 10947 * This function is called in the SLI4 code path to wait for completion 10948 * of device's XRIs exchange busy. It will check the XRI exchange busy 10949 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 10950 * that, it will check the XRI exchange busy on outstanding FCP and ELS 10951 * I/Os every 30 seconds, log error message, and wait forever. Only when 10952 * all XRI exchange busy complete, the driver unload shall proceed with 10953 * invoking the function reset ioctl mailbox command to the CNA and the 10954 * the rest of the driver unload resource release. 10955 **/ 10956 static void 10957 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 10958 { 10959 struct lpfc_sli4_hdw_queue *qp; 10960 int idx, ccnt, fcnt; 10961 int wait_time = 0; 10962 int io_xri_cmpl = 1; 10963 int nvmet_xri_cmpl = 1; 10964 int fcp_xri_cmpl = 1; 10965 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 10966 10967 /* Driver just aborted IOs during the hba_unset process. Pause 10968 * here to give the HBA time to complete the IO and get entries 10969 * into the abts lists. 10970 */ 10971 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 10972 10973 /* Wait for NVME pending IO to flush back to transport. 
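	 * lpfc_nvme_wait_for_io_drain() is used so that any NVME I/O still in
	 * flight is returned to the transport before the abort lists below
	 * are examined.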
*/ 10974 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 10975 lpfc_nvme_wait_for_io_drain(phba); 10976 10977 ccnt = 0; 10978 fcnt = 0; 10979 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10980 qp = &phba->sli4_hba.hdwq[idx]; 10981 fcp_xri_cmpl = list_empty( 10982 &qp->lpfc_abts_scsi_buf_list); 10983 if (!fcp_xri_cmpl) /* if list is NOT empty */ 10984 fcnt++; 10985 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10986 io_xri_cmpl = list_empty( 10987 &qp->lpfc_abts_nvme_buf_list); 10988 if (!io_xri_cmpl) /* if list is NOT empty */ 10989 ccnt++; 10990 } 10991 } 10992 if (ccnt) 10993 io_xri_cmpl = 0; 10994 if (fcnt) 10995 fcp_xri_cmpl = 0; 10996 10997 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10998 nvmet_xri_cmpl = 10999 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11000 } 11001 11002 while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl || 11003 !nvmet_xri_cmpl) { 11004 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 11005 if (!nvmet_xri_cmpl) 11006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11007 "6424 NVMET XRI exchange busy " 11008 "wait time: %d seconds.\n", 11009 wait_time/1000); 11010 if (!io_xri_cmpl) 11011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11012 "6100 NVME XRI exchange busy " 11013 "wait time: %d seconds.\n", 11014 wait_time/1000); 11015 if (!fcp_xri_cmpl) 11016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11017 "2877 FCP XRI exchange busy " 11018 "wait time: %d seconds.\n", 11019 wait_time/1000); 11020 if (!els_xri_cmpl) 11021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11022 "2878 ELS XRI exchange busy " 11023 "wait time: %d seconds.\n", 11024 wait_time/1000); 11025 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 11026 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 11027 } else { 11028 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 11029 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 11030 } 11031 11032 ccnt = 0; 11033 fcnt = 0; 11034 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11035 qp = &phba->sli4_hba.hdwq[idx]; 11036 fcp_xri_cmpl = list_empty( 11037 &qp->lpfc_abts_scsi_buf_list); 11038 if (!fcp_xri_cmpl) /* if list is NOT empty */ 11039 fcnt++; 11040 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11041 io_xri_cmpl = list_empty( 11042 &qp->lpfc_abts_nvme_buf_list); 11043 if (!io_xri_cmpl) /* if list is NOT empty */ 11044 ccnt++; 11045 } 11046 } 11047 if (ccnt) 11048 io_xri_cmpl = 0; 11049 if (fcnt) 11050 fcp_xri_cmpl = 0; 11051 11052 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11053 nvmet_xri_cmpl = list_empty( 11054 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11055 } 11056 els_xri_cmpl = 11057 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11058 11059 } 11060 } 11061 11062 /** 11063 * lpfc_sli4_hba_unset - Unset the fcoe hba 11064 * @phba: Pointer to HBA context object. 11065 * 11066 * This function is called in the SLI4 code path to reset the HBA's FCoE 11067 * function. The caller is not required to hold any lock. This routine 11068 * issues PCI function reset mailbox command to reset the FCoE function. 11069 * At the end of the function, it calls lpfc_hba_down_post function to 11070 * free any pending commands. 11071 **/ 11072 static void 11073 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 11074 { 11075 int wait_cnt = 0; 11076 LPFC_MBOXQ_t *mboxq; 11077 struct pci_dev *pdev = phba->pcidev; 11078 11079 lpfc_stop_hba_timers(phba); 11080 if (phba->pport) 11081 phba->sli4_hba.intr_enable = 0; 11082 11083 /* 11084 * Gracefully wait out the potential current outstanding asynchronous 11085 * mailbox command. 
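	 * The wait is bounded: the driver polls every 10ms up to
	 * LPFC_ACTIVE_MBOX_WAIT_CNT times and then forcibly completes the
	 * command with MBX_NOT_FINISHED if it is still active.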
11086 */ 11087 11088 /* First, block any pending async mailbox command from posted */ 11089 spin_lock_irq(&phba->hbalock); 11090 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 11091 spin_unlock_irq(&phba->hbalock); 11092 /* Now, trying to wait it out if we can */ 11093 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11094 msleep(10); 11095 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 11096 break; 11097 } 11098 /* Forcefully release the outstanding mailbox command if timed out */ 11099 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11100 spin_lock_irq(&phba->hbalock); 11101 mboxq = phba->sli.mbox_active; 11102 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 11103 __lpfc_mbox_cmpl_put(phba, mboxq); 11104 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 11105 phba->sli.mbox_active = NULL; 11106 spin_unlock_irq(&phba->hbalock); 11107 } 11108 11109 /* Abort all iocbs associated with the hba */ 11110 lpfc_sli_hba_iocb_abort(phba); 11111 11112 /* Wait for completion of device XRI exchange busy */ 11113 lpfc_sli4_xri_exchange_busy_wait(phba); 11114 11115 /* Disable PCI subsystem interrupt */ 11116 lpfc_sli4_disable_intr(phba); 11117 11118 /* Disable SR-IOV if enabled */ 11119 if (phba->cfg_sriov_nr_virtfn) 11120 pci_disable_sriov(pdev); 11121 11122 /* Stop kthread signal shall trigger work_done one more time */ 11123 kthread_stop(phba->worker_thread); 11124 11125 /* Disable FW logging to host memory */ 11126 lpfc_ras_stop_fwlog(phba); 11127 11128 /* Unset the queues shared with the hardware then release all 11129 * allocated resources. 11130 */ 11131 lpfc_sli4_queue_unset(phba); 11132 lpfc_sli4_queue_destroy(phba); 11133 11134 /* Reset SLI4 HBA FCoE function */ 11135 lpfc_pci_function_reset(phba); 11136 11137 /* Free RAS DMA memory */ 11138 if (phba->ras_fwlog.ras_enabled) 11139 lpfc_sli4_ras_dma_free(phba); 11140 11141 /* Stop the SLI4 device port */ 11142 if (phba->pport) 11143 phba->pport->work_port_events = 0; 11144 } 11145 11146 /** 11147 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 11148 * @phba: Pointer to HBA context object. 11149 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11150 * 11151 * This function is called in the SLI4 code path to read the port's 11152 * sli4 capabilities. 11153 * 11154 * This function may be be called from any context that can block-wait 11155 * for the completion. The expectation is that this routine is called 11156 * typically from probe_one or from the online routine. 
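 *
 * Return codes
 * 	0 - successful
 * 	1 - mailbox command did not complete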
11157 **/ 11158 int 11159 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11160 { 11161 int rc; 11162 struct lpfc_mqe *mqe; 11163 struct lpfc_pc_sli4_params *sli4_params; 11164 uint32_t mbox_tmo; 11165 11166 rc = 0; 11167 mqe = &mboxq->u.mqe; 11168 11169 /* Read the port's SLI4 Parameters port capabilities */ 11170 lpfc_pc_sli4_params(mboxq); 11171 if (!phba->sli4_hba.intr_enable) 11172 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11173 else { 11174 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11175 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11176 } 11177 11178 if (unlikely(rc)) 11179 return 1; 11180 11181 sli4_params = &phba->sli4_hba.pc_sli4_params; 11182 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 11183 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 11184 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 11185 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 11186 &mqe->un.sli4_params); 11187 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 11188 &mqe->un.sli4_params); 11189 sli4_params->proto_types = mqe->un.sli4_params.word3; 11190 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 11191 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 11192 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 11193 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 11194 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 11195 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 11196 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 11197 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 11198 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 11199 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 11200 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 11201 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 11202 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 11203 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 11204 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 11205 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 11206 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 11207 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 11208 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 11209 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 11210 11211 /* Make sure that sge_supp_len can be handled by the driver */ 11212 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11213 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11214 11215 return rc; 11216 } 11217 11218 /** 11219 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 11220 * @phba: Pointer to HBA context object. 11221 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11222 * 11223 * This function is called in the SLI4 code path to read the port's 11224 * sli4 capabilities. 11225 * 11226 * This function may be be called from any context that can block-wait 11227 * for the completion. The expectation is that this routine is called 11228 * typically from probe_one or from the online routine. 
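 *
 * Return codes
 * 	0 - successful
 * 	non-zero - mailbox command failed, or -ENODEV when NVME is the only
 * 	configured FC4 type but the firmware does not support NVME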
11229 **/ 11230 int 11231 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11232 { 11233 int rc; 11234 struct lpfc_mqe *mqe = &mboxq->u.mqe; 11235 struct lpfc_pc_sli4_params *sli4_params; 11236 uint32_t mbox_tmo; 11237 int length; 11238 bool exp_wqcq_pages = true; 11239 struct lpfc_sli4_parameters *mbx_sli4_parameters; 11240 11241 /* 11242 * By default, the driver assumes the SLI4 port requires RPI 11243 * header postings. The SLI4_PARAM response will correct this 11244 * assumption. 11245 */ 11246 phba->sli4_hba.rpi_hdrs_in_use = 1; 11247 11248 /* Read the port's SLI4 Config Parameters */ 11249 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 11250 sizeof(struct lpfc_sli4_cfg_mhdr)); 11251 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11252 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 11253 length, LPFC_SLI4_MBX_EMBED); 11254 if (!phba->sli4_hba.intr_enable) 11255 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11256 else { 11257 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11258 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11259 } 11260 if (unlikely(rc)) 11261 return rc; 11262 sli4_params = &phba->sli4_hba.pc_sli4_params; 11263 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 11264 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 11265 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 11266 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 11267 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 11268 mbx_sli4_parameters); 11269 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 11270 mbx_sli4_parameters); 11271 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 11272 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 11273 else 11274 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 11275 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 11276 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 11277 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 11278 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 11279 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 11280 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 11281 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 11282 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 11283 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 11284 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 11285 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 11286 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 11287 mbx_sli4_parameters); 11288 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 11289 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 11290 mbx_sli4_parameters); 11291 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 11292 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 11293 phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) && 11294 bf_get(cfg_xib, mbx_sli4_parameters)); 11295 11296 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) || 11297 !phba->nvme_support) { 11298 phba->nvme_support = 0; 11299 phba->nvmet_support = 0; 11300 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF; 11301 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 11302 "6101 Disabling NVME support: " 11303 "Not supported by firmware: %d %d\n", 11304 bf_get(cfg_nvme, mbx_sli4_parameters), 11305 bf_get(cfg_xib, mbx_sli4_parameters)); 11306 11307 /* If firmware doesn't support NVME, just use 
SCSI support */ 11308 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 11309 return -ENODEV; 11310 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 11311 } 11312 11313 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ 11314 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 11315 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) 11316 phba->cfg_enable_pbde = 0; 11317 11318 /* 11319 * To support Suppress Response feature we must satisfy 3 conditions. 11320 * lpfc_suppress_rsp module parameter must be set (default). 11321 * In SLI4-Parameters Descriptor: 11322 * Extended Inline Buffers (XIB) must be supported. 11323 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 11324 * (double negative). 11325 */ 11326 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 11327 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 11328 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 11329 else 11330 phba->cfg_suppress_rsp = 0; 11331 11332 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 11333 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 11334 11335 /* Make sure that sge_supp_len can be handled by the driver */ 11336 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11337 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11338 11339 /* 11340 * Check whether the adapter supports an embedded copy of the 11341 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 11342 * to use this option, 128-byte WQEs must be used. 11343 */ 11344 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 11345 phba->fcp_embed_io = 1; 11346 else 11347 phba->fcp_embed_io = 0; 11348 11349 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 11350 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 11351 bf_get(cfg_xib, mbx_sli4_parameters), 11352 phba->cfg_enable_pbde, 11353 phba->fcp_embed_io, phba->nvme_support, 11354 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 11355 11356 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 11357 LPFC_SLI_INTF_IF_TYPE_2) && 11358 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 11359 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 11360 exp_wqcq_pages = false; 11361 11362 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 11363 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 11364 exp_wqcq_pages && 11365 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 11366 phba->enab_exp_wqcq_pages = 1; 11367 else 11368 phba->enab_exp_wqcq_pages = 0; 11369 /* 11370 * Check if the SLI port supports MDS Diagnostics 11371 */ 11372 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 11373 phba->mds_diags_support = 1; 11374 else 11375 phba->mds_diags_support = 0; 11376 11377 return 0; 11378 } 11379 11380 /** 11381 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 11382 * @pdev: pointer to PCI device 11383 * @pid: pointer to PCI device identifier 11384 * 11385 * This routine is to be called to attach a device with SLI-3 interface spec 11386 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 11387 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 11388 * information of the device and driver to see if the driver state that it can 11389 * support this kind of device. If the match is successful, the driver core 11390 * invokes this routine. If this routine determines it can claim the HBA, it 11391 * does all the initialization that it needs to do to handle the HBA properly. 
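 * For SLI-3 this covers mapping the PCI BARs, setting up the driver
 * resources and iocb list, creating the SCSI host, and then walking the
 * interrupt modes (MSI-X, MSI, INTx) until one passes the active
 * interrupt test.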
11392 * 11393 * Return code 11394 * 0 - driver can claim the device 11395 * negative value - driver can not claim the device 11396 **/ 11397 static int 11398 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 11399 { 11400 struct lpfc_hba *phba; 11401 struct lpfc_vport *vport = NULL; 11402 struct Scsi_Host *shost = NULL; 11403 int error; 11404 uint32_t cfg_mode, intr_mode; 11405 11406 /* Allocate memory for HBA structure */ 11407 phba = lpfc_hba_alloc(pdev); 11408 if (!phba) 11409 return -ENOMEM; 11410 11411 /* Perform generic PCI device enabling operation */ 11412 error = lpfc_enable_pci_dev(phba); 11413 if (error) 11414 goto out_free_phba; 11415 11416 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 11417 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 11418 if (error) 11419 goto out_disable_pci_dev; 11420 11421 /* Set up SLI-3 specific device PCI memory space */ 11422 error = lpfc_sli_pci_mem_setup(phba); 11423 if (error) { 11424 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11425 "1402 Failed to set up pci memory space.\n"); 11426 goto out_disable_pci_dev; 11427 } 11428 11429 /* Set up SLI-3 specific device driver resources */ 11430 error = lpfc_sli_driver_resource_setup(phba); 11431 if (error) { 11432 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11433 "1404 Failed to set up driver resource.\n"); 11434 goto out_unset_pci_mem_s3; 11435 } 11436 11437 /* Initialize and populate the iocb list per host */ 11438 11439 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 11440 if (error) { 11441 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11442 "1405 Failed to initialize iocb list.\n"); 11443 goto out_unset_driver_resource_s3; 11444 } 11445 11446 /* Set up common device driver resources */ 11447 error = lpfc_setup_driver_resource_phase2(phba); 11448 if (error) { 11449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11450 "1406 Failed to set up driver resource.\n"); 11451 goto out_free_iocb_list; 11452 } 11453 11454 /* Get the default values for Model Name and Description */ 11455 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 11456 11457 /* Create SCSI host to the physical port */ 11458 error = lpfc_create_shost(phba); 11459 if (error) { 11460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11461 "1407 Failed to create scsi host.\n"); 11462 goto out_unset_driver_resource; 11463 } 11464 11465 /* Configure sysfs attributes */ 11466 vport = phba->pport; 11467 error = lpfc_alloc_sysfs_attr(vport); 11468 if (error) { 11469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11470 "1476 Failed to allocate sysfs attr\n"); 11471 goto out_destroy_shost; 11472 } 11473 11474 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 11475 /* Now, trying to enable interrupt and bring up the device */ 11476 cfg_mode = phba->cfg_use_msi; 11477 while (true) { 11478 /* Put device to a known state before enabling interrupt */ 11479 lpfc_stop_port(phba); 11480 /* Configure and enable interrupt */ 11481 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 11482 if (intr_mode == LPFC_INTR_ERROR) { 11483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11484 "0431 Failed to enable interrupt.\n"); 11485 error = -ENODEV; 11486 goto out_free_sysfs_attr; 11487 } 11488 /* SLI-3 HBA setup */ 11489 if (lpfc_sli_hba_setup(phba)) { 11490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11491 "1477 Failed to set up hba\n"); 11492 error = -ENODEV; 11493 goto out_remove_device; 11494 } 11495 11496 /* Wait 50ms for the interrupts of previous mailbox commands */ 11497 msleep(50); 11498 /* 
Check active interrupts on message signaled interrupts */ 11499 if (intr_mode == 0 || 11500 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 11501 /* Log the current active interrupt mode */ 11502 phba->intr_mode = intr_mode; 11503 lpfc_log_intr_mode(phba, intr_mode); 11504 break; 11505 } else { 11506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11507 "0447 Configure interrupt mode (%d) " 11508 "failed active interrupt test.\n", 11509 intr_mode); 11510 /* Disable the current interrupt mode */ 11511 lpfc_sli_disable_intr(phba); 11512 /* Try next level of interrupt mode */ 11513 cfg_mode = --intr_mode; 11514 } 11515 } 11516 11517 /* Perform post initialization setup */ 11518 lpfc_post_init_setup(phba); 11519 11520 /* Check if there are static vports to be created. */ 11521 lpfc_create_static_vport(phba); 11522 11523 return 0; 11524 11525 out_remove_device: 11526 lpfc_unset_hba(phba); 11527 out_free_sysfs_attr: 11528 lpfc_free_sysfs_attr(vport); 11529 out_destroy_shost: 11530 lpfc_destroy_shost(phba); 11531 out_unset_driver_resource: 11532 lpfc_unset_driver_resource_phase2(phba); 11533 out_free_iocb_list: 11534 lpfc_free_iocb_list(phba); 11535 out_unset_driver_resource_s3: 11536 lpfc_sli_driver_resource_unset(phba); 11537 out_unset_pci_mem_s3: 11538 lpfc_sli_pci_mem_unset(phba); 11539 out_disable_pci_dev: 11540 lpfc_disable_pci_dev(phba); 11541 if (shost) 11542 scsi_host_put(shost); 11543 out_free_phba: 11544 lpfc_hba_free(phba); 11545 return error; 11546 } 11547 11548 /** 11549 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 11550 * @pdev: pointer to PCI device 11551 * 11552 * This routine is to be called to disattach a device with SLI-3 interface 11553 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 11554 * removed from PCI bus, it performs all the necessary cleanup for the HBA 11555 * device to be removed from the PCI subsystem properly. 11556 **/ 11557 static void 11558 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 11559 { 11560 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11561 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 11562 struct lpfc_vport **vports; 11563 struct lpfc_hba *phba = vport->phba; 11564 int i; 11565 11566 spin_lock_irq(&phba->hbalock); 11567 vport->load_flag |= FC_UNLOADING; 11568 spin_unlock_irq(&phba->hbalock); 11569 11570 lpfc_free_sysfs_attr(vport); 11571 11572 /* Release all the vports against this physical port */ 11573 vports = lpfc_create_vport_work_array(phba); 11574 if (vports != NULL) 11575 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11576 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 11577 continue; 11578 fc_vport_terminate(vports[i]->fc_vport); 11579 } 11580 lpfc_destroy_vport_work_array(phba, vports); 11581 11582 /* Remove FC host and then SCSI host with the physical port */ 11583 fc_remove_host(shost); 11584 scsi_remove_host(shost); 11585 11586 lpfc_cleanup(vport); 11587 11588 /* 11589 * Bring down the SLI Layer. This step disable all interrupts, 11590 * clears the rings, discards all mailbox commands, and resets 11591 * the HBA. 
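	 * The worker thread is stopped and the board restarted before the
	 * remaining VPI bitmask, timer and DMA resources are released.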
11592 */ 11593 11594 /* HBA interrupt will be disabled after this call */ 11595 lpfc_sli_hba_down(phba); 11596 /* Stop kthread signal shall trigger work_done one more time */ 11597 kthread_stop(phba->worker_thread); 11598 /* Final cleanup of txcmplq and reset the HBA */ 11599 lpfc_sli_brdrestart(phba); 11600 11601 kfree(phba->vpi_bmask); 11602 kfree(phba->vpi_ids); 11603 11604 lpfc_stop_hba_timers(phba); 11605 spin_lock_irq(&phba->port_list_lock); 11606 list_del_init(&vport->listentry); 11607 spin_unlock_irq(&phba->port_list_lock); 11608 11609 lpfc_debugfs_terminate(vport); 11610 11611 /* Disable SR-IOV if enabled */ 11612 if (phba->cfg_sriov_nr_virtfn) 11613 pci_disable_sriov(pdev); 11614 11615 /* Disable interrupt */ 11616 lpfc_sli_disable_intr(phba); 11617 11618 scsi_host_put(shost); 11619 11620 /* 11621 * Call scsi_free before mem_free since scsi bufs are released to their 11622 * corresponding pools here. 11623 */ 11624 lpfc_scsi_free(phba); 11625 lpfc_free_iocb_list(phba); 11626 11627 lpfc_mem_free_all(phba); 11628 11629 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 11630 phba->hbqslimp.virt, phba->hbqslimp.phys); 11631 11632 /* Free resources associated with SLI2 interface */ 11633 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 11634 phba->slim2p.virt, phba->slim2p.phys); 11635 11636 /* unmap adapter SLIM and Control Registers */ 11637 iounmap(phba->ctrl_regs_memmap_p); 11638 iounmap(phba->slim_memmap_p); 11639 11640 lpfc_hba_free(phba); 11641 11642 pci_release_mem_regions(pdev); 11643 pci_disable_device(pdev); 11644 } 11645 11646 /** 11647 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 11648 * @pdev: pointer to PCI device 11649 * @msg: power management message 11650 * 11651 * This routine is to be called from the kernel's PCI subsystem to support 11652 * system Power Management (PM) to device with SLI-3 interface spec. When 11653 * PM invokes this method, it quiesces the device by stopping the driver's 11654 * worker thread for the device, turning off device's interrupt and DMA, 11655 * and bring the device offline. Note that as the driver implements the 11656 * minimum PM requirements to a power-aware driver's PM support for the 11657 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 11658 * to the suspend() method call will be treated as SUSPEND and the driver will 11659 * fully reinitialize its device during resume() method call, the driver will 11660 * set device to PCI_D3hot state in PCI config space instead of setting it 11661 * according to the @msg provided by the PM. 
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec. When
 * PM invokes this method, it restores the device's PCI config space state
 * and fully reinitializes the device and brings it online. Note that the
 * driver implements only the minimum PM requirements for a power-aware
 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
 * the suspend() method are treated as SUSPEND and the driver fully
 * reinitializes its device in the resume() method, so the device is set to
 * PCI_D0 directly in PCI config space before its state is restored.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter.
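	 * The thread was stopped in lpfc_pci_suspend_one_s3(), so it must be
	 * recreated before the HBA is restarted and brought back online.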
*/ 11735 phba->worker_thread = kthread_run(lpfc_do_work, phba, 11736 "lpfc_worker_%d", phba->brd_no); 11737 if (IS_ERR(phba->worker_thread)) { 11738 error = PTR_ERR(phba->worker_thread); 11739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11740 "0434 PM resume failed to start worker " 11741 "thread: error=x%x.\n", error); 11742 return error; 11743 } 11744 11745 /* Configure and enable interrupt */ 11746 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 11747 if (intr_mode == LPFC_INTR_ERROR) { 11748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11749 "0430 PM resume Failed to enable interrupt\n"); 11750 return -EIO; 11751 } else 11752 phba->intr_mode = intr_mode; 11753 11754 /* Restart HBA and bring it online */ 11755 lpfc_sli_brdrestart(phba); 11756 lpfc_online(phba); 11757 11758 /* Log the current active interrupt mode */ 11759 lpfc_log_intr_mode(phba, phba->intr_mode); 11760 11761 return 0; 11762 } 11763 11764 /** 11765 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 11766 * @phba: pointer to lpfc hba data structure. 11767 * 11768 * This routine is called to prepare the SLI3 device for PCI slot recover. It 11769 * aborts all the outstanding SCSI I/Os to the pci device. 11770 **/ 11771 static void 11772 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 11773 { 11774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11775 "2723 PCI channel I/O abort preparing for recovery\n"); 11776 11777 /* 11778 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 11779 * and let the SCSI mid-layer to retry them to recover. 11780 */ 11781 lpfc_sli_abort_fcp_rings(phba); 11782 } 11783 11784 /** 11785 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 11786 * @phba: pointer to lpfc hba data structure. 11787 * 11788 * This routine is called to prepare the SLI3 device for PCI slot reset. It 11789 * disables the device interrupt and pci device, and aborts the internal FCP 11790 * pending I/Os. 11791 **/ 11792 static void 11793 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 11794 { 11795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11796 "2710 PCI channel disable preparing for reset\n"); 11797 11798 /* Block any management I/Os to the device */ 11799 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 11800 11801 /* Block all SCSI devices' I/Os on the host */ 11802 lpfc_scsi_dev_block(phba); 11803 11804 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 11805 lpfc_sli_flush_fcp_rings(phba); 11806 11807 /* stop all timers */ 11808 lpfc_stop_hba_timers(phba); 11809 11810 /* Disable interrupt and pci device */ 11811 lpfc_sli_disable_intr(phba); 11812 pci_disable_device(phba->pcidev); 11813 } 11814 11815 /** 11816 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 11817 * @phba: pointer to lpfc hba data structure. 11818 * 11819 * This routine is called to prepare the SLI3 device for PCI slot permanently 11820 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 11821 * pending I/Os. 
11822 **/ 11823 static void 11824 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 11825 { 11826 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11827 "2711 PCI channel permanent disable for failure\n"); 11828 /* Block all SCSI devices' I/Os on the host */ 11829 lpfc_scsi_dev_block(phba); 11830 11831 /* stop all timers */ 11832 lpfc_stop_hba_timers(phba); 11833 11834 /* Clean up all driver's outstanding SCSI I/Os */ 11835 lpfc_sli_flush_fcp_rings(phba); 11836 } 11837 11838 /** 11839 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 11840 * @pdev: pointer to PCI device. 11841 * @state: the current PCI connection state. 11842 * 11843 * This routine is called from the PCI subsystem for I/O error handling to 11844 * device with SLI-3 interface spec. This function is called by the PCI 11845 * subsystem after a PCI bus error affecting this device has been detected. 11846 * When this function is invoked, it will need to stop all the I/Os and 11847 * interrupt(s) to the device. Once that is done, it will return 11848 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 11849 * as desired. 11850 * 11851 * Return codes 11852 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 11853 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 11854 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11855 **/ 11856 static pci_ers_result_t 11857 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 11858 { 11859 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11860 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11861 11862 switch (state) { 11863 case pci_channel_io_normal: 11864 /* Non-fatal error, prepare for recovery */ 11865 lpfc_sli_prep_dev_for_recover(phba); 11866 return PCI_ERS_RESULT_CAN_RECOVER; 11867 case pci_channel_io_frozen: 11868 /* Fatal error, prepare for slot reset */ 11869 lpfc_sli_prep_dev_for_reset(phba); 11870 return PCI_ERS_RESULT_NEED_RESET; 11871 case pci_channel_io_perm_failure: 11872 /* Permanent failure, prepare for device down */ 11873 lpfc_sli_prep_dev_for_perm_failure(phba); 11874 return PCI_ERS_RESULT_DISCONNECT; 11875 default: 11876 /* Unknown state, prepare and request slot reset */ 11877 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11878 "0472 Unknown PCI error state: x%x\n", state); 11879 lpfc_sli_prep_dev_for_reset(phba); 11880 return PCI_ERS_RESULT_NEED_RESET; 11881 } 11882 } 11883 11884 /** 11885 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 11886 * @pdev: pointer to PCI device. 11887 * 11888 * This routine is called from the PCI subsystem for error handling to 11889 * device with SLI-3 interface spec. This is called after PCI bus has been 11890 * reset to restart the PCI card from scratch, as if from a cold-boot. 11891 * During the PCI subsystem error recovery, after driver returns 11892 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 11893 * recovery and then call this routine before calling the .resume method 11894 * to recover the device. This function will initialize the HBA device, 11895 * enable the interrupt, but it will just put the HBA to offline state 11896 * without passing any I/O traffic. 
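 * The subsequent .resume callback (lpfc_io_resume_s3) is responsible for
 * bringing the port back online once the slot has been recovered.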
11897 * 11898 * Return codes 11899 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 11900 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11901 */ 11902 static pci_ers_result_t 11903 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 11904 { 11905 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11906 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11907 struct lpfc_sli *psli = &phba->sli; 11908 uint32_t intr_mode; 11909 11910 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 11911 if (pci_enable_device_mem(pdev)) { 11912 printk(KERN_ERR "lpfc: Cannot re-enable " 11913 "PCI device after reset.\n"); 11914 return PCI_ERS_RESULT_DISCONNECT; 11915 } 11916 11917 pci_restore_state(pdev); 11918 11919 /* 11920 * As the new kernel behavior of pci_restore_state() API call clears 11921 * device saved_state flag, need to save the restored state again. 11922 */ 11923 pci_save_state(pdev); 11924 11925 if (pdev->is_busmaster) 11926 pci_set_master(pdev); 11927 11928 spin_lock_irq(&phba->hbalock); 11929 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 11930 spin_unlock_irq(&phba->hbalock); 11931 11932 /* Configure and enable interrupt */ 11933 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 11934 if (intr_mode == LPFC_INTR_ERROR) { 11935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11936 "0427 Cannot re-enable interrupt after " 11937 "slot reset.\n"); 11938 return PCI_ERS_RESULT_DISCONNECT; 11939 } else 11940 phba->intr_mode = intr_mode; 11941 11942 /* Take device offline, it will perform cleanup */ 11943 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11944 lpfc_offline(phba); 11945 lpfc_sli_brdrestart(phba); 11946 11947 /* Log the current active interrupt mode */ 11948 lpfc_log_intr_mode(phba, phba->intr_mode); 11949 11950 return PCI_ERS_RESULT_RECOVERED; 11951 } 11952 11953 /** 11954 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 11955 * @pdev: pointer to PCI device 11956 * 11957 * This routine is called from the PCI subsystem for error handling to device 11958 * with SLI-3 interface spec. It is called when kernel error recovery tells 11959 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 11960 * error recovery. After this call, traffic can start to flow from this device 11961 * again. 11962 */ 11963 static void 11964 lpfc_io_resume_s3(struct pci_dev *pdev) 11965 { 11966 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11967 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11968 11969 /* Bring device online, it will be no-op for non-fatal error resume */ 11970 lpfc_online(phba); 11971 } 11972 11973 /** 11974 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 11975 * @phba: pointer to lpfc hba data structure. 11976 * 11977 * returns the number of ELS/CT IOCBs to reserve 11978 **/ 11979 int 11980 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 11981 { 11982 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 11983 11984 if (phba->sli_rev == LPFC_SLI_REV4) { 11985 if (max_xri <= 100) 11986 return 10; 11987 else if (max_xri <= 256) 11988 return 25; 11989 else if (max_xri <= 512) 11990 return 50; 11991 else if (max_xri <= 1024) 11992 return 100; 11993 else if (max_xri <= 1536) 11994 return 150; 11995 else if (max_xri <= 2048) 11996 return 200; 11997 else 11998 return 250; 11999 } else 12000 return 0; 12001 } 12002 12003 /** 12004 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 12005 * @phba: pointer to lpfc hba data structure. 
12006 * 12007 * returns the number of ELS/CT + NVMET IOCBs to reserve 12008 **/ 12009 int 12010 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 12011 { 12012 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 12013 12014 if (phba->nvmet_support) 12015 max_xri += LPFC_NVMET_BUF_POST; 12016 return max_xri; 12017 } 12018 12019 12020 static void 12021 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 12022 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 12023 const struct firmware *fw) 12024 { 12025 if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) || 12026 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && 12027 magic_number != MAGIC_NUMER_G6) || 12028 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && 12029 magic_number != MAGIC_NUMER_G7)) 12030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12031 "3030 This firmware version is not supported on " 12032 "this HBA model. Device:%x Magic:%x Type:%x " 12033 "ID:%x Size %d %zd\n", 12034 phba->pcidev->device, magic_number, ftype, fid, 12035 fsize, fw->size); 12036 else 12037 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12038 "3022 FW Download failed. Device:%x Magic:%x Type:%x " 12039 "ID:%x Size %d %zd\n", 12040 phba->pcidev->device, magic_number, ftype, fid, 12041 fsize, fw->size); 12042 } 12043 12044 12045 /** 12046 * lpfc_write_firmware - attempt to write a firmware image to the port 12047 * @fw: pointer to firmware image returned from request_firmware. 12048 * @phba: pointer to lpfc hba data structure. 12049 * 12050 **/ 12051 static void 12052 lpfc_write_firmware(const struct firmware *fw, void *context) 12053 { 12054 struct lpfc_hba *phba = (struct lpfc_hba *)context; 12055 char fwrev[FW_REV_STR_SIZE]; 12056 struct lpfc_grp_hdr *image; 12057 struct list_head dma_buffer_list; 12058 int i, rc = 0; 12059 struct lpfc_dmabuf *dmabuf, *next; 12060 uint32_t offset = 0, temp_offset = 0; 12061 uint32_t magic_number, ftype, fid, fsize; 12062 12063 /* It can be null in no-wait mode, sanity check */ 12064 if (!fw) { 12065 rc = -ENXIO; 12066 goto out; 12067 } 12068 image = (struct lpfc_grp_hdr *)fw->data; 12069 12070 magic_number = be32_to_cpu(image->magic_number); 12071 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 12072 fid = bf_get_be32(lpfc_grp_hdr_id, image); 12073 fsize = be32_to_cpu(image->size); 12074 12075 INIT_LIST_HEAD(&dma_buffer_list); 12076 lpfc_decode_firmware_rev(phba, fwrev, 1); 12077 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 12078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12079 "3023 Updating Firmware, Current Version:%s " 12080 "New Version:%s\n", 12081 fwrev, image->revision); 12082 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 12083 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 12084 GFP_KERNEL); 12085 if (!dmabuf) { 12086 rc = -ENOMEM; 12087 goto release_out; 12088 } 12089 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12090 SLI4_PAGE_SIZE, 12091 &dmabuf->phys, 12092 GFP_KERNEL); 12093 if (!dmabuf->virt) { 12094 kfree(dmabuf); 12095 rc = -ENOMEM; 12096 goto release_out; 12097 } 12098 list_add_tail(&dmabuf->list, &dma_buffer_list); 12099 } 12100 while (offset < fw->size) { 12101 temp_offset = offset; 12102 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 12103 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 12104 memcpy(dmabuf->virt, 12105 fw->data + temp_offset, 12106 fw->size - temp_offset); 12107 temp_offset = fw->size; 12108 break; 12109 } 12110 memcpy(dmabuf->virt, fw->data + temp_offset, 12111 SLI4_PAGE_SIZE); 12112 temp_offset += SLI4_PAGE_SIZE; 
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
				    (fw->size - offset), &offset);
			if (rc) {
				lpfc_log_write_firmware_error(phba, offset,
					magic_number, ftype, fid, fsize, fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
	return;
}

/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: INT_FW_UPGRADE to request the upgrade asynchronously (no-wait),
 *              RUN_FW_UPGRADE to fetch and write the image synchronously.
 *
 * This routine is called to perform a Linux generic firmware upgrade on a
 * device that supports this feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to attach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device and driver to see whether the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. If this routine determines it can claim
 * the HBA, it does all the initialization that it needs to do to handle the
 * HBA properly.
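 * Unlike the SLI-3 path, the interrupt mode is configured before the SCSI
 * host is created so that lpfc_cpu_affinity_check() can map the present
 * CPUs onto the vectors that were actually allocated.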
12193 * 12194 * Return code 12195 * 0 - driver can claim the device 12196 * negative value - driver can not claim the device 12197 **/ 12198 static int 12199 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 12200 { 12201 struct lpfc_hba *phba; 12202 struct lpfc_vport *vport = NULL; 12203 struct Scsi_Host *shost = NULL; 12204 int error; 12205 uint32_t cfg_mode, intr_mode; 12206 12207 /* Allocate memory for HBA structure */ 12208 phba = lpfc_hba_alloc(pdev); 12209 if (!phba) 12210 return -ENOMEM; 12211 12212 /* Perform generic PCI device enabling operation */ 12213 error = lpfc_enable_pci_dev(phba); 12214 if (error) 12215 goto out_free_phba; 12216 12217 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 12218 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 12219 if (error) 12220 goto out_disable_pci_dev; 12221 12222 /* Set up SLI-4 specific device PCI memory space */ 12223 error = lpfc_sli4_pci_mem_setup(phba); 12224 if (error) { 12225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12226 "1410 Failed to set up pci memory space.\n"); 12227 goto out_disable_pci_dev; 12228 } 12229 12230 /* Set up SLI-4 Specific device driver resources */ 12231 error = lpfc_sli4_driver_resource_setup(phba); 12232 if (error) { 12233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12234 "1412 Failed to set up driver resource.\n"); 12235 goto out_unset_pci_mem_s4; 12236 } 12237 12238 INIT_LIST_HEAD(&phba->active_rrq_list); 12239 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 12240 12241 /* Set up common device driver resources */ 12242 error = lpfc_setup_driver_resource_phase2(phba); 12243 if (error) { 12244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12245 "1414 Failed to set up driver resource.\n"); 12246 goto out_unset_driver_resource_s4; 12247 } 12248 12249 /* Get the default values for Model Name and Description */ 12250 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 12251 12252 /* Now, trying to enable interrupt and bring up the device */ 12253 cfg_mode = phba->cfg_use_msi; 12254 12255 /* Put device to a known state before enabling interrupt */ 12256 phba->pport = NULL; 12257 lpfc_stop_port(phba); 12258 12259 /* Configure and enable interrupt */ 12260 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 12261 if (intr_mode == LPFC_INTR_ERROR) { 12262 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12263 "0426 Failed to enable interrupt.\n"); 12264 error = -ENODEV; 12265 goto out_unset_driver_resource; 12266 } 12267 /* Default to single EQ for non-MSI-X */ 12268 if (phba->intr_type != MSIX) { 12269 phba->cfg_irq_chann = 1; 12270 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12271 if (phba->nvmet_support) 12272 phba->cfg_nvmet_mrq = 1; 12273 } 12274 } 12275 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 12276 12277 /* Create SCSI host to the physical port */ 12278 error = lpfc_create_shost(phba); 12279 if (error) { 12280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12281 "1415 Failed to create scsi host.\n"); 12282 goto out_disable_intr; 12283 } 12284 vport = phba->pport; 12285 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 12286 12287 /* Configure sysfs attributes */ 12288 error = lpfc_alloc_sysfs_attr(vport); 12289 if (error) { 12290 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12291 "1416 Failed to allocate sysfs attr\n"); 12292 goto out_destroy_shost; 12293 } 12294 12295 /* Set up SLI-4 HBA */ 12296 if (lpfc_sli4_hba_setup(phba)) { 12297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12298 "1421 Failed to set up hba\n"); 12299 error = 
-ENODEV; 12300 goto out_free_sysfs_attr; 12301 } 12302 12303 /* Log the current active interrupt mode */ 12304 phba->intr_mode = intr_mode; 12305 lpfc_log_intr_mode(phba, intr_mode); 12306 12307 /* Perform post initialization setup */ 12308 lpfc_post_init_setup(phba); 12309 12310 /* NVME support in FW earlier in the driver load corrects the 12311 * FC4 type making a check for nvme_support unnecessary. 12312 */ 12313 if (phba->nvmet_support == 0) { 12314 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12315 /* Create NVME binding with nvme_fc_transport. This 12316 * ensures the vport is initialized. If the localport 12317 * create fails, it should not unload the driver to 12318 * support field issues. 12319 */ 12320 error = lpfc_nvme_create_localport(vport); 12321 if (error) { 12322 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12323 "6004 NVME registration " 12324 "failed, error x%x\n", 12325 error); 12326 } 12327 } 12328 } 12329 12330 /* check for firmware upgrade or downgrade */ 12331 if (phba->cfg_request_firmware_upgrade) 12332 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 12333 12334 /* Check if there are static vports to be created. */ 12335 lpfc_create_static_vport(phba); 12336 12337 /* Enable RAS FW log support */ 12338 lpfc_sli4_ras_setup(phba); 12339 12340 return 0; 12341 12342 out_free_sysfs_attr: 12343 lpfc_free_sysfs_attr(vport); 12344 out_destroy_shost: 12345 lpfc_destroy_shost(phba); 12346 out_disable_intr: 12347 lpfc_sli4_disable_intr(phba); 12348 out_unset_driver_resource: 12349 lpfc_unset_driver_resource_phase2(phba); 12350 out_unset_driver_resource_s4: 12351 lpfc_sli4_driver_resource_unset(phba); 12352 out_unset_pci_mem_s4: 12353 lpfc_sli4_pci_mem_unset(phba); 12354 out_disable_pci_dev: 12355 lpfc_disable_pci_dev(phba); 12356 if (shost) 12357 scsi_host_put(shost); 12358 out_free_phba: 12359 lpfc_hba_free(phba); 12360 return error; 12361 } 12362 12363 /** 12364 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 12365 * @pdev: pointer to PCI device 12366 * 12367 * This routine is called from the kernel's PCI subsystem to device with 12368 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 12369 * removed from PCI bus, it performs all the necessary cleanup for the HBA 12370 * device to be removed from the PCI subsystem properly. 12371 **/ 12372 static void 12373 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 12374 { 12375 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12376 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 12377 struct lpfc_vport **vports; 12378 struct lpfc_hba *phba = vport->phba; 12379 int i; 12380 12381 /* Mark the device unloading flag */ 12382 spin_lock_irq(&phba->hbalock); 12383 vport->load_flag |= FC_UNLOADING; 12384 spin_unlock_irq(&phba->hbalock); 12385 12386 /* Free the HBA sysfs attributes */ 12387 lpfc_free_sysfs_attr(vport); 12388 12389 /* Release all the vports against this physical port */ 12390 vports = lpfc_create_vport_work_array(phba); 12391 if (vports != NULL) 12392 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 12393 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 12394 continue; 12395 fc_vport_terminate(vports[i]->fc_vport); 12396 } 12397 lpfc_destroy_vport_work_array(phba, vports); 12398 12399 /* Remove FC host and then SCSI host with the physical port */ 12400 fc_remove_host(shost); 12401 scsi_remove_host(shost); 12402 12403 /* Perform ndlp cleanup on the physical port. 
The nvme and nvmet 12404 * localports are destroyed after to cleanup all transport memory. 12405 */ 12406 lpfc_cleanup(vport); 12407 lpfc_nvmet_destroy_targetport(phba); 12408 lpfc_nvme_destroy_localport(vport); 12409 12410 /* De-allocate multi-XRI pools */ 12411 if (phba->cfg_xri_rebalancing) 12412 lpfc_destroy_multixri_pools(phba); 12413 12414 /* 12415 * Bring down the SLI Layer. This step disables all interrupts, 12416 * clears the rings, discards all mailbox commands, and resets 12417 * the HBA FCoE function. 12418 */ 12419 lpfc_debugfs_terminate(vport); 12420 12421 lpfc_stop_hba_timers(phba); 12422 spin_lock_irq(&phba->port_list_lock); 12423 list_del_init(&vport->listentry); 12424 spin_unlock_irq(&phba->port_list_lock); 12425 12426 /* Perform scsi free before driver resource_unset since scsi 12427 * buffers are released to their corresponding pools here. 12428 */ 12429 lpfc_io_free(phba); 12430 lpfc_free_iocb_list(phba); 12431 lpfc_sli4_hba_unset(phba); 12432 12433 lpfc_unset_driver_resource_phase2(phba); 12434 lpfc_sli4_driver_resource_unset(phba); 12435 12436 /* Unmap adapter Control and Doorbell registers */ 12437 lpfc_sli4_pci_mem_unset(phba); 12438 12439 /* Release PCI resources and disable device's PCI function */ 12440 scsi_host_put(shost); 12441 lpfc_disable_pci_dev(phba); 12442 12443 /* Finally, free the driver's device data structure */ 12444 lpfc_hba_free(phba); 12445 12446 return; 12447 } 12448 12449 /** 12450 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 12451 * @pdev: pointer to PCI device 12452 * @msg: power management message 12453 * 12454 * This routine is called from the kernel's PCI subsystem to support system 12455 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 12456 * this method, it quiesces the device by stopping the driver's worker 12457 * thread for the device, turning off device's interrupt and DMA, and bring 12458 * the device offline. Note that as the driver implements the minimum PM 12459 * requirements to a power-aware driver's PM support for suspend/resume -- all 12460 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 12461 * method call will be treated as SUSPEND and the driver will fully 12462 * reinitialize its device during resume() method call, the driver will set 12463 * device to PCI_D3hot state in PCI config space instead of setting it 12464 * according to the @msg provided by the PM. 
 *
 * Return code
 *   0 - driver suspended the device
 *   Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "2843 PCI device Power Management suspend.\n");

        /* Bring down the device */
        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
        lpfc_offline(phba);
        kthread_stop(phba->worker_thread);

        /* Disable interrupt from device */
        lpfc_sli4_disable_intr(phba);
        lpfc_sli4_queue_destroy(phba);

        /* Save device state to PCI config space */
        pci_save_state(pdev);
        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with an SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state, fully
 * reinitializes the device, and brings it online. Note that, because the
 * driver implements only the minimum PM requirements of a power-aware driver,
 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the
 * suspend() method are treated as SUSPEND and the driver fully reinitializes
 * its device during the resume() method call; the device is therefore set to
 * PCI_D0 directly in PCI config space before restoring the state.
 *
 * Return code
 *   0 - driver resumed the device
 *   Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        uint32_t intr_mode;
        int error;

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "0292 PCI device Power Management resume.\n");

        /* Restore device state from PCI config space */
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /*
         * As the new kernel behavior of pci_restore_state() API call clears
         * device saved_state flag, need to save the restored state again.
         */
        pci_save_state(pdev);
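        /* Re-enable bus mastering only if the device was a bus master
         * before the suspend.
         */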
        if (pdev->is_busmaster)
                pci_set_master(pdev);

        /* Startup the kernel thread for this host adapter. */
        phba->worker_thread = kthread_run(lpfc_do_work, phba,
                                          "lpfc_worker_%d", phba->brd_no);
        if (IS_ERR(phba->worker_thread)) {
                error = PTR_ERR(phba->worker_thread);
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0293 PM resume failed to start worker "
                                "thread: error=x%x.\n", error);
                return error;
        }

        /* Configure and enable interrupt */
        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0294 PM resume Failed to enable interrupt\n");
                return -EIO;
        } else
                phba->intr_mode = intr_mode;

        /* Restart HBA and bring it online */
        lpfc_sli_brdrestart(phba);
        lpfc_online(phba);

        /* Log the current active interrupt mode */
        lpfc_log_intr_mode(phba, phba->intr_mode);

        return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2828 PCI channel I/O abort preparing for recovery\n");
        /*
         * There may be errored I/Os through HBA, abort all I/Os on txcmplq
         * and let the SCSI mid-layer retry them to recover.
         */
        lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2826 PCI channel disable preparing for reset\n");

        /* Block any management I/Os to the device */
        lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);

        /* Flush all driver's outstanding SCSI I/Os as we are going to reset */
        lpfc_sli_flush_fcp_rings(phba);

        /* Flush the outstanding NVME IOs if fc4 type enabled. */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                lpfc_sli_flush_nvme_rings(phba);

        /* stop all timers */
        lpfc_stop_hba_timers(phba);

        /* Disable interrupt and pci device */
        lpfc_sli4_disable_intr(phba);
        lpfc_sli4_queue_destroy(phba);
        pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2827 PCI channel permanent disable for failure\n");

        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);

        /* stop all timers */
        lpfc_stop_hba_timers(phba);

        /* Clean up all driver's outstanding SCSI I/Os */
        lpfc_sli_flush_fcp_rings(phba);

        /* Flush the outstanding NVME IOs if fc4 type enabled. */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                lpfc_sli_flush_nvme_rings(phba);
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with an SLI-4 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *   PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        switch (state) {
        case pci_channel_io_normal:
                /* Non-fatal error, prepare for recovery */
                lpfc_sli4_prep_dev_for_recover(phba);
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                /* Fatal error, prepare for slot reset */
                lpfc_sli4_prep_dev_for_reset(phba);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                /* Permanent failure, prepare for device down */
                lpfc_sli4_prep_dev_for_perm_failure(phba);
                return PCI_ERS_RESULT_DISCONNECT;
        default:
                /* Unknown state, prepare and request slot reset */
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2825 Unknown PCI error state: x%x\n", state);
                lpfc_sli4_prep_dev_for_reset(phba);
                return PCI_ERS_RESULT_NEED_RESET;
        }
}

/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with an SLI-4 interface spec. It is called after the PCI bus has
 * been reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 *   PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t intr_mode;

        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
        if (pci_enable_device_mem(pdev)) {
                printk(KERN_ERR "lpfc: Cannot re-enable "
                       "PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_restore_state(pdev);

        /*
         * As the new kernel behavior of pci_restore_state() API call clears
         * device saved_state flag, need to save the restored state again.
         */
        pci_save_state(pdev);

        if (pdev->is_busmaster)
                pci_set_master(pdev);

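        /* Clear LPFC_SLI_ACTIVE so that lpfc_io_resume_s4() knows a full
         * offline/restart/online cycle is still required once DMA is
         * usable again.
         */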
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);

        /* Configure and enable interrupt */
        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2824 Cannot re-enable interrupt after "
                                "slot reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        } else
                phba->intr_mode = intr_mode;

        /* Log the current active interrupt mode */
        lpfc_log_intr_mode(phba, phba->intr_mode);

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with an SLI-4 interface spec. It is called when kernel error
 * recovery tells the lpfc driver that it is ok to resume normal PCI operation
 * after PCI bus error recovery. After this call, traffic can start to flow
 * from this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        /*
         * In case of slot reset, as function reset is performed through
         * mailbox command which needs DMA to be enabled, this operation
         * has to be moved to the io resume phase. Taking device offline
         * will perform the necessary cleanup.
         */
        if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
                /* Perform device reset */
                lpfc_offline_prep(phba, LPFC_MBX_WAIT);
                lpfc_offline(phba);
                lpfc_sli_brdrestart(phba);
                /* Bring the device back online */
                lpfc_online(phba);
        }
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device and the driver
 * to see if the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 *   0 - driver can claim the device
 *   negative value - driver cannot claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
        int rc;
        struct lpfc_sli_intf intf;

        if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
                return -ENODEV;

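        /* A valid SLI-4 revision in the SLI_INTF register selects the SLI-4
         * probe path; anything else is probed as an SLI-3 device.
         */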
        if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
            (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
                rc = lpfc_pci_probe_one_s4(pdev, pid);
        else
                rc = lpfc_pci_probe_one_s3(pdev, pid);

        return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                lpfc_pci_remove_one_s3(pdev);
                break;
        case LPFC_PCI_DEV_OC:
                lpfc_pci_remove_one_s4(pdev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1424 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *   0 - driver suspended the device
 *   Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        int rc = -ENODEV;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_pci_suspend_one_s3(pdev, msg);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_pci_suspend_one_s4(pdev, msg);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1425 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *   0 - driver resumed the device
 *   Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        int rc = -ENODEV;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_pci_resume_one_s3(pdev);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_pci_resume_one_s4(pdev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1426 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *   PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_io_error_detected_s3(pdev, state);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_io_error_detected_s4(pdev, state);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1427 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting the PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 *   PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_io_slot_reset_s3(pdev);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_io_slot_reset_s4(pdev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1428 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                lpfc_io_resume_s3(pdev);
                break;
        case LPFC_PCI_DEV_OC:
                lpfc_io_resume_s4(pdev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1429 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return;
}

/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the Flash Optimized Fabric (FOF) configuration flag is set.
 * Otherwise, the enable oas flag is cleared and the pool created for OAS
 * device data is destroyed.
 *
 **/
void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{

        if (!phba->cfg_EnableXLane)
                return;

        if (phba->sli4_hba.pc_sli4_params.oas_supported) {
                phba->cfg_fof = 1;
        } else {
                phba->cfg_fof = 0;
                if (phba->device_data_mem_pool)
                        mempool_destroy(phba->device_data_mem_pool);
                phba->device_data_mem_pool = NULL;
        }

        return;
}

/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if RAS firmware logging is supported by the
 * adapter and whether logging should be enabled on this PCI function.
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
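        /* RAS firmware logging is implemented only on Lancer G6/G7 HBAs.
         * Enable it only when this PCI function matches the configured
         * logging function and a non-zero log buffer size was requested.
         */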
        switch (phba->pcidev->device) {
        case PCI_DEVICE_ID_LANCER_G6_FC:
        case PCI_DEVICE_ID_LANCER_G7_FC:
                phba->ras_fwlog.ras_hwsupport = true;
                if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
                    phba->cfg_ras_fwlog_buffsize)
                        phba->ras_fwlog.ras_enabled = true;
                else
                        phba->ras_fwlog.ras_enabled = false;
                break;
        default:
                phba->ras_fwlog.ras_hwsupport = false;
        }
}


MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
        .error_detected = lpfc_io_error_detected,
        .slot_reset     = lpfc_io_slot_reset,
        .resume         = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
        .name           = LPFC_DRIVER_NAME,
        .id_table       = lpfc_id_table,
        .probe          = lpfc_pci_probe_one,
        .remove         = lpfc_pci_remove_one,
        .shutdown       = lpfc_pci_remove_one,
        .suspend        = lpfc_pci_suspend_one,
        .resume         = lpfc_pci_resume_one,
        .err_handler    = &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
        .owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "lpfcmgmt",
        .fops = &lpfc_mgmt_fop,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 */
static int __init
lpfc_init(void)
{
        int error = 0;

        printk(LPFC_MODULE_DESC "\n");
        printk(LPFC_COPYRIGHT "\n");

        error = misc_register(&lpfc_mgmt_dev);
        if (error)
                printk(KERN_ERR "Could not register lpfcmgmt device, "
                       "misc_register returned with status %d\n", error);

        lpfc_transport_functions.vport_create = lpfc_vport_create;
        lpfc_transport_functions.vport_delete = lpfc_vport_delete;
        lpfc_transport_template =
                        fc_attach_transport(&lpfc_transport_functions);
        if (lpfc_transport_template == NULL)
                return -ENOMEM;
        lpfc_vport_transport_template =
                fc_attach_transport(&lpfc_vport_transport_functions);
        if (lpfc_vport_transport_template == NULL) {
                fc_release_transport(lpfc_transport_template);
                return -ENOMEM;
        }
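        /* Build the reusable command templates for the NVME initiator and
         * NVME target paths.
         */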
        lpfc_nvme_cmd_template();
        lpfc_nvmet_cmd_template();

        /* Initialize in case vector mapping is needed */
        lpfc_present_cpu = num_present_cpus();

        error = pci_register_driver(&lpfc_driver);
        if (error) {
                fc_release_transport(lpfc_transport_template);
                fc_release_transport(lpfc_vport_transport_template);
        }

        return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
        misc_deregister(&lpfc_mgmt_dev);
        pci_unregister_driver(&lpfc_driver);
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);
        if (_dump_buf_data) {
                printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
                       "_dump_buf_data at 0x%p\n",
                       (1L << _dump_buf_data_order), _dump_buf_data);
                free_pages((unsigned long)_dump_buf_data,
                           _dump_buf_data_order);
        }

        if (_dump_buf_dif) {
                printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
                       "_dump_buf_dif at 0x%p\n",
                       (1L << _dump_buf_dif_order), _dump_buf_dif);
                free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
        }
        idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);