// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#include "qed_devlink.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

/* MFW speed capabilities maps */

struct qed_mfw_speed_map {
	u32		mfw_val;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);

	const u32	*cap_arr;
	u32		arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)		\
{						\
	.mfw_val	= (type),		\
	.cap_arr	= (arr),		\
	.arr_size	= ARRAY_SIZE(arr),	\
}

static const u32 qed_mfw_ext_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
			  qed_mfw_ext_50g_base_r),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
			  qed_mfw_ext_50g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
			  qed_mfw_ext_100g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
			  qed_mfw_ext_100g_base_r4),
};

static const u32 qed_mfw_legacy_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
			  qed_mfw_legacy_1g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
			  qed_mfw_legacy_10g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
			  qed_mfw_legacy_20g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
			  qed_mfw_legacy_25g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
			  qed_mfw_legacy_40g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
			  qed_mfw_legacy_50g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
			  qed_mfw_legacy_bb_100g),
};

static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);

	map->cap_arr = NULL;
	map->arr_size = 0;
}

static void __init qed_mfw_speed_maps_init(void)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}

static int __init qed_init(void)
{
	pr_info("%s", version);

	qed_mfw_speed_maps_init();

	return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
	/* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	pci_disable_pcie_error_reporting(pdev);

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		DP_NOTICE(cdev, "Can't request DMA addresses\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	/* AER (Advanced Error reporting) configuration */
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc)
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "Failed to configure PCIe AER [%d]\n", rc);

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
			dev_info->b_arfs_capable = true;
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
		dev_info->esl = qed_mcp_is_esl_supported(p_hwfn);
		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;
	cdev->common_dev_info = *dev_info;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "%s completed successfully\n", __func__);

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	/* For VFs, we should return with an error in case we didn't get the
	 * exact number of msix vectors as we requested.
	 * Not doing that will lead to a crash when starting queues for
	 * this VF.
	 */
	if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vector */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		fallthrough;

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		fallthrough;

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void(*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(&hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(&p_hwfn->sp_dpc);
		tasklet_enable(&p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 &cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(&p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, &p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "%s ERR\n", __func__);
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

#define QED_PERIODIC_DB_REC_COUNT		10
#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
#define QED_PERIODIC_DB_REC_INTERVAL \
	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)

static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	/* Memory barrier after setting atomic bit */
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		/* skip qed_db_rec_handler during recovery/unload */
		if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
			goto out;

		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

out:
	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				rc = -EINVAL;
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strscpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
				     const struct qed_link_params *params)
{
	struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
	const struct qed_mfw_speed_map *map;
	u32 i;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		ext_speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		ext_speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
			map = qed_mfw_ext_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				ext_speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
		switch (params->forced_speed) {
		case SPEED_1000:
			ext_speed->forced_speed = QED_EXT_SPEED_1G;
			break;
		case SPEED_10000:
			ext_speed->forced_speed = QED_EXT_SPEED_10G;
			break;
		case SPEED_20000:
			ext_speed->forced_speed = QED_EXT_SPEED_20G;
			break;
		case SPEED_25000:
			ext_speed->forced_speed = QED_EXT_SPEED_25G;
			break;
		case SPEED_40000:
			ext_speed->forced_speed = QED_EXT_SPEED_40G;
			break;
		case SPEED_50000:
			ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
						  QED_EXT_SPEED_50G_R2;
			break;
		case SPEED_100000:
			ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
						  QED_EXT_SPEED_100G_R4 |
						  QED_EXT_SPEED_100G_P4;
			break;
		default:
			break;
		}
	}

	if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
		return;

	switch (params->forced_speed) {
	case SPEED_25000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
						    ETH_EXT_FEC_25G_BASE_R |
						    ETH_EXT_FEC_25G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_40000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
						    ETH_EXT_FEC_40G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_50000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
						    ETH_EXT_FEC_50G_BASE_R |
						    ETH_EXT_FEC_50G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_100000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
						    ETH_EXT_FEC_100G_BASE_R |
						    ETH_EXT_FEC_100G_NONE;
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_mcp_link_params *link_params;
	struct qed_mcp_link_speed_params *speed;
	const struct qed_mfw_speed_map *map;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int rc;
	u32 i;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (!link_params)
		return -ENODATA;

	speed = &link_params->speed;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
			map = qed_mfw_legacy_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		speed->forced_speed = params->forced_speed;

	if (qed_mcp_is_ext_speed_supported(hwfn))
		qed_set_ext_speed_params(link_params, params);

	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}

	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
			break;
		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
		link_params->fec = params->fec;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_KR:
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     unsigned long *if_caps)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		phylink_set(if_caps, FIBRE);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		/* For DAC media multiple speed capabilities are supported */
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseCR_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 40000baseCR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseCR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseCR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 100000baseCR4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_BASE_T:
		phylink_set(if_caps, TP);

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
				phylink_set(if_caps, 1000baseT_Full);
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
				phylink_set(if_caps, 10000baseT_Full);
		}

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			phylink_set(if_caps, FIBRE);

			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1000BASET:
				phylink_set(if_caps, 1000baseT_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_BASET:
				phylink_set(if_caps, 10000baseT_Full);
				break;
			default:
				break;
			}
		}

		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		phylink_set(if_caps, FIBRE);
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1G_LX:
			case ETH_TRANSCEIVER_TYPE_1G_SX:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 1000baseKX_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
				phylink_set(if_caps, 10000baseSR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 10000baseLR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LRM:
				phylink_set(if_caps, 10000baseLRM_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_ER:
				phylink_set(if_caps, 10000baseR_FEC);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
				phylink_set(if_caps, 25000baseSR_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_LR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 40000baseLR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_40G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
				phylink_set(if_caps, 40000baseSR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
				phylink_set(if_caps, 100000baseSR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 100000baseLR4_ER4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_KR:
		phylink_set(if_caps, Backplane);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
20000baseKR2_Full); 2036 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 2037 phylink_set(if_caps, 1000baseKX_Full); 2038 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 2039 phylink_set(if_caps, 10000baseKR_Full); 2040 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 2041 phylink_set(if_caps, 25000baseKR_Full); 2042 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 2043 phylink_set(if_caps, 40000baseKR4_Full); 2044 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 2045 phylink_set(if_caps, 50000baseKR2_Full); 2046 if (capability & 2047 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 2048 phylink_set(if_caps, 100000baseKR4_Full); 2049 2050 break; 2051 case MEDIA_UNSPECIFIED: 2052 case MEDIA_NOT_PRESENT: 2053 default: 2054 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, 2055 "Unknown media and transceiver type;\n"); 2056 break; 2057 } 2058 } 2059 2060 static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask) 2061 { 2062 *speed_mask = 0; 2063 2064 if (caps & 2065 (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD)) 2066 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2067 if (caps & QED_LINK_PARTNER_SPEED_10G) 2068 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2069 if (caps & QED_LINK_PARTNER_SPEED_20G) 2070 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; 2071 if (caps & QED_LINK_PARTNER_SPEED_25G) 2072 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 2073 if (caps & QED_LINK_PARTNER_SPEED_40G) 2074 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 2075 if (caps & QED_LINK_PARTNER_SPEED_50G) 2076 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 2077 if (caps & QED_LINK_PARTNER_SPEED_100G) 2078 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; 2079 } 2080 2081 static void qed_fill_link(struct qed_hwfn *hwfn, 2082 struct qed_ptt *ptt, 2083 struct qed_link_output *if_link) 2084 { 2085 struct qed_mcp_link_capabilities link_caps; 2086 struct qed_mcp_link_params params; 2087 struct qed_mcp_link_state link; 2088 u32 media_type, speed_mask; 2089 2090 memset(if_link, 0, sizeof(*if_link)); 2091 2092 /* Prepare source inputs */ 2093 if (qed_get_link_data(hwfn, &params, &link, &link_caps)) { 2094 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 2095 return; 2096 } 2097 2098 /* Set the link parameters to pass to protocol driver */ 2099 if (link.link_up) 2100 if_link->link_up = true; 2101 2102 if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) { 2103 if (link_caps.default_ext_autoneg) 2104 phylink_set(if_link->supported_caps, Autoneg); 2105 2106 linkmode_copy(if_link->advertised_caps, if_link->supported_caps); 2107 2108 if (params.ext_speed.autoneg) 2109 phylink_set(if_link->advertised_caps, Autoneg); 2110 else 2111 phylink_clear(if_link->advertised_caps, Autoneg); 2112 2113 qed_fill_link_capability(hwfn, ptt, 2114 params.ext_speed.advertised_speeds, 2115 if_link->advertised_caps); 2116 } else { 2117 if (link_caps.default_speed_autoneg) 2118 phylink_set(if_link->supported_caps, Autoneg); 2119 2120 linkmode_copy(if_link->advertised_caps, if_link->supported_caps); 2121 2122 if (params.speed.autoneg) 2123 phylink_set(if_link->advertised_caps, Autoneg); 2124 else 2125 phylink_clear(if_link->advertised_caps, Autoneg); 2126 } 2127 2128 if (params.pause.autoneg || 2129 (params.pause.forced_rx && params.pause.forced_tx)) 2130 phylink_set(if_link->supported_caps, Asym_Pause); 2131 if (params.pause.autoneg || params.pause.forced_rx || 2132
params.pause.forced_tx) 2133 phylink_set(if_link->supported_caps, Pause); 2134 2135 if_link->sup_fec = link_caps.fec_default; 2136 if_link->active_fec = params.fec; 2137 2138 /* Fill link advertised capability */ 2139 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, 2140 if_link->advertised_caps); 2141 2142 /* Fill link supported capability */ 2143 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, 2144 if_link->supported_caps); 2145 2146 /* Fill partner advertised capability */ 2147 qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask); 2148 qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps); 2149 2150 if (link.link_up) 2151 if_link->speed = link.speed; 2152 2153 /* TODO - fill duplex properly */ 2154 if_link->duplex = DUPLEX_FULL; 2155 qed_mcp_get_media_type(hwfn, ptt, &media_type); 2156 if_link->port = qed_get_port_type(media_type); 2157 2158 if_link->autoneg = params.speed.autoneg; 2159 2160 if (params.pause.autoneg) 2161 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 2162 if (params.pause.forced_rx) 2163 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 2164 if (params.pause.forced_tx) 2165 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 2166 2167 if (link.an_complete) 2168 phylink_set(if_link->lp_caps, Autoneg); 2169 if (link.partner_adv_pause) 2170 phylink_set(if_link->lp_caps, Pause); 2171 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 2172 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 2173 phylink_set(if_link->lp_caps, Asym_Pause); 2174 2175 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 2176 if_link->eee_supported = false; 2177 } else { 2178 if_link->eee_supported = true; 2179 if_link->eee_active = link.eee_active; 2180 if_link->sup_caps = link_caps.eee_speed_caps; 2181 /* MFW clears adv_caps on eee disable; use configured value */ 2182 if_link->eee.adv_caps = link.eee_adv_caps ? 
link.eee_adv_caps : 2183 params.eee.adv_caps; 2184 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 2185 if_link->eee.enable = params.eee.enable; 2186 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 2187 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 2188 } 2189 } 2190 2191 static void qed_get_current_link(struct qed_dev *cdev, 2192 struct qed_link_output *if_link) 2193 { 2194 struct qed_hwfn *hwfn; 2195 struct qed_ptt *ptt; 2196 int i; 2197 2198 hwfn = &cdev->hwfns[0]; 2199 if (IS_PF(cdev)) { 2200 ptt = qed_ptt_acquire(hwfn); 2201 if (ptt) { 2202 qed_fill_link(hwfn, ptt, if_link); 2203 qed_ptt_release(hwfn, ptt); 2204 } else { 2205 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); 2206 } 2207 } else { 2208 qed_fill_link(hwfn, NULL, if_link); 2209 } 2210 2211 for_each_hwfn(cdev, i) 2212 qed_inform_vf_link_state(&cdev->hwfns[i]); 2213 } 2214 2215 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2216 { 2217 void *cookie = hwfn->cdev->ops_cookie; 2218 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2219 struct qed_link_output if_link; 2220 2221 qed_fill_link(hwfn, ptt, &if_link); 2222 qed_inform_vf_link_state(hwfn); 2223 2224 if (IS_LEAD_HWFN(hwfn) && cookie) 2225 op->link_update(cookie, &if_link); 2226 } 2227 2228 void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2229 { 2230 void *cookie = hwfn->cdev->ops_cookie; 2231 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2232 2233 if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update) 2234 op->bw_update(cookie); 2235 } 2236 2237 static int qed_drain(struct qed_dev *cdev) 2238 { 2239 struct qed_hwfn *hwfn; 2240 struct qed_ptt *ptt; 2241 int i, rc; 2242 2243 if (IS_VF(cdev)) 2244 return 0; 2245 2246 for_each_hwfn(cdev, i) { 2247 hwfn = &cdev->hwfns[i]; 2248 ptt = qed_ptt_acquire(hwfn); 2249 if (!ptt) { 2250 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 2251 return -EBUSY; 2252 } 2253 rc = qed_mcp_drain(hwfn, ptt); 2254 qed_ptt_release(hwfn, ptt); 2255 if (rc) 2256 return rc; 2257 } 2258 2259 return 0; 2260 } 2261 2262 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 2263 struct qed_nvm_image_att *nvm_image, 2264 u32 *crc) 2265 { 2266 u8 *buf = NULL; 2267 int rc; 2268 2269 /* Allocate a buffer for holding the nvram image */ 2270 buf = kzalloc(nvm_image->length, GFP_KERNEL); 2271 if (!buf) 2272 return -ENOMEM; 2273 2274 /* Read image into buffer */ 2275 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 2276 buf, nvm_image->length); 2277 if (rc) { 2278 DP_ERR(cdev, "Failed reading image from nvm\n"); 2279 goto out; 2280 } 2281 2282 /* Convert the buffer into big-endian format (excluding the 2283 * closing 4 bytes of CRC). 2284 */ 2285 cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, 2286 DIV_ROUND_UP(nvm_image->length - 4, 4)); 2287 2288 /* Calc CRC for the "actual" image buffer, i.e. not including 2289 * the last 4 CRC bytes. 2290 */ 2291 *crc = ~crc32(~0U, buf, nvm_image->length - 4); 2292 *crc = (__force u32)cpu_to_be32p(crc); 2293 2294 out: 2295 kfree(buf); 2296 2297 return rc; 2298 } 2299 2300 /* Binary file format - 2301 * /----------------------------------------------------------------------\ 2302 * 0B | 0x4 [command index] | 2303 * 4B | image_type | Options | Number of register settings | 2304 * 8B | Value | 2305 * 12B | Mask | 2306 * 16B | Offset | 2307 * \----------------------------------------------------------------------/ 2308 * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 
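 * Each Value is applied under its Mask at the given Offset via a 4-byte read-modify-write of the existing image contents in NVM (see the parsing loop in qed_nvm_flash_image_access() below).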
2309 * Options - 0'b - Calculate & Update CRC for image 2310 */ 2311 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, 2312 bool *check_resp) 2313 { 2314 struct qed_nvm_image_att nvm_image; 2315 struct qed_hwfn *p_hwfn; 2316 bool is_crc = false; 2317 u32 image_type; 2318 int rc = 0, i; 2319 u16 len; 2320 2321 *data += 4; 2322 image_type = **data; 2323 p_hwfn = QED_LEADING_HWFN(cdev); 2324 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 2325 if (image_type == p_hwfn->nvm_info.image_att[i].image_type) 2326 break; 2327 if (i == p_hwfn->nvm_info.num_images) { 2328 DP_ERR(cdev, "Failed to find nvram image of type %08x\n", 2329 image_type); 2330 return -ENOENT; 2331 } 2332 2333 nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; 2334 nvm_image.length = p_hwfn->nvm_info.image_att[i].len; 2335 2336 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2337 "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", 2338 **data, image_type, nvm_image.start_addr, 2339 nvm_image.start_addr + nvm_image.length - 1); 2340 (*data)++; 2341 is_crc = !!(**data & BIT(0)); 2342 (*data)++; 2343 len = *((u16 *)*data); 2344 *data += 2; 2345 if (is_crc) { 2346 u32 crc = 0; 2347 2348 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); 2349 if (rc) { 2350 DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); 2351 goto exit; 2352 } 2353 2354 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2355 (nvm_image.start_addr + 2356 nvm_image.length - 4), (u8 *)&crc, 4); 2357 if (rc) 2358 DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", 2359 nvm_image.start_addr + nvm_image.length - 4, rc); 2360 goto exit; 2361 } 2362 2363 /* Iterate over the values for setting */ 2364 while (len) { 2365 u32 offset, mask, value, cur_value; 2366 u8 buf[4]; 2367 2368 value = *((u32 *)*data); 2369 *data += 4; 2370 mask = *((u32 *)*data); 2371 *data += 4; 2372 offset = *((u32 *)*data); 2373 *data += 4; 2374 2375 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, 2376 4); 2377 if (rc) { 2378 DP_ERR(cdev, "Failed reading from %08x\n", 2379 nvm_image.start_addr + offset); 2380 goto exit; 2381 } 2382 2383 cur_value = le32_to_cpu(*((__le32 *)buf)); 2384 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2385 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", 2386 nvm_image.start_addr + offset, cur_value, 2387 (cur_value & ~mask) | (value & mask), value, mask); 2388 value = (value & mask) | (cur_value & ~mask); 2389 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2390 nvm_image.start_addr + offset, 2391 (u8 *)&value, 4); 2392 if (rc) { 2393 DP_ERR(cdev, "Failed writing to %08x\n", 2394 nvm_image.start_addr + offset); 2395 goto exit; 2396 } 2397 2398 len--; 2399 } 2400 exit: 2401 return rc; 2402 } 2403 2404 /* Binary file format - 2405 * /----------------------------------------------------------------------\ 2406 * 0B | 0x3 [command index] | 2407 * 4B | b'0: check_response? 
| b'1-31 reserved | 2408 * 8B | File-type | reserved | 2409 * 12B | Image length in bytes | 2410 * \----------------------------------------------------------------------/ 2411 * Start a new file of the provided type 2412 */ 2413 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, 2414 const u8 **data, bool *check_resp) 2415 { 2416 u32 file_type, file_size = 0; 2417 int rc; 2418 2419 *data += 4; 2420 *check_resp = !!(**data & BIT(0)); 2421 *data += 4; 2422 file_type = **data; 2423 2424 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2425 "About to start a new file of type %02x\n", file_type); 2426 if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { 2427 *data += 4; 2428 file_size = *((u32 *)(*data)); 2429 } 2430 2431 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, 2432 (u8 *)(&file_size), 4); 2433 *data += 4; 2434 2435 return rc; 2436 } 2437 2438 /* Binary file format - 2439 * /----------------------------------------------------------------------\ 2440 * 0B | 0x2 [command index] | 2441 * 4B | Length in bytes | 2442 * 8B | b'0: check_response? | b'1-31 reserved | 2443 * 12B | Offset in bytes | 2444 * 16B | Data ... | 2445 * \----------------------------------------------------------------------/ 2446 * Write data as part of a file that was previously started. Data should be 2447 * of length equal to that provided in the message 2448 */ 2449 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, 2450 const u8 **data, bool *check_resp) 2451 { 2452 u32 offset, len; 2453 int rc; 2454 2455 *data += 4; 2456 len = *((u32 *)(*data)); 2457 *data += 4; 2458 *check_resp = !!(**data & BIT(0)); 2459 *data += 4; 2460 offset = *((u32 *)(*data)); 2461 *data += 4; 2462 2463 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2464 "About to write File-data: %08x bytes to offset %08x\n", 2465 len, offset); 2466 2467 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, 2468 (char *)(*data), len); 2469 *data += len; 2470 2471 return rc; 2472 } 2473 2474 /* Binary file format [General header] - 2475 * /----------------------------------------------------------------------\ 2476 * 0B | QED_NVM_SIGNATURE | 2477 * 4B | Length in bytes | 2478 * 8B | Highest command in this batchfile | Reserved | 2479 * \----------------------------------------------------------------------/ 2480 */ 2481 static int qed_nvm_flash_image_validate(struct qed_dev *cdev, 2482 const struct firmware *image, 2483 const u8 **data) 2484 { 2485 u32 signature, len; 2486 2487 /* Check minimum size */ 2488 if (image->size < 12) { 2489 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); 2490 return -EINVAL; 2491 } 2492 2493 /* Check signature */ 2494 signature = *((u32 *)(*data)); 2495 if (signature != QED_NVM_SIGNATURE) { 2496 DP_ERR(cdev, "Wrong signature '%08x'\n", signature); 2497 return -EINVAL; 2498 } 2499 2500 *data += 4; 2501 /* Validate internal size equals the image-size */ 2502 len = *((u32 *)(*data)); 2503 if (len != image->size) { 2504 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", 2505 len, (u32)image->size); 2506 return -EINVAL; 2507 } 2508 2509 *data += 4; 2510 /* Make sure driver familiar with all commands necessary for this */ 2511 if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { 2512 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n", 2513 *((u16 *)(*data))); 2514 return -EINVAL; 2515 } 2516 2517 *data += 4; 2518 2519 return 0; 2520 } 2521 2522 /* Binary file format - 2523 * /----------------------------------------------------------------------\ 2524 * 0B | 0x5 [command index] | 
2525 * 4B | Number of config attributes | Reserved | 2526 * 4B | Config ID | Entity ID | Length | 2527 * 4B | Value | 2528 * | | 2529 * \----------------------------------------------------------------------/ 2530 * There can be several cfg_id-entity_id-Length-Value sets as specified by 2531 * 'Number of config attributes'. 2532 * 2533 * The API parses config attributes from the user-provided buffer and flashes 2534 * them to the respective NVM path using the Management FW interface. 2535 */ 2536 static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) 2537 { 2538 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2539 u8 entity_id, len, buf[32]; 2540 bool need_nvm_init = true; 2541 struct qed_ptt *ptt; 2542 u16 cfg_id, count; 2543 int rc = 0, i; 2544 u32 flags; 2545 2546 ptt = qed_ptt_acquire(hwfn); 2547 if (!ptt) 2548 return -EAGAIN; 2549 2550 /* NVM CFG ID attribute header */ 2551 *data += 4; 2552 count = *((u16 *)*data); 2553 *data += 4; 2554 2555 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2556 "Read config ids: num_attrs = %0d\n", count); 2557 /* NVM CFG ID attributes. Start loop index from 1 to avoid additional 2558 * arithmetic operations in the implementation. 2559 */ 2560 for (i = 1; i <= count; i++) { 2561 cfg_id = *((u16 *)*data); 2562 *data += 2; 2563 entity_id = **data; 2564 (*data)++; 2565 len = **data; 2566 (*data)++; 2567 memcpy(buf, *data, len); 2568 *data += len; 2569 2570 flags = 0; 2571 if (need_nvm_init) { 2572 flags |= QED_NVM_CFG_OPTION_INIT; 2573 need_nvm_init = false; 2574 } 2575 2576 /* Commit to flash and free the resources */ 2577 if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) { 2578 flags |= QED_NVM_CFG_OPTION_COMMIT | 2579 QED_NVM_CFG_OPTION_FREE; 2580 need_nvm_init = true; 2581 } 2582 2583 if (entity_id) 2584 flags |= QED_NVM_CFG_OPTION_ENTITY_SEL; 2585 2586 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2587 "cfg_id = %d entity = %d len = %d\n", cfg_id, 2588 entity_id, len); 2589 rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags, 2590 buf, len); 2591 if (rc) { 2592 DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id); 2593 break; 2594 } 2595 } 2596 2597 qed_ptt_release(hwfn, ptt); 2598 2599 return rc; 2600 } 2601 2602 #define QED_MAX_NVM_BUF_LEN 32 2603 static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd) 2604 { 2605 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2606 u8 buf[QED_MAX_NVM_BUF_LEN]; 2607 struct qed_ptt *ptt; 2608 u32 len; 2609 int rc; 2610 2611 ptt = qed_ptt_acquire(hwfn); 2612 if (!ptt) 2613 return QED_MAX_NVM_BUF_LEN; 2614 2615 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf, 2616 &len); 2617 if (rc || !len) { 2618 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2619 len = QED_MAX_NVM_BUF_LEN; 2620 } 2621 2622 qed_ptt_release(hwfn, ptt); 2623 2624 return len; 2625 } 2626 2627 static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, 2628 u32 cmd, u32 entity_id) 2629 { 2630 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2631 struct qed_ptt *ptt; 2632 u32 flags, len; 2633 int rc = 0; 2634 2635 ptt = qed_ptt_acquire(hwfn); 2636 if (!ptt) 2637 return -EAGAIN; 2638 2639 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2640 "Read config cmd = %d entity id %d\n", cmd, entity_id); 2641 flags = entity_id ?
QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; 2642 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); 2643 if (rc) 2644 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2645 2646 qed_ptt_release(hwfn, ptt); 2647 2648 return rc; 2649 } 2650 2651 static int qed_nvm_flash(struct qed_dev *cdev, const char *name) 2652 { 2653 const struct firmware *image; 2654 const u8 *data, *data_end; 2655 u32 cmd_type; 2656 int rc; 2657 2658 rc = request_firmware(&image, name, &cdev->pdev->dev); 2659 if (rc) { 2660 DP_ERR(cdev, "Failed to find '%s'\n", name); 2661 return rc; 2662 } 2663 2664 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2665 "Flashing '%s' - firmware's data at %p, size is %08x\n", 2666 name, image->data, (u32)image->size); 2667 data = image->data; 2668 data_end = data + image->size; 2669 2670 rc = qed_nvm_flash_image_validate(cdev, image, &data); 2671 if (rc) 2672 goto exit; 2673 2674 while (data < data_end) { 2675 bool check_resp = false; 2676 2677 /* Parse the actual command */ 2678 cmd_type = *((u32 *)data); 2679 switch (cmd_type) { 2680 case QED_NVM_FLASH_CMD_FILE_DATA: 2681 rc = qed_nvm_flash_image_file_data(cdev, &data, 2682 &check_resp); 2683 break; 2684 case QED_NVM_FLASH_CMD_FILE_START: 2685 rc = qed_nvm_flash_image_file_start(cdev, &data, 2686 &check_resp); 2687 break; 2688 case QED_NVM_FLASH_CMD_NVM_CHANGE: 2689 rc = qed_nvm_flash_image_access(cdev, &data, 2690 &check_resp); 2691 break; 2692 case QED_NVM_FLASH_CMD_NVM_CFG_ID: 2693 rc = qed_nvm_flash_cfg_write(cdev, &data); 2694 break; 2695 default: 2696 DP_ERR(cdev, "Unknown command %08x\n", cmd_type); 2697 rc = -EINVAL; 2698 goto exit; 2699 } 2700 2701 if (rc) { 2702 DP_ERR(cdev, "Command %08x failed\n", cmd_type); 2703 goto exit; 2704 } 2705 2706 /* Check response if needed */ 2707 if (check_resp) { 2708 u32 mcp_response = 0; 2709 2710 if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { 2711 DP_ERR(cdev, "Failed getting MCP response\n"); 2712 rc = -EINVAL; 2713 goto exit; 2714 } 2715 2716 switch (mcp_response & FW_MSG_CODE_MASK) { 2717 case FW_MSG_CODE_OK: 2718 case FW_MSG_CODE_NVM_OK: 2719 case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: 2720 case FW_MSG_CODE_PHY_OK: 2721 break; 2722 default: 2723 DP_ERR(cdev, "MFW returns error: %08x\n", 2724 mcp_response); 2725 rc = -EINVAL; 2726 goto exit; 2727 } 2728 } 2729 } 2730 2731 exit: 2732 release_firmware(image); 2733 2734 return rc; 2735 } 2736 2737 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, 2738 u8 *buf, u16 len) 2739 { 2740 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2741 2742 return qed_mcp_get_nvm_image(hwfn, type, buf, len); 2743 } 2744 2745 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) 2746 { 2747 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2748 void *cookie = p_hwfn->cdev->ops_cookie; 2749 2750 if (ops && ops->schedule_recovery_handler) 2751 ops->schedule_recovery_handler(cookie); 2752 } 2753 2754 static const char * const qed_hw_err_type_descr[] = { 2755 [QED_HW_ERR_FAN_FAIL] = "Fan Failure", 2756 [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", 2757 [QED_HW_ERR_HW_ATTN] = "HW Attention", 2758 [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure", 2759 [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure", 2760 [QED_HW_ERR_FW_ASSERT] = "FW Assertion", 2761 [QED_HW_ERR_LAST] = "Unknown", 2762 }; 2763 2764 void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, 2765 enum qed_hw_err_type err_type) 2766 { 2767 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2768 void *cookie = p_hwfn->cdev->ops_cookie; 2769 
const char *err_str; 2770 2771 if (err_type > QED_HW_ERR_LAST) 2772 err_type = QED_HW_ERR_LAST; 2773 err_str = qed_hw_err_type_descr[err_type]; 2774 2775 DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str); 2776 2777 /* Call the HW error handler of the protocol driver. 2778 * If it is not available - perform a minimal handling of preventing 2779 * HW attentions from being reasserted. 2780 */ 2781 if (ops && ops->schedule_hw_err_handler) 2782 ops->schedule_hw_err_handler(cookie, err_type); 2783 else 2784 qed_int_attn_clr_enable(p_hwfn->cdev, true); 2785 } 2786 2787 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, 2788 void *handle) 2789 { 2790 return qed_set_queue_coalesce(rx_coal, tx_coal, handle); 2791 } 2792 2793 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 2794 { 2795 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2796 struct qed_ptt *ptt; 2797 int status = 0; 2798 2799 ptt = qed_ptt_acquire(hwfn); 2800 if (!ptt) 2801 return -EAGAIN; 2802 2803 status = qed_mcp_set_led(hwfn, ptt, mode); 2804 2805 qed_ptt_release(hwfn, ptt); 2806 2807 return status; 2808 } 2809 2810 int qed_recovery_process(struct qed_dev *cdev) 2811 { 2812 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2813 struct qed_ptt *p_ptt; 2814 int rc = 0; 2815 2816 p_ptt = qed_ptt_acquire(p_hwfn); 2817 if (!p_ptt) 2818 return -EAGAIN; 2819 2820 rc = qed_start_recovery_process(p_hwfn, p_ptt); 2821 2822 qed_ptt_release(p_hwfn, p_ptt); 2823 2824 return rc; 2825 } 2826 2827 static int qed_update_wol(struct qed_dev *cdev, bool enabled) 2828 { 2829 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2830 struct qed_ptt *ptt; 2831 int rc = 0; 2832 2833 if (IS_VF(cdev)) 2834 return 0; 2835 2836 ptt = qed_ptt_acquire(hwfn); 2837 if (!ptt) 2838 return -EAGAIN; 2839 2840 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED 2841 : QED_OV_WOL_DISABLED); 2842 if (rc) 2843 goto out; 2844 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2845 2846 out: 2847 qed_ptt_release(hwfn, ptt); 2848 return rc; 2849 } 2850 2851 static int qed_update_drv_state(struct qed_dev *cdev, bool active) 2852 { 2853 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2854 struct qed_ptt *ptt; 2855 int status = 0; 2856 2857 if (IS_VF(cdev)) 2858 return 0; 2859 2860 ptt = qed_ptt_acquire(hwfn); 2861 if (!ptt) 2862 return -EAGAIN; 2863 2864 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 
2865 QED_OV_DRIVER_STATE_ACTIVE : 2866 QED_OV_DRIVER_STATE_DISABLED); 2867 2868 qed_ptt_release(hwfn, ptt); 2869 2870 return status; 2871 } 2872 2873 static int qed_update_mac(struct qed_dev *cdev, const u8 *mac) 2874 { 2875 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2876 struct qed_ptt *ptt; 2877 int status = 0; 2878 2879 if (IS_VF(cdev)) 2880 return 0; 2881 2882 ptt = qed_ptt_acquire(hwfn); 2883 if (!ptt) 2884 return -EAGAIN; 2885 2886 status = qed_mcp_ov_update_mac(hwfn, ptt, mac); 2887 if (status) 2888 goto out; 2889 2890 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2891 2892 out: 2893 qed_ptt_release(hwfn, ptt); 2894 return status; 2895 } 2896 2897 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) 2898 { 2899 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2900 struct qed_ptt *ptt; 2901 int status = 0; 2902 2903 if (IS_VF(cdev)) 2904 return 0; 2905 2906 ptt = qed_ptt_acquire(hwfn); 2907 if (!ptt) 2908 return -EAGAIN; 2909 2910 status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); 2911 if (status) 2912 goto out; 2913 2914 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2915 2916 out: 2917 qed_ptt_release(hwfn, ptt); 2918 return status; 2919 } 2920 2921 static int 2922 qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb, 2923 u16 qid, struct qed_sb_info_dbg *sb_dbg) 2924 { 2925 struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns]; 2926 struct qed_ptt *ptt; 2927 int rc; 2928 2929 if (IS_VF(cdev)) 2930 return -EINVAL; 2931 2932 ptt = qed_ptt_acquire(hwfn); 2933 if (!ptt) { 2934 DP_NOTICE(hwfn, "Can't acquire PTT\n"); 2935 return -EAGAIN; 2936 } 2937 2938 memset(sb_dbg, 0, sizeof(*sb_dbg)); 2939 rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg); 2940 2941 qed_ptt_release(hwfn, ptt); 2942 return rc; 2943 } 2944 2945 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, 2946 u8 dev_addr, u32 offset, u32 len) 2947 { 2948 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2949 struct qed_ptt *ptt; 2950 int rc = 0; 2951 2952 if (IS_VF(cdev)) 2953 return 0; 2954 2955 ptt = qed_ptt_acquire(hwfn); 2956 if (!ptt) 2957 return -EAGAIN; 2958 2959 rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, 2960 offset, len, buf); 2961 2962 qed_ptt_release(hwfn, ptt); 2963 2964 return rc; 2965 } 2966 2967 static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) 2968 { 2969 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2970 struct qed_ptt *ptt; 2971 int rc = 0; 2972 2973 if (IS_VF(cdev)) 2974 return 0; 2975 2976 ptt = qed_ptt_acquire(hwfn); 2977 if (!ptt) 2978 return -EAGAIN; 2979 2980 rc = qed_dbg_grc_config(hwfn, cfg_id, val); 2981 2982 qed_ptt_release(hwfn, ptt); 2983 2984 return rc; 2985 } 2986 2987 static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...) 
2988 { 2989 char buf[QED_MFW_REPORT_STR_SIZE]; 2990 struct qed_hwfn *p_hwfn; 2991 struct qed_ptt *p_ptt; 2992 va_list vl; 2993 2994 va_start(vl, fmt); 2995 vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl); 2996 va_end(vl); 2997 2998 if (IS_PF(cdev)) { 2999 p_hwfn = QED_LEADING_HWFN(cdev); 3000 p_ptt = qed_ptt_acquire(p_hwfn); 3001 if (p_ptt) { 3002 qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf)); 3003 qed_ptt_release(p_hwfn, p_ptt); 3004 } 3005 } 3006 } 3007 3008 static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) 3009 { 3010 return QED_AFFIN_HWFN_IDX(cdev); 3011 } 3012 3013 static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active) 3014 { 3015 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 3016 struct qed_ptt *ptt; 3017 int rc = 0; 3018 3019 *esl_active = false; 3020 3021 if (IS_VF(cdev)) 3022 return 0; 3023 3024 ptt = qed_ptt_acquire(hwfn); 3025 if (!ptt) 3026 return -EAGAIN; 3027 3028 rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active); 3029 3030 qed_ptt_release(hwfn, ptt); 3031 3032 return rc; 3033 } 3034 3035 static struct qed_selftest_ops qed_selftest_ops_pass = { 3036 .selftest_memory = &qed_selftest_memory, 3037 .selftest_interrupt = &qed_selftest_interrupt, 3038 .selftest_register = &qed_selftest_register, 3039 .selftest_clock = &qed_selftest_clock, 3040 .selftest_nvram = &qed_selftest_nvram, 3041 }; 3042 3043 const struct qed_common_ops qed_common_ops_pass = { 3044 .selftest = &qed_selftest_ops_pass, 3045 .probe = &qed_probe, 3046 .remove = &qed_remove, 3047 .set_power_state = &qed_set_power_state, 3048 .set_name = &qed_set_name, 3049 .update_pf_params = &qed_update_pf_params, 3050 .slowpath_start = &qed_slowpath_start, 3051 .slowpath_stop = &qed_slowpath_stop, 3052 .set_fp_int = &qed_set_int_fp, 3053 .get_fp_int = &qed_get_int_fp, 3054 .sb_init = &qed_sb_init, 3055 .sb_release = &qed_sb_release, 3056 .simd_handler_config = &qed_simd_handler_config, 3057 .simd_handler_clean = &qed_simd_handler_clean, 3058 .dbg_grc = &qed_dbg_grc, 3059 .dbg_grc_size = &qed_dbg_grc_size, 3060 .can_link_change = &qed_can_link_change, 3061 .set_link = &qed_set_link, 3062 .get_link = &qed_get_current_link, 3063 .drain = &qed_drain, 3064 .update_msglvl = &qed_init_dp, 3065 .devlink_register = qed_devlink_register, 3066 .devlink_unregister = qed_devlink_unregister, 3067 .report_fatal_error = qed_report_fatal_error, 3068 .dbg_all_data = &qed_dbg_all_data, 3069 .dbg_all_data_size = &qed_dbg_all_data_size, 3070 .chain_alloc = &qed_chain_alloc, 3071 .chain_free = &qed_chain_free, 3072 .nvm_flash = &qed_nvm_flash, 3073 .nvm_get_image = &qed_nvm_get_image, 3074 .set_coalesce = &qed_set_coalesce, 3075 .set_led = &qed_set_led, 3076 .recovery_process = &qed_recovery_process, 3077 .recovery_prolog = &qed_recovery_prolog, 3078 .attn_clr_enable = &qed_int_attn_clr_enable, 3079 .update_drv_state = &qed_update_drv_state, 3080 .update_mac = &qed_update_mac, 3081 .update_mtu = &qed_update_mtu, 3082 .update_wol = &qed_update_wol, 3083 .db_recovery_add = &qed_db_recovery_add, 3084 .db_recovery_del = &qed_db_recovery_del, 3085 .read_module_eeprom = &qed_read_module_eeprom, 3086 .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, 3087 .read_nvm_cfg = &qed_nvm_flash_cfg_read, 3088 .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, 3089 .set_grc_config = &qed_set_grc_config, 3090 .mfw_report = &qed_mfw_report, 3091 .get_sb_info = &qed_get_sb_info, 3092 .get_esl_status = &qed_get_esl_status, 3093 }; 3094 3095 void qed_get_protocol_stats(struct qed_dev *cdev, 3096 enum qed_mcp_protocol_type type, 3097 union 
qed_mcp_protocol_stats *stats) 3098 { 3099 struct qed_eth_stats eth_stats; 3100 3101 memset(stats, 0, sizeof(*stats)); 3102 3103 switch (type) { 3104 case QED_MCP_LAN_STATS: 3105 qed_get_vport_stats(cdev, &eth_stats); 3106 stats->lan_stats.ucast_rx_pkts = 3107 eth_stats.common.rx_ucast_pkts; 3108 stats->lan_stats.ucast_tx_pkts = 3109 eth_stats.common.tx_ucast_pkts; 3110 stats->lan_stats.fcs_err = -1; 3111 break; 3112 case QED_MCP_FCOE_STATS: 3113 qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); 3114 break; 3115 case QED_MCP_ISCSI_STATS: 3116 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); 3117 break; 3118 default: 3119 DP_VERBOSE(cdev, QED_MSG_SP, 3120 "Invalid protocol type = %d\n", type); 3121 return; 3122 } 3123 } 3124 3125 int qed_mfw_tlv_req(struct qed_hwfn *hwfn) 3126 { 3127 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 3128 "Scheduling slowpath task [Flag: %d]\n", 3129 QED_SLOWPATH_MFW_TLV_REQ); 3130 /* Memory barrier for setting atomic bit */ 3131 smp_mb__before_atomic(); 3132 set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); 3133 /* Memory barrier after setting atomic bit */ 3134 smp_mb__after_atomic(); 3135 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); 3136 3137 return 0; 3138 } 3139 3140 static void 3141 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) 3142 { 3143 struct qed_common_cb_ops *op = cdev->protocol_ops.common; 3144 struct qed_eth_stats_common *p_common; 3145 struct qed_generic_tlvs gen_tlvs; 3146 struct qed_eth_stats stats; 3147 int i; 3148 3149 memset(&gen_tlvs, 0, sizeof(gen_tlvs)); 3150 op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); 3151 3152 if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) 3153 tlv->flags.ipv4_csum_offload = true; 3154 if (gen_tlvs.feat_flags & QED_TLV_LSO) 3155 tlv->flags.lso_supported = true; 3156 tlv->flags.b_set = true; 3157 3158 for (i = 0; i < QED_TLV_MAC_COUNT; i++) { 3159 if (is_valid_ether_addr(gen_tlvs.mac[i])) { 3160 ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); 3161 tlv->mac_set[i] = true; 3162 } 3163 } 3164 3165 qed_get_vport_stats(cdev, &stats); 3166 p_common = &stats.common; 3167 tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + 3168 p_common->rx_bcast_pkts; 3169 tlv->rx_frames_set = true; 3170 tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + 3171 p_common->rx_bcast_bytes; 3172 tlv->rx_bytes_set = true; 3173 tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + 3174 p_common->tx_bcast_pkts; 3175 tlv->tx_frames_set = true; 3176 tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + 3177 p_common->tx_bcast_bytes; 3178 tlv->tx_bytes_set = true; 3179 } 3180 3181 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, 3182 union qed_mfw_tlv_data *tlv_buf) 3183 { 3184 struct qed_dev *cdev = hwfn->cdev; 3185 struct qed_common_cb_ops *ops; 3186 3187 ops = cdev->protocol_ops.common; 3188 if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { 3189 DP_NOTICE(hwfn, "Can't collect TLV management info\n"); 3190 return -EINVAL; 3191 } 3192 3193 switch (type) { 3194 case QED_MFW_TLV_GENERIC: 3195 qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); 3196 break; 3197 case QED_MFW_TLV_ETH: 3198 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); 3199 break; 3200 case QED_MFW_TLV_FCOE: 3201 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); 3202 break; 3203 case QED_MFW_TLV_ISCSI: 3204 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); 3205
break; 3206 default: 3207 break; 3208 } 3209 3210 return 0; 3211 } 3212 3213 unsigned long qed_get_epoch_time(void) 3214 { 3215 return ktime_get_real_seconds(); 3216 } 3217
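/* Usage sketch (illustrative only, not part of the driver): a protocol
 * driver that has obtained these callbacks via struct qed_common_ops
 * (for example through the "common" member of the qed_eth_ops interface)
 * could query the current link state roughly as follows; "edev" is a
 * hypothetical stand-in for the caller's private structure holding the
 * qed_dev pointer and the ops table:
 *
 *	struct qed_link_output link;
 *
 *	memset(&link, 0, sizeof(link));
 *	edev->ops->common->get_link(edev->cdev, &link);
 *	if (link.link_up)
 *		netdev_info(edev->ndev, "link up, %u Mbps\n", link.speed);
 *	else
 *		netdev_info(edev->ndev, "link down\n");
 */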