1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 /* QLogic qed NIC Driver 3 * Copyright (c) 2015-2017 QLogic Corporation 4 * Copyright (c) 2019-2020 Marvell International Ltd. 5 */ 6 7 #include <linux/stddef.h> 8 #include <linux/pci.h> 9 #include <linux/kernel.h> 10 #include <linux/slab.h> 11 #include <linux/delay.h> 12 #include <asm/byteorder.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/string.h> 15 #include <linux/module.h> 16 #include <linux/interrupt.h> 17 #include <linux/workqueue.h> 18 #include <linux/ethtool.h> 19 #include <linux/etherdevice.h> 20 #include <linux/vmalloc.h> 21 #include <linux/crash_dump.h> 22 #include <linux/crc32.h> 23 #include <linux/qed/qed_if.h> 24 #include <linux/qed/qed_ll2_if.h> 25 #include <net/devlink.h> 26 #include <linux/phylink.h> 27 28 #include "qed.h" 29 #include "qed_sriov.h" 30 #include "qed_sp.h" 31 #include "qed_dev_api.h" 32 #include "qed_ll2.h" 33 #include "qed_fcoe.h" 34 #include "qed_iscsi.h" 35 36 #include "qed_mcp.h" 37 #include "qed_reg_addr.h" 38 #include "qed_hw.h" 39 #include "qed_selftest.h" 40 #include "qed_debug.h" 41 #include "qed_devlink.h" 42 43 #define QED_ROCE_QPS (8192) 44 #define QED_ROCE_DPIS (8) 45 #define QED_RDMA_SRQS QED_ROCE_QPS 46 #define QED_NVM_CFG_GET_FLAGS 0xA 47 #define QED_NVM_CFG_GET_PF_FLAGS 0x1A 48 #define QED_NVM_CFG_MAX_ATTRS 50 49 50 static char version[] = 51 "QLogic FastLinQ 4xxxx Core Module qed\n"; 52 53 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); 54 MODULE_LICENSE("GPL"); 55 56 #define FW_FILE_VERSION \ 57 __stringify(FW_MAJOR_VERSION) "." \ 58 __stringify(FW_MINOR_VERSION) "." \ 59 __stringify(FW_REVISION_VERSION) "." \ 60 __stringify(FW_ENGINEERING_VERSION) 61 62 #define QED_FW_FILE_NAME \ 63 "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" 64 65 MODULE_FIRMWARE(QED_FW_FILE_NAME); 66 67 /* MFW speed capabilities maps */ 68 69 struct qed_mfw_speed_map { 70 u32 mfw_val; 71 __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); 72 73 const u32 *cap_arr; 74 u32 arr_size; 75 }; 76 77 #define QED_MFW_SPEED_MAP(type, arr) \ 78 { \ 79 .mfw_val = (type), \ 80 .cap_arr = (arr), \ 81 .arr_size = ARRAY_SIZE(arr), \ 82 } 83 84 static const u32 qed_mfw_ext_1g[] __initconst = { 85 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 86 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 87 ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 88 }; 89 90 static const u32 qed_mfw_ext_10g[] __initconst = { 91 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 92 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 93 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 94 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 95 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 96 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 97 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 98 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, 99 }; 100 101 static const u32 qed_mfw_ext_25g[] __initconst = { 102 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 103 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 104 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 105 }; 106 107 static const u32 qed_mfw_ext_40g[] __initconst = { 108 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 109 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 110 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 111 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 112 }; 113 114 static const u32 qed_mfw_ext_50g_base_r[] __initconst = { 115 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 116 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 117 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 118 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 119 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 120 }; 121 122 static const u32 qed_mfw_ext_50g_base_r2[] 
__initconst = { 123 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 124 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 125 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 126 }; 127 128 static const u32 qed_mfw_ext_100g_base_r2[] __initconst = { 129 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 130 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 131 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 132 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 133 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 134 }; 135 136 static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { 137 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 138 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 139 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 140 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 141 }; 142 143 static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { 144 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), 145 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), 146 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), 147 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), 148 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, 149 qed_mfw_ext_50g_base_r), 150 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2, 151 qed_mfw_ext_50g_base_r2), 152 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2, 153 qed_mfw_ext_100g_base_r2), 154 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4, 155 qed_mfw_ext_100g_base_r4), 156 }; 157 158 static const u32 qed_mfw_legacy_1g[] __initconst = { 159 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 160 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 161 ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 162 }; 163 164 static const u32 qed_mfw_legacy_10g[] __initconst = { 165 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 166 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 167 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 168 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 169 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 170 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 171 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 172 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, 173 }; 174 175 static const u32 qed_mfw_legacy_20g[] __initconst = { 176 ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 177 }; 178 179 static const u32 qed_mfw_legacy_25g[] __initconst = { 180 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 181 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 182 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 183 }; 184 185 static const u32 qed_mfw_legacy_40g[] __initconst = { 186 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 187 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 188 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 189 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 190 }; 191 192 static const u32 qed_mfw_legacy_50g[] __initconst = { 193 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 194 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 195 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 196 }; 197 198 static const u32 qed_mfw_legacy_bb_100g[] __initconst = { 199 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 200 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 201 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 202 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 203 }; 204 205 static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = { 206 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G, 207 qed_mfw_legacy_1g), 208 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G, 209 qed_mfw_legacy_10g), 210 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G, 211 qed_mfw_legacy_20g), 212 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G, 213 qed_mfw_legacy_25g), 214 
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
                          qed_mfw_legacy_40g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
                          qed_mfw_legacy_50g),
        QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
                          qed_mfw_legacy_bb_100g),
};

static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
        linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);

        map->cap_arr = NULL;
        map->arr_size = 0;
}

static void __init qed_mfw_speed_maps_init(void)
{
        u32 i;

        for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
                qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

        for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
                qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}

static int __init qed_init(void)
{
        pr_info("%s", version);

        qed_mfw_speed_maps_init();

        return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
        /* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);

static void qed_free_pci(struct qed_dev *cdev)
{
        struct pci_dev *pdev = cdev->pdev;

        if (cdev->doorbells && cdev->db_size)
                iounmap(cdev->doorbells);
        if (cdev->regview)
                iounmap(cdev->regview);
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL       0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
        u8 rev_id;
        int rc;

        cdev->pdev = pdev;

        rc = pci_enable_device(pdev);
        if (rc) {
                DP_NOTICE(cdev, "Cannot enable PCI device\n");
                goto err0;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #0\n");
                rc = -EIO;
                goto err1;
        }

        if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #2\n");
                rc = -EIO;
                goto err1;
        }

        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, "qed");
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to request PCI memory resources\n");
                        goto err1;
                }
                pci_set_master(pdev);
                pci_save_state(pdev);
        }

        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
        if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
                DP_NOTICE(cdev,
                          "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. 
Aborting.\n", 316 rev_id); 317 rc = -ENODEV; 318 goto err2; 319 } 320 if (!pci_is_pcie(pdev)) { 321 DP_NOTICE(cdev, "The bus is not PCI Express\n"); 322 rc = -EIO; 323 goto err2; 324 } 325 326 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 327 if (IS_PF(cdev) && !cdev->pci_params.pm_cap) 328 DP_NOTICE(cdev, "Cannot find power management capability\n"); 329 330 rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64)); 331 if (rc) { 332 DP_NOTICE(cdev, "Can't request DMA addresses\n"); 333 rc = -EIO; 334 goto err2; 335 } 336 337 cdev->pci_params.mem_start = pci_resource_start(pdev, 0); 338 cdev->pci_params.mem_end = pci_resource_end(pdev, 0); 339 cdev->pci_params.irq = pdev->irq; 340 341 cdev->regview = pci_ioremap_bar(pdev, 0); 342 if (!cdev->regview) { 343 DP_NOTICE(cdev, "Cannot map register space, aborting\n"); 344 rc = -ENOMEM; 345 goto err2; 346 } 347 348 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); 349 cdev->db_size = pci_resource_len(cdev->pdev, 2); 350 if (!cdev->db_size) { 351 if (IS_PF(cdev)) { 352 DP_NOTICE(cdev, "No Doorbell bar available\n"); 353 return -EINVAL; 354 } else { 355 return 0; 356 } 357 } 358 359 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); 360 361 if (!cdev->doorbells) { 362 DP_NOTICE(cdev, "Cannot map doorbell space\n"); 363 return -ENOMEM; 364 } 365 366 return 0; 367 368 err2: 369 pci_release_regions(pdev); 370 err1: 371 pci_disable_device(pdev); 372 err0: 373 return rc; 374 } 375 376 int qed_fill_dev_info(struct qed_dev *cdev, 377 struct qed_dev_info *dev_info) 378 { 379 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 380 struct qed_hw_info *hw_info = &p_hwfn->hw_info; 381 struct qed_tunnel_info *tun = &cdev->tunnel; 382 struct qed_ptt *ptt; 383 384 memset(dev_info, 0, sizeof(struct qed_dev_info)); 385 386 if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 387 tun->vxlan.b_mode_enabled) 388 dev_info->vxlan_enable = true; 389 390 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 391 tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 392 tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 393 dev_info->gre_enable = true; 394 395 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 396 tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 397 tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 398 dev_info->geneve_enable = true; 399 400 dev_info->num_hwfns = cdev->num_hwfns; 401 dev_info->pci_mem_start = cdev->pci_params.mem_start; 402 dev_info->pci_mem_end = cdev->pci_params.mem_end; 403 dev_info->pci_irq = cdev->pci_params.irq; 404 dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); 405 dev_info->dev_type = cdev->type; 406 ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); 407 408 if (IS_PF(cdev)) { 409 dev_info->fw_major = FW_MAJOR_VERSION; 410 dev_info->fw_minor = FW_MINOR_VERSION; 411 dev_info->fw_rev = FW_REVISION_VERSION; 412 dev_info->fw_eng = FW_ENGINEERING_VERSION; 413 dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, 414 &cdev->mf_bits); 415 if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits)) 416 dev_info->b_arfs_capable = true; 417 dev_info->tx_switching = true; 418 419 if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) 420 dev_info->wol_support = true; 421 422 dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); 423 dev_info->esl = qed_mcp_is_esl_supported(p_hwfn); 424 dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; 425 } else { 426 qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, 427 &dev_info->fw_minor, 
&dev_info->fw_rev, 428 &dev_info->fw_eng); 429 } 430 431 if (IS_PF(cdev)) { 432 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 433 if (ptt) { 434 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, 435 &dev_info->mfw_rev, NULL); 436 437 qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, 438 &dev_info->mbi_version); 439 440 qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, 441 &dev_info->flash_size); 442 443 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); 444 } 445 } else { 446 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, 447 &dev_info->mfw_rev, NULL); 448 } 449 450 dev_info->mtu = hw_info->mtu; 451 cdev->common_dev_info = *dev_info; 452 453 return 0; 454 } 455 456 static void qed_free_cdev(struct qed_dev *cdev) 457 { 458 kfree((void *)cdev); 459 } 460 461 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) 462 { 463 struct qed_dev *cdev; 464 465 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 466 if (!cdev) 467 return cdev; 468 469 qed_init_struct(cdev); 470 471 return cdev; 472 } 473 474 /* Sets the requested power state */ 475 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) 476 { 477 if (!cdev) 478 return -ENODEV; 479 480 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); 481 return 0; 482 } 483 484 /* probing */ 485 static struct qed_dev *qed_probe(struct pci_dev *pdev, 486 struct qed_probe_params *params) 487 { 488 struct qed_dev *cdev; 489 int rc; 490 491 cdev = qed_alloc_cdev(pdev); 492 if (!cdev) 493 goto err0; 494 495 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; 496 cdev->protocol = params->protocol; 497 498 if (params->is_vf) 499 cdev->b_is_vf = true; 500 501 qed_init_dp(cdev, params->dp_module, params->dp_level); 502 503 cdev->recov_in_prog = params->recov_in_prog; 504 505 rc = qed_init_pci(cdev, pdev); 506 if (rc) { 507 DP_ERR(cdev, "init pci failed\n"); 508 goto err1; 509 } 510 DP_INFO(cdev, "PCI init completed successfully\n"); 511 512 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); 513 if (rc) { 514 DP_ERR(cdev, "hw prepare failed\n"); 515 goto err2; 516 } 517 518 DP_INFO(cdev, "%s completed successfully\n", __func__); 519 520 return cdev; 521 522 err2: 523 qed_free_pci(cdev); 524 err1: 525 qed_free_cdev(cdev); 526 err0: 527 return NULL; 528 } 529 530 static void qed_remove(struct qed_dev *cdev) 531 { 532 if (!cdev) 533 return; 534 535 qed_hw_remove(cdev); 536 537 qed_free_pci(cdev); 538 539 qed_set_power_state(cdev, PCI_D3hot); 540 541 qed_free_cdev(cdev); 542 } 543 544 static void qed_disable_msix(struct qed_dev *cdev) 545 { 546 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 547 pci_disable_msix(cdev->pdev); 548 kfree(cdev->int_params.msix_table); 549 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { 550 pci_disable_msi(cdev->pdev); 551 } 552 553 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); 554 } 555 556 static int qed_enable_msix(struct qed_dev *cdev, 557 struct qed_int_params *int_params) 558 { 559 int i, rc, cnt; 560 561 cnt = int_params->in.num_vectors; 562 563 for (i = 0; i < cnt; i++) 564 int_params->msix_table[i].entry = i; 565 566 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 567 int_params->in.min_msix_cnt, cnt); 568 if (rc < cnt && rc >= int_params->in.min_msix_cnt && 569 (rc % cdev->num_hwfns)) { 570 pci_disable_msix(cdev->pdev); 571 572 /* If fastpath is initialized, we need at least one interrupt 573 * per hwfn [and the slow path interrupts]. New requested number 574 * should be a multiple of the number of hwfns. 
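 * For example, with two hwfns and rc == 9 granted vectors, the retry
 * below asks for cnt == 8 so that each hwfn keeps an equal share.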
575 */ 576 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; 577 DP_NOTICE(cdev, 578 "Trying to enable MSI-X with less vectors (%d out of %d)\n", 579 cnt, int_params->in.num_vectors); 580 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, 581 cnt); 582 if (!rc) 583 rc = cnt; 584 } 585 586 /* For VFs, we should return with an error in case we didn't get the 587 * exact number of msix vectors as we requested. 588 * Not doing that will lead to a crash when starting queues for 589 * this VF. 590 */ 591 if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { 592 /* MSI-x configuration was achieved */ 593 int_params->out.int_mode = QED_INT_MODE_MSIX; 594 int_params->out.num_vectors = rc; 595 rc = 0; 596 } else { 597 DP_NOTICE(cdev, 598 "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", 599 cnt, rc); 600 } 601 602 return rc; 603 } 604 605 /* This function outputs the int mode and the number of enabled msix vector */ 606 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) 607 { 608 struct qed_int_params *int_params = &cdev->int_params; 609 struct msix_entry *tbl; 610 int rc = 0, cnt; 611 612 switch (int_params->in.int_mode) { 613 case QED_INT_MODE_MSIX: 614 /* Allocate MSIX table */ 615 cnt = int_params->in.num_vectors; 616 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); 617 if (!int_params->msix_table) { 618 rc = -ENOMEM; 619 goto out; 620 } 621 622 /* Enable MSIX */ 623 rc = qed_enable_msix(cdev, int_params); 624 if (!rc) 625 goto out; 626 627 DP_NOTICE(cdev, "Failed to enable MSI-X\n"); 628 kfree(int_params->msix_table); 629 if (force_mode) 630 goto out; 631 fallthrough; 632 633 case QED_INT_MODE_MSI: 634 if (cdev->num_hwfns == 1) { 635 rc = pci_enable_msi(cdev->pdev); 636 if (!rc) { 637 int_params->out.int_mode = QED_INT_MODE_MSI; 638 goto out; 639 } 640 641 DP_NOTICE(cdev, "Failed to enable MSI\n"); 642 if (force_mode) 643 goto out; 644 } 645 fallthrough; 646 647 case QED_INT_MODE_INTA: 648 int_params->out.int_mode = QED_INT_MODE_INTA; 649 rc = 0; 650 goto out; 651 default: 652 DP_NOTICE(cdev, "Unknown int_mode value %d\n", 653 int_params->in.int_mode); 654 rc = -EINVAL; 655 } 656 657 out: 658 if (!rc) 659 DP_INFO(cdev, "Using %s interrupts\n", 660 int_params->out.int_mode == QED_INT_MODE_INTA ? 661 "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 
662 "MSI" : "MSIX"); 663 cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; 664 665 return rc; 666 } 667 668 static void qed_simd_handler_config(struct qed_dev *cdev, void *token, 669 int index, void(*handler)(void *)) 670 { 671 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 672 int relative_idx = index / cdev->num_hwfns; 673 674 hwfn->simd_proto_handler[relative_idx].func = handler; 675 hwfn->simd_proto_handler[relative_idx].token = token; 676 } 677 678 static void qed_simd_handler_clean(struct qed_dev *cdev, int index) 679 { 680 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 681 int relative_idx = index / cdev->num_hwfns; 682 683 memset(&hwfn->simd_proto_handler[relative_idx], 0, 684 sizeof(struct qed_simd_fp_handler)); 685 } 686 687 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) 688 { 689 tasklet_schedule((struct tasklet_struct *)tasklet); 690 return IRQ_HANDLED; 691 } 692 693 static irqreturn_t qed_single_int(int irq, void *dev_instance) 694 { 695 struct qed_dev *cdev = (struct qed_dev *)dev_instance; 696 struct qed_hwfn *hwfn; 697 irqreturn_t rc = IRQ_NONE; 698 u64 status; 699 int i, j; 700 701 for (i = 0; i < cdev->num_hwfns; i++) { 702 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); 703 704 if (!status) 705 continue; 706 707 hwfn = &cdev->hwfns[i]; 708 709 /* Slowpath interrupt */ 710 if (unlikely(status & 0x1)) { 711 tasklet_schedule(&hwfn->sp_dpc); 712 status &= ~0x1; 713 rc = IRQ_HANDLED; 714 } 715 716 /* Fastpath interrupts */ 717 for (j = 0; j < 64; j++) { 718 if ((0x2ULL << j) & status) { 719 struct qed_simd_fp_handler *p_handler = 720 &hwfn->simd_proto_handler[j]; 721 722 if (p_handler->func) 723 p_handler->func(p_handler->token); 724 else 725 DP_NOTICE(hwfn, 726 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", 727 j, status); 728 729 status &= ~(0x2ULL << j); 730 rc = IRQ_HANDLED; 731 } 732 } 733 734 if (unlikely(status)) 735 DP_VERBOSE(hwfn, NETIF_MSG_INTR, 736 "got an unknown interrupt status 0x%llx\n", 737 status); 738 } 739 740 return rc; 741 } 742 743 int qed_slowpath_irq_req(struct qed_hwfn *hwfn) 744 { 745 struct qed_dev *cdev = hwfn->cdev; 746 u32 int_mode; 747 int rc = 0; 748 u8 id; 749 750 int_mode = cdev->int_params.out.int_mode; 751 if (int_mode == QED_INT_MODE_MSIX) { 752 id = hwfn->my_id; 753 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", 754 id, cdev->pdev->bus->number, 755 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 756 rc = request_irq(cdev->int_params.msix_table[id].vector, 757 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc); 758 } else { 759 unsigned long flags = 0; 760 761 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", 762 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), 763 PCI_FUNC(cdev->pdev->devfn)); 764 765 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) 766 flags |= IRQF_SHARED; 767 768 rc = request_irq(cdev->pdev->irq, qed_single_int, 769 flags, cdev->name, cdev); 770 } 771 772 if (rc) 773 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); 774 else 775 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), 776 "Requested slowpath %s\n", 777 (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); 778 779 return rc; 780 } 781 782 static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) 783 { 784 /* Calling the disable function will make sure that any 785 * currently-running function is completed. The following call to the 786 * enable function makes this sequence a flush-like operation. 
787 */ 788 if (p_hwfn->b_sp_dpc_enabled) { 789 tasklet_disable(&p_hwfn->sp_dpc); 790 tasklet_enable(&p_hwfn->sp_dpc); 791 } 792 } 793 794 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) 795 { 796 struct qed_dev *cdev = p_hwfn->cdev; 797 u8 id = p_hwfn->my_id; 798 u32 int_mode; 799 800 int_mode = cdev->int_params.out.int_mode; 801 if (int_mode == QED_INT_MODE_MSIX) 802 synchronize_irq(cdev->int_params.msix_table[id].vector); 803 else 804 synchronize_irq(cdev->pdev->irq); 805 806 qed_slowpath_tasklet_flush(p_hwfn); 807 } 808 809 static void qed_slowpath_irq_free(struct qed_dev *cdev) 810 { 811 int i; 812 813 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 814 for_each_hwfn(cdev, i) { 815 if (!cdev->hwfns[i].b_int_requested) 816 break; 817 free_irq(cdev->int_params.msix_table[i].vector, 818 &cdev->hwfns[i].sp_dpc); 819 } 820 } else { 821 if (QED_LEADING_HWFN(cdev)->b_int_requested) 822 free_irq(cdev->pdev->irq, cdev); 823 } 824 qed_int_disable_post_isr_release(cdev); 825 } 826 827 static int qed_nic_stop(struct qed_dev *cdev) 828 { 829 int i, rc; 830 831 rc = qed_hw_stop(cdev); 832 833 for (i = 0; i < cdev->num_hwfns; i++) { 834 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 835 836 if (p_hwfn->b_sp_dpc_enabled) { 837 tasklet_disable(&p_hwfn->sp_dpc); 838 p_hwfn->b_sp_dpc_enabled = false; 839 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, 840 "Disabled sp tasklet [hwfn %d] at %p\n", 841 i, &p_hwfn->sp_dpc); 842 } 843 } 844 845 qed_dbg_pf_exit(cdev); 846 847 return rc; 848 } 849 850 static int qed_nic_setup(struct qed_dev *cdev) 851 { 852 int rc, i; 853 854 /* Determine if interface is going to require LL2 */ 855 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { 856 for (i = 0; i < cdev->num_hwfns; i++) { 857 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 858 859 p_hwfn->using_ll2 = true; 860 } 861 } 862 863 rc = qed_resc_alloc(cdev); 864 if (rc) 865 return rc; 866 867 DP_INFO(cdev, "Allocated qed resources\n"); 868 869 qed_resc_setup(cdev); 870 871 return rc; 872 } 873 874 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) 875 { 876 int limit = 0; 877 878 /* Mark the fastpath as free/used */ 879 cdev->int_params.fp_initialized = cnt ? true : false; 880 881 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) 882 limit = cdev->num_hwfns * 63; 883 else if (cdev->int_params.fp_msix_cnt) 884 limit = cdev->int_params.fp_msix_cnt; 885 886 if (!limit) 887 return -ENOMEM; 888 889 return min_t(int, cnt, limit); 890 } 891 892 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) 893 { 894 memset(info, 0, sizeof(struct qed_int_info)); 895 896 if (!cdev->int_params.fp_initialized) { 897 DP_INFO(cdev, 898 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 899 return -EINVAL; 900 } 901 902 /* Need to expose only MSI-X information; Single IRQ is handled solely 903 * by qed. 
904 */ 905 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 906 int msix_base = cdev->int_params.fp_msix_base; 907 908 info->msix_cnt = cdev->int_params.fp_msix_cnt; 909 info->msix = &cdev->int_params.msix_table[msix_base]; 910 } 911 912 return 0; 913 } 914 915 static int qed_slowpath_setup_int(struct qed_dev *cdev, 916 enum qed_int_mode int_mode) 917 { 918 struct qed_sb_cnt_info sb_cnt_info; 919 int num_l2_queues = 0; 920 int rc; 921 int i; 922 923 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 924 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 925 return -EINVAL; 926 } 927 928 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 929 cdev->int_params.in.int_mode = int_mode; 930 for_each_hwfn(cdev, i) { 931 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 932 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); 933 cdev->int_params.in.num_vectors += sb_cnt_info.cnt; 934 cdev->int_params.in.num_vectors++; /* slowpath */ 935 } 936 937 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 938 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 939 940 if (is_kdump_kernel()) { 941 DP_INFO(cdev, 942 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", 943 cdev->int_params.in.min_msix_cnt); 944 cdev->int_params.in.num_vectors = 945 cdev->int_params.in.min_msix_cnt; 946 } 947 948 rc = qed_set_int_mode(cdev, false); 949 if (rc) { 950 DP_ERR(cdev, "%s ERR\n", __func__); 951 return rc; 952 } 953 954 cdev->int_params.fp_msix_base = cdev->num_hwfns; 955 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 956 cdev->num_hwfns; 957 958 if (!IS_ENABLED(CONFIG_QED_RDMA) || 959 !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) 960 return 0; 961 962 for_each_hwfn(cdev, i) 963 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 964 965 DP_VERBOSE(cdev, QED_MSG_RDMA, 966 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", 967 cdev->int_params.fp_msix_cnt, num_l2_queues); 968 969 if (cdev->int_params.fp_msix_cnt > num_l2_queues) { 970 cdev->int_params.rdma_msix_cnt = 971 (cdev->int_params.fp_msix_cnt - num_l2_queues) 972 / cdev->num_hwfns; 973 cdev->int_params.rdma_msix_base = 974 cdev->int_params.fp_msix_base + num_l2_queues; 975 cdev->int_params.fp_msix_cnt = num_l2_queues; 976 } else { 977 cdev->int_params.rdma_msix_cnt = 0; 978 } 979 980 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 981 cdev->int_params.rdma_msix_cnt, 982 cdev->int_params.rdma_msix_base); 983 984 return 0; 985 } 986 987 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) 988 { 989 int rc; 990 991 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 992 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; 993 994 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), 995 &cdev->int_params.in.num_vectors); 996 if (cdev->num_hwfns > 1) { 997 u8 vectors = 0; 998 999 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); 1000 cdev->int_params.in.num_vectors += vectors; 1001 } 1002 1003 /* We want a minimum of one fastpath vector per vf hwfn */ 1004 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; 1005 1006 rc = qed_set_int_mode(cdev, true); 1007 if (rc) 1008 return rc; 1009 1010 cdev->int_params.fp_msix_base = 0; 1011 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; 1012 1013 return 0; 1014 } 1015 1016 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, 1017 u8 *input_buf, u32 max_size, u8 *unzip_buf) 1018 { 1019 int rc; 1020 1021 p_hwfn->stream->next_in = input_buf; 1022 
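/* Inflate input_buf (input_len bytes) into unzip_buf (at most max_size
 * bytes); the return value is the number of 32-bit dwords produced,
 * or 0 on any zlib error.
 */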
p_hwfn->stream->avail_in = input_len; 1023 p_hwfn->stream->next_out = unzip_buf; 1024 p_hwfn->stream->avail_out = max_size; 1025 1026 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); 1027 1028 if (rc != Z_OK) { 1029 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", 1030 rc); 1031 return 0; 1032 } 1033 1034 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); 1035 zlib_inflateEnd(p_hwfn->stream); 1036 1037 if (rc != Z_OK && rc != Z_STREAM_END) { 1038 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", 1039 p_hwfn->stream->msg, rc); 1040 return 0; 1041 } 1042 1043 return p_hwfn->stream->total_out / 4; 1044 } 1045 1046 static int qed_alloc_stream_mem(struct qed_dev *cdev) 1047 { 1048 int i; 1049 void *workspace; 1050 1051 for_each_hwfn(cdev, i) { 1052 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1053 1054 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); 1055 if (!p_hwfn->stream) 1056 return -ENOMEM; 1057 1058 workspace = vzalloc(zlib_inflate_workspacesize()); 1059 if (!workspace) 1060 return -ENOMEM; 1061 p_hwfn->stream->workspace = workspace; 1062 } 1063 1064 return 0; 1065 } 1066 1067 static void qed_free_stream_mem(struct qed_dev *cdev) 1068 { 1069 int i; 1070 1071 for_each_hwfn(cdev, i) { 1072 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1073 1074 if (!p_hwfn->stream) 1075 return; 1076 1077 vfree(p_hwfn->stream->workspace); 1078 kfree(p_hwfn->stream); 1079 } 1080 } 1081 1082 static void qed_update_pf_params(struct qed_dev *cdev, 1083 struct qed_pf_params *params) 1084 { 1085 int i; 1086 1087 if (IS_ENABLED(CONFIG_QED_RDMA)) { 1088 params->rdma_pf_params.num_qps = QED_ROCE_QPS; 1089 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 1090 params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; 1091 /* divide by 3 the MRs to avoid MF ILT overflow */ 1092 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 1093 } 1094 1095 if (cdev->num_hwfns > 1 || IS_VF(cdev)) 1096 params->eth_pf_params.num_arfs_filters = 0; 1097 1098 /* In case we might support RDMA, don't allow qede to be greedy 1099 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] 1100 * per hwfn. 
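 * (num_cons is clamped to QED_MAX_L2_CONS just below.)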
1101 */ 1102 if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { 1103 u16 *num_cons; 1104 1105 num_cons = ¶ms->eth_pf_params.num_cons; 1106 *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); 1107 } 1108 1109 for (i = 0; i < cdev->num_hwfns; i++) { 1110 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1111 1112 p_hwfn->pf_params = *params; 1113 } 1114 } 1115 1116 #define QED_PERIODIC_DB_REC_COUNT 10 1117 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 1118 #define QED_PERIODIC_DB_REC_INTERVAL \ 1119 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) 1120 1121 static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, 1122 enum qed_slowpath_wq_flag wq_flag, 1123 unsigned long delay) 1124 { 1125 if (!hwfn->slowpath_wq_active) 1126 return -EINVAL; 1127 1128 /* Memory barrier for setting atomic bit */ 1129 smp_mb__before_atomic(); 1130 set_bit(wq_flag, &hwfn->slowpath_task_flags); 1131 /* Memory barrier after setting atomic bit */ 1132 smp_mb__after_atomic(); 1133 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); 1134 1135 return 0; 1136 } 1137 1138 void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) 1139 { 1140 /* Reset periodic Doorbell Recovery counter */ 1141 p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; 1142 1143 /* Don't schedule periodic Doorbell Recovery if already scheduled */ 1144 if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1145 &p_hwfn->slowpath_task_flags)) 1146 return; 1147 1148 qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, 1149 QED_PERIODIC_DB_REC_INTERVAL); 1150 } 1151 1152 static void qed_slowpath_wq_stop(struct qed_dev *cdev) 1153 { 1154 int i; 1155 1156 if (IS_VF(cdev)) 1157 return; 1158 1159 for_each_hwfn(cdev, i) { 1160 if (!cdev->hwfns[i].slowpath_wq) 1161 continue; 1162 1163 /* Stop queuing new delayed works */ 1164 cdev->hwfns[i].slowpath_wq_active = false; 1165 1166 cancel_delayed_work(&cdev->hwfns[i].slowpath_task); 1167 destroy_workqueue(cdev->hwfns[i].slowpath_wq); 1168 } 1169 } 1170 1171 static void qed_slowpath_task(struct work_struct *work) 1172 { 1173 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 1174 slowpath_task.work); 1175 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 1176 1177 if (!ptt) { 1178 if (hwfn->slowpath_wq_active) 1179 queue_delayed_work(hwfn->slowpath_wq, 1180 &hwfn->slowpath_task, 0); 1181 1182 return; 1183 } 1184 1185 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, 1186 &hwfn->slowpath_task_flags)) 1187 qed_mfw_process_tlv_req(hwfn, ptt); 1188 1189 if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1190 &hwfn->slowpath_task_flags)) { 1191 /* skip qed_db_rec_handler during recovery/unload */ 1192 if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active) 1193 goto out; 1194 1195 qed_db_rec_handler(hwfn, ptt); 1196 if (hwfn->periodic_db_rec_count--) 1197 qed_slowpath_delayed_work(hwfn, 1198 QED_SLOWPATH_PERIODIC_DB_REC, 1199 QED_PERIODIC_DB_REC_INTERVAL); 1200 } 1201 1202 out: 1203 qed_ptt_release(hwfn, ptt); 1204 } 1205 1206 static int qed_slowpath_wq_start(struct qed_dev *cdev) 1207 { 1208 struct qed_hwfn *hwfn; 1209 int i; 1210 1211 if (IS_VF(cdev)) 1212 return 0; 1213 1214 for_each_hwfn(cdev, i) { 1215 hwfn = &cdev->hwfns[i]; 1216 1217 hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x", 1218 0, 0, cdev->pdev->bus->number, 1219 PCI_SLOT(cdev->pdev->devfn), 1220 hwfn->abs_pf_id); 1221 1222 if (!hwfn->slowpath_wq) { 1223 DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); 1224 return -ENOMEM; 1225 } 1226 1227 INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); 1228 
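/* Mark the workqueue usable only after the delayed work is initialized;
 * qed_slowpath_delayed_work() refuses to queue while slowpath_wq_active
 * is false.
 */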
hwfn->slowpath_wq_active = true; 1229 } 1230 1231 return 0; 1232 } 1233 1234 static int qed_slowpath_start(struct qed_dev *cdev, 1235 struct qed_slowpath_params *params) 1236 { 1237 struct qed_drv_load_params drv_load_params; 1238 struct qed_hw_init_params hw_init_params; 1239 struct qed_mcp_drv_version drv_version; 1240 struct qed_tunnel_info tunn_info; 1241 const u8 *data = NULL; 1242 struct qed_hwfn *hwfn; 1243 struct qed_ptt *p_ptt; 1244 int rc = -EINVAL; 1245 1246 if (qed_iov_wq_start(cdev)) 1247 goto err; 1248 1249 if (qed_slowpath_wq_start(cdev)) 1250 goto err; 1251 1252 if (IS_PF(cdev)) { 1253 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, 1254 &cdev->pdev->dev); 1255 if (rc) { 1256 DP_NOTICE(cdev, 1257 "Failed to find fw file - /lib/firmware/%s\n", 1258 QED_FW_FILE_NAME); 1259 goto err; 1260 } 1261 1262 if (cdev->num_hwfns == 1) { 1263 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 1264 if (p_ptt) { 1265 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; 1266 } else { 1267 DP_NOTICE(cdev, 1268 "Failed to acquire PTT for aRFS\n"); 1269 rc = -EINVAL; 1270 goto err; 1271 } 1272 } 1273 } 1274 1275 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; 1276 rc = qed_nic_setup(cdev); 1277 if (rc) 1278 goto err; 1279 1280 if (IS_PF(cdev)) 1281 rc = qed_slowpath_setup_int(cdev, params->int_mode); 1282 else 1283 rc = qed_slowpath_vf_setup_int(cdev); 1284 if (rc) 1285 goto err1; 1286 1287 if (IS_PF(cdev)) { 1288 /* Allocate stream for unzipping */ 1289 rc = qed_alloc_stream_mem(cdev); 1290 if (rc) 1291 goto err2; 1292 1293 /* First Dword used to differentiate between various sources */ 1294 data = cdev->firmware->data + sizeof(u32); 1295 1296 qed_dbg_pf_init(cdev); 1297 } 1298 1299 /* Start the slowpath */ 1300 memset(&hw_init_params, 0, sizeof(hw_init_params)); 1301 memset(&tunn_info, 0, sizeof(tunn_info)); 1302 tunn_info.vxlan.b_mode_enabled = true; 1303 tunn_info.l2_gre.b_mode_enabled = true; 1304 tunn_info.ip_gre.b_mode_enabled = true; 1305 tunn_info.l2_geneve.b_mode_enabled = true; 1306 tunn_info.ip_geneve.b_mode_enabled = true; 1307 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1308 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1309 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1310 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1311 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1312 hw_init_params.p_tunn = &tunn_info; 1313 hw_init_params.b_hw_start = true; 1314 hw_init_params.int_mode = cdev->int_params.out.int_mode; 1315 hw_init_params.allow_npar_tx_switch = true; 1316 hw_init_params.bin_fw_data = data; 1317 1318 memset(&drv_load_params, 0, sizeof(drv_load_params)); 1319 drv_load_params.is_crash_kernel = is_kdump_kernel(); 1320 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; 1321 drv_load_params.avoid_eng_reset = false; 1322 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; 1323 hw_init_params.p_drv_load_params = &drv_load_params; 1324 1325 rc = qed_hw_init(cdev, &hw_init_params); 1326 if (rc) 1327 goto err2; 1328 1329 DP_INFO(cdev, 1330 "HW initialization and function start completed successfully\n"); 1331 1332 if (IS_PF(cdev)) { 1333 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1334 BIT(QED_MODE_L2GENEVE_TUNN) | 1335 BIT(QED_MODE_IPGENEVE_TUNN) | 1336 BIT(QED_MODE_L2GRE_TUNN) | 1337 BIT(QED_MODE_IPGRE_TUNN)); 1338 } 1339 1340 /* Allocate LL2 interface if needed */ 1341 if (QED_LEADING_HWFN(cdev)->using_ll2) { 1342 rc = qed_ll2_alloc_if(cdev); 1343 if (rc) 1344 goto err3; 1345 } 1346 if (IS_PF(cdev)) { 1347 hwfn 
= QED_LEADING_HWFN(cdev); 1348 drv_version.version = (params->drv_major << 24) | 1349 (params->drv_minor << 16) | 1350 (params->drv_rev << 8) | 1351 (params->drv_eng); 1352 strscpy(drv_version.name, params->name, 1353 MCP_DRV_VER_STR_SIZE - 4); 1354 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 1355 &drv_version); 1356 if (rc) { 1357 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1358 goto err4; 1359 } 1360 } 1361 1362 qed_reset_vport_stats(cdev); 1363 1364 return 0; 1365 1366 err4: 1367 qed_ll2_dealloc_if(cdev); 1368 err3: 1369 qed_hw_stop(cdev); 1370 err2: 1371 qed_hw_timers_stop_all(cdev); 1372 if (IS_PF(cdev)) 1373 qed_slowpath_irq_free(cdev); 1374 qed_free_stream_mem(cdev); 1375 qed_disable_msix(cdev); 1376 err1: 1377 qed_resc_free(cdev); 1378 err: 1379 if (IS_PF(cdev)) 1380 release_firmware(cdev->firmware); 1381 1382 if (IS_PF(cdev) && (cdev->num_hwfns == 1) && 1383 QED_LEADING_HWFN(cdev)->p_arfs_ptt) 1384 qed_ptt_release(QED_LEADING_HWFN(cdev), 1385 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1386 1387 qed_iov_wq_stop(cdev, false); 1388 1389 qed_slowpath_wq_stop(cdev); 1390 1391 return rc; 1392 } 1393 1394 static int qed_slowpath_stop(struct qed_dev *cdev) 1395 { 1396 if (!cdev) 1397 return -ENODEV; 1398 1399 qed_slowpath_wq_stop(cdev); 1400 1401 qed_ll2_dealloc_if(cdev); 1402 1403 if (IS_PF(cdev)) { 1404 if (cdev->num_hwfns == 1) 1405 qed_ptt_release(QED_LEADING_HWFN(cdev), 1406 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1407 qed_free_stream_mem(cdev); 1408 if (IS_QED_ETH_IF(cdev)) 1409 qed_sriov_disable(cdev, true); 1410 } 1411 1412 qed_nic_stop(cdev); 1413 1414 if (IS_PF(cdev)) 1415 qed_slowpath_irq_free(cdev); 1416 1417 qed_disable_msix(cdev); 1418 1419 qed_resc_free(cdev); 1420 1421 qed_iov_wq_stop(cdev, true); 1422 1423 if (IS_PF(cdev)) 1424 release_firmware(cdev->firmware); 1425 1426 return 0; 1427 } 1428 1429 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) 1430 { 1431 int i; 1432 1433 memcpy(cdev->name, name, NAME_SIZE); 1434 for_each_hwfn(cdev, i) 1435 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 1436 } 1437 1438 static u32 qed_sb_init(struct qed_dev *cdev, 1439 struct qed_sb_info *sb_info, 1440 void *sb_virt_addr, 1441 dma_addr_t sb_phy_addr, u16 sb_id, 1442 enum qed_sb_type type) 1443 { 1444 struct qed_hwfn *p_hwfn; 1445 struct qed_ptt *p_ptt; 1446 u16 rel_sb_id; 1447 u32 rc; 1448 1449 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1450 if (type == QED_SB_TYPE_L2_QUEUE) { 1451 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1452 rel_sb_id = sb_id / cdev->num_hwfns; 1453 } else { 1454 p_hwfn = QED_AFFIN_HWFN(cdev); 1455 rel_sb_id = sb_id; 1456 } 1457 1458 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1459 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1460 IS_LEAD_HWFN(p_hwfn) ? 
0 : 1, rel_sb_id, sb_id); 1461 1462 if (IS_PF(p_hwfn->cdev)) { 1463 p_ptt = qed_ptt_acquire(p_hwfn); 1464 if (!p_ptt) 1465 return -EBUSY; 1466 1467 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, 1468 sb_phy_addr, rel_sb_id); 1469 qed_ptt_release(p_hwfn, p_ptt); 1470 } else { 1471 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, 1472 sb_phy_addr, rel_sb_id); 1473 } 1474 1475 return rc; 1476 } 1477 1478 static u32 qed_sb_release(struct qed_dev *cdev, 1479 struct qed_sb_info *sb_info, 1480 u16 sb_id, 1481 enum qed_sb_type type) 1482 { 1483 struct qed_hwfn *p_hwfn; 1484 u16 rel_sb_id; 1485 u32 rc; 1486 1487 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1488 if (type == QED_SB_TYPE_L2_QUEUE) { 1489 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1490 rel_sb_id = sb_id / cdev->num_hwfns; 1491 } else { 1492 p_hwfn = QED_AFFIN_HWFN(cdev); 1493 rel_sb_id = sb_id; 1494 } 1495 1496 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1497 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1498 IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); 1499 1500 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); 1501 1502 return rc; 1503 } 1504 1505 static bool qed_can_link_change(struct qed_dev *cdev) 1506 { 1507 return true; 1508 } 1509 1510 static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params, 1511 const struct qed_link_params *params) 1512 { 1513 struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed; 1514 const struct qed_mfw_speed_map *map; 1515 u32 i; 1516 1517 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1518 ext_speed->autoneg = !!params->autoneg; 1519 1520 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1521 ext_speed->advertised_speeds = 0; 1522 1523 for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) { 1524 map = qed_mfw_ext_maps + i; 1525 1526 if (linkmode_intersects(params->adv_speeds, map->caps)) 1527 ext_speed->advertised_speeds |= map->mfw_val; 1528 } 1529 } 1530 1531 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) { 1532 switch (params->forced_speed) { 1533 case SPEED_1000: 1534 ext_speed->forced_speed = QED_EXT_SPEED_1G; 1535 break; 1536 case SPEED_10000: 1537 ext_speed->forced_speed = QED_EXT_SPEED_10G; 1538 break; 1539 case SPEED_20000: 1540 ext_speed->forced_speed = QED_EXT_SPEED_20G; 1541 break; 1542 case SPEED_25000: 1543 ext_speed->forced_speed = QED_EXT_SPEED_25G; 1544 break; 1545 case SPEED_40000: 1546 ext_speed->forced_speed = QED_EXT_SPEED_40G; 1547 break; 1548 case SPEED_50000: 1549 ext_speed->forced_speed = QED_EXT_SPEED_50G_R | 1550 QED_EXT_SPEED_50G_R2; 1551 break; 1552 case SPEED_100000: 1553 ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 | 1554 QED_EXT_SPEED_100G_R4 | 1555 QED_EXT_SPEED_100G_P4; 1556 break; 1557 default: 1558 break; 1559 } 1560 } 1561 1562 if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)) 1563 return; 1564 1565 switch (params->forced_speed) { 1566 case SPEED_25000: 1567 switch (params->fec) { 1568 case FEC_FORCE_MODE_NONE: 1569 link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE; 1570 break; 1571 case FEC_FORCE_MODE_FIRECODE: 1572 link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R; 1573 break; 1574 case FEC_FORCE_MODE_RS: 1575 link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528; 1576 break; 1577 case FEC_FORCE_MODE_AUTO: 1578 link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 | 1579 ETH_EXT_FEC_25G_BASE_R | 1580 ETH_EXT_FEC_25G_NONE; 1581 break; 1582 default: 1583 break; 1584 } 1585 1586 break; 1587 case SPEED_40000: 1588 switch 
(params->fec) {
                case FEC_FORCE_MODE_NONE:
                        link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
                        break;
                case FEC_FORCE_MODE_FIRECODE:
                        link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
                        break;
                case FEC_FORCE_MODE_AUTO:
                        link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
                                                    ETH_EXT_FEC_40G_NONE;
                        break;
                default:
                        break;
                }

                break;
        case SPEED_50000:
                switch (params->fec) {
                case FEC_FORCE_MODE_NONE:
                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
                        break;
                case FEC_FORCE_MODE_FIRECODE:
                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
                        break;
                case FEC_FORCE_MODE_RS:
                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
                        break;
                case FEC_FORCE_MODE_AUTO:
                        link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
                                                    ETH_EXT_FEC_50G_BASE_R |
                                                    ETH_EXT_FEC_50G_NONE;
                        break;
                default:
                        break;
                }

                break;
        case SPEED_100000:
                switch (params->fec) {
                case FEC_FORCE_MODE_NONE:
                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
                        break;
                case FEC_FORCE_MODE_FIRECODE:
                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
                        break;
                case FEC_FORCE_MODE_RS:
                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
                        break;
                case FEC_FORCE_MODE_AUTO:
                        link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
                                                    ETH_EXT_FEC_100G_BASE_R |
                                                    ETH_EXT_FEC_100G_NONE;
                        break;
                default:
                        break;
                }

                break;
        default:
                break;
        }
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
        struct qed_mcp_link_params *link_params;
        struct qed_mcp_link_speed_params *speed;
        const struct qed_mfw_speed_map *map;
        struct qed_hwfn *hwfn;
        struct qed_ptt *ptt;
        int rc;
        u32 i;

        if (!cdev)
                return -ENODEV;

        /* The link should be set only once per PF */
        hwfn = &cdev->hwfns[0];

        /* When VF wants to set link, force it to read the bulletin instead.
         * This mimics the PF behavior, where a notification [both immediate
         * and possibly later] would be generated when changing properties. 
1670 */ 1671 if (IS_VF(cdev)) { 1672 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); 1673 return 0; 1674 } 1675 1676 ptt = qed_ptt_acquire(hwfn); 1677 if (!ptt) 1678 return -EBUSY; 1679 1680 link_params = qed_mcp_get_link_params(hwfn); 1681 if (!link_params) 1682 return -ENODATA; 1683 1684 speed = &link_params->speed; 1685 1686 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1687 speed->autoneg = !!params->autoneg; 1688 1689 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1690 speed->advertised_speeds = 0; 1691 1692 for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) { 1693 map = qed_mfw_legacy_maps + i; 1694 1695 if (linkmode_intersects(params->adv_speeds, map->caps)) 1696 speed->advertised_speeds |= map->mfw_val; 1697 } 1698 } 1699 1700 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1701 speed->forced_speed = params->forced_speed; 1702 1703 if (qed_mcp_is_ext_speed_supported(hwfn)) 1704 qed_set_ext_speed_params(link_params, params); 1705 1706 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 1707 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1708 link_params->pause.autoneg = true; 1709 else 1710 link_params->pause.autoneg = false; 1711 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) 1712 link_params->pause.forced_rx = true; 1713 else 1714 link_params->pause.forced_rx = false; 1715 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 1716 link_params->pause.forced_tx = true; 1717 else 1718 link_params->pause.forced_tx = false; 1719 } 1720 1721 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1722 switch (params->loopback_mode) { 1723 case QED_LINK_LOOPBACK_INT_PHY: 1724 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; 1725 break; 1726 case QED_LINK_LOOPBACK_EXT_PHY: 1727 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; 1728 break; 1729 case QED_LINK_LOOPBACK_EXT: 1730 link_params->loopback_mode = ETH_LOOPBACK_EXT; 1731 break; 1732 case QED_LINK_LOOPBACK_MAC: 1733 link_params->loopback_mode = ETH_LOOPBACK_MAC; 1734 break; 1735 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123: 1736 link_params->loopback_mode = 1737 ETH_LOOPBACK_CNIG_AH_ONLY_0123; 1738 break; 1739 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301: 1740 link_params->loopback_mode = 1741 ETH_LOOPBACK_CNIG_AH_ONLY_2301; 1742 break; 1743 case QED_LINK_LOOPBACK_PCS_AH_ONLY: 1744 link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY; 1745 break; 1746 case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY: 1747 link_params->loopback_mode = 1748 ETH_LOOPBACK_REVERSE_MAC_AH_ONLY; 1749 break; 1750 case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY: 1751 link_params->loopback_mode = 1752 ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY; 1753 break; 1754 default: 1755 link_params->loopback_mode = ETH_LOOPBACK_NONE; 1756 break; 1757 } 1758 } 1759 1760 if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) 1761 memcpy(&link_params->eee, ¶ms->eee, 1762 sizeof(link_params->eee)); 1763 1764 if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG) 1765 link_params->fec = params->fec; 1766 1767 rc = qed_mcp_set_link(hwfn, ptt, params->link_up); 1768 1769 qed_ptt_release(hwfn, ptt); 1770 1771 return rc; 1772 } 1773 1774 static int qed_get_port_type(u32 media_type) 1775 { 1776 int port_type; 1777 1778 switch (media_type) { 1779 case MEDIA_SFPP_10G_FIBER: 1780 case MEDIA_SFP_1G_FIBER: 1781 case MEDIA_XFP_FIBER: 1782 case MEDIA_MODULE_FIBER: 1783 port_type = PORT_FIBRE; 1784 break; 1785 case MEDIA_DA_TWINAX: 1786 port_type = PORT_DA; 1787 break; 1788 case MEDIA_BASE_T: 1789 
port_type = PORT_TP; 1790 break; 1791 case MEDIA_KR: 1792 case MEDIA_NOT_PRESENT: 1793 port_type = PORT_NONE; 1794 break; 1795 case MEDIA_UNSPECIFIED: 1796 default: 1797 port_type = PORT_OTHER; 1798 break; 1799 } 1800 return port_type; 1801 } 1802 1803 static int qed_get_link_data(struct qed_hwfn *hwfn, 1804 struct qed_mcp_link_params *params, 1805 struct qed_mcp_link_state *link, 1806 struct qed_mcp_link_capabilities *link_caps) 1807 { 1808 void *p; 1809 1810 if (!IS_PF(hwfn->cdev)) { 1811 qed_vf_get_link_params(hwfn, params); 1812 qed_vf_get_link_state(hwfn, link); 1813 qed_vf_get_link_caps(hwfn, link_caps); 1814 1815 return 0; 1816 } 1817 1818 p = qed_mcp_get_link_params(hwfn); 1819 if (!p) 1820 return -ENXIO; 1821 memcpy(params, p, sizeof(*params)); 1822 1823 p = qed_mcp_get_link_state(hwfn); 1824 if (!p) 1825 return -ENXIO; 1826 memcpy(link, p, sizeof(*link)); 1827 1828 p = qed_mcp_get_link_capabilities(hwfn); 1829 if (!p) 1830 return -ENXIO; 1831 memcpy(link_caps, p, sizeof(*link_caps)); 1832 1833 return 0; 1834 } 1835 1836 static void qed_fill_link_capability(struct qed_hwfn *hwfn, 1837 struct qed_ptt *ptt, u32 capability, 1838 unsigned long *if_caps) 1839 { 1840 u32 media_type, tcvr_state, tcvr_type; 1841 u32 speed_mask, board_cfg; 1842 1843 if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) 1844 media_type = MEDIA_UNSPECIFIED; 1845 1846 if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) 1847 tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; 1848 1849 if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) 1850 speed_mask = 0xFFFFFFFF; 1851 1852 if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) 1853 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; 1854 1855 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 1856 "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", 1857 media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); 1858 1859 switch (media_type) { 1860 case MEDIA_DA_TWINAX: 1861 phylink_set(if_caps, FIBRE); 1862 1863 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1864 phylink_set(if_caps, 20000baseKR2_Full); 1865 1866 /* For DAC media multiple speed capabilities are supported */ 1867 capability |= speed_mask; 1868 1869 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1870 phylink_set(if_caps, 1000baseKX_Full); 1871 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1872 phylink_set(if_caps, 10000baseCR_Full); 1873 1874 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1875 switch (tcvr_type) { 1876 case ETH_TRANSCEIVER_TYPE_40G_CR4: 1877 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: 1878 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: 1879 phylink_set(if_caps, 40000baseCR4_Full); 1880 break; 1881 default: 1882 break; 1883 } 1884 1885 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1886 phylink_set(if_caps, 25000baseCR_Full); 1887 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1888 phylink_set(if_caps, 50000baseCR2_Full); 1889 1890 if (capability & 1891 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1892 switch (tcvr_type) { 1893 case ETH_TRANSCEIVER_TYPE_100G_CR4: 1894 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: 1895 phylink_set(if_caps, 100000baseCR4_Full); 1896 break; 1897 default: 1898 break; 1899 } 1900 1901 break; 1902 case MEDIA_BASE_T: 1903 phylink_set(if_caps, TP); 1904 1905 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { 1906 if (capability & 1907 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1908 phylink_set(if_caps, 
1000baseT_Full); 1909 if (capability & 1910 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1911 phylink_set(if_caps, 10000baseT_Full); 1912 } 1913 1914 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { 1915 phylink_set(if_caps, FIBRE); 1916 1917 switch (tcvr_type) { 1918 case ETH_TRANSCEIVER_TYPE_1000BASET: 1919 phylink_set(if_caps, 1000baseT_Full); 1920 break; 1921 case ETH_TRANSCEIVER_TYPE_10G_BASET: 1922 phylink_set(if_caps, 10000baseT_Full); 1923 break; 1924 default: 1925 break; 1926 } 1927 } 1928 1929 break; 1930 case MEDIA_SFP_1G_FIBER: 1931 case MEDIA_SFPP_10G_FIBER: 1932 case MEDIA_XFP_FIBER: 1933 case MEDIA_MODULE_FIBER: 1934 phylink_set(if_caps, FIBRE); 1935 capability |= speed_mask; 1936 1937 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1938 switch (tcvr_type) { 1939 case ETH_TRANSCEIVER_TYPE_1G_LX: 1940 case ETH_TRANSCEIVER_TYPE_1G_SX: 1941 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: 1942 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: 1943 phylink_set(if_caps, 1000baseKX_Full); 1944 break; 1945 default: 1946 break; 1947 } 1948 1949 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1950 switch (tcvr_type) { 1951 case ETH_TRANSCEIVER_TYPE_10G_SR: 1952 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: 1953 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: 1954 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: 1955 phylink_set(if_caps, 10000baseSR_Full); 1956 break; 1957 case ETH_TRANSCEIVER_TYPE_10G_LR: 1958 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: 1959 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: 1960 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: 1961 phylink_set(if_caps, 10000baseLR_Full); 1962 break; 1963 case ETH_TRANSCEIVER_TYPE_10G_LRM: 1964 phylink_set(if_caps, 10000baseLRM_Full); 1965 break; 1966 case ETH_TRANSCEIVER_TYPE_10G_ER: 1967 phylink_set(if_caps, 10000baseR_FEC); 1968 break; 1969 default: 1970 break; 1971 } 1972 1973 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1974 phylink_set(if_caps, 20000baseKR2_Full); 1975 1976 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1977 switch (tcvr_type) { 1978 case ETH_TRANSCEIVER_TYPE_25G_SR: 1979 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: 1980 phylink_set(if_caps, 25000baseSR_Full); 1981 break; 1982 default: 1983 break; 1984 } 1985 1986 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1987 switch (tcvr_type) { 1988 case ETH_TRANSCEIVER_TYPE_40G_LR4: 1989 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: 1990 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: 1991 phylink_set(if_caps, 40000baseLR4_Full); 1992 break; 1993 case ETH_TRANSCEIVER_TYPE_40G_SR4: 1994 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: 1995 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: 1996 phylink_set(if_caps, 40000baseSR4_Full); 1997 break; 1998 default: 1999 break; 2000 } 2001 2002 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 2003 phylink_set(if_caps, 50000baseKR2_Full); 2004 2005 if (capability & 2006 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 2007 switch (tcvr_type) { 2008 case ETH_TRANSCEIVER_TYPE_100G_SR4: 2009 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: 2010 phylink_set(if_caps, 100000baseSR4_Full); 2011 break; 2012 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: 2013 phylink_set(if_caps, 100000baseLR4_ER4_Full); 2014 break; 2015 default: 2016 break; 2017 } 2018 2019 break; 2020 case MEDIA_KR: 2021 phylink_set(if_caps, Backplane); 2022 2023 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 2024 phylink_set(if_caps, 
20000baseKR2_Full); 2025 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 2026 phylink_set(if_caps, 1000baseKX_Full); 2027 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 2028 phylink_set(if_caps, 10000baseKR_Full); 2029 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 2030 phylink_set(if_caps, 25000baseKR_Full); 2031 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 2032 phylink_set(if_caps, 40000baseKR4_Full); 2033 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 2034 phylink_set(if_caps, 50000baseKR2_Full); 2035 if (capability & 2036 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 2037 phylink_set(if_caps, 100000baseKR4_Full); 2038 2039 break; 2040 case MEDIA_UNSPECIFIED: 2041 case MEDIA_NOT_PRESENT: 2042 default: 2043 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, 2044 "Unknown media and transceiver type;\n"); 2045 break; 2046 } 2047 } 2048 2049 static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask) 2050 { 2051 *speed_mask = 0; 2052 2053 if (caps & 2054 (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD)) 2055 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2056 if (caps & QED_LINK_PARTNER_SPEED_10G) 2057 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2058 if (caps & QED_LINK_PARTNER_SPEED_20G) 2059 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; 2060 if (caps & QED_LINK_PARTNER_SPEED_25G) 2061 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 2062 if (caps & QED_LINK_PARTNER_SPEED_40G) 2063 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 2064 if (caps & QED_LINK_PARTNER_SPEED_50G) 2065 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 2066 if (caps & QED_LINK_PARTNER_SPEED_100G) 2067 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; 2068 } 2069 2070 static void qed_fill_link(struct qed_hwfn *hwfn, 2071 struct qed_ptt *ptt, 2072 struct qed_link_output *if_link) 2073 { 2074 struct qed_mcp_link_capabilities link_caps; 2075 struct qed_mcp_link_params params; 2076 struct qed_mcp_link_state link; 2077 u32 media_type, speed_mask; 2078 2079 memset(if_link, 0, sizeof(*if_link)); 2080 2081 /* Prepare source inputs */ 2082 if (qed_get_link_data(hwfn, &params, &link, &link_caps)) { 2083 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 2084 return; 2085 } 2086 2087 /* Set the link parameters to pass to protocol driver */ 2088 if (link.link_up) 2089 if_link->link_up = true; 2090 2091 if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) { 2092 if (link_caps.default_ext_autoneg) 2093 phylink_set(if_link->supported_caps, Autoneg); 2094 2095 linkmode_copy(if_link->advertised_caps, if_link->supported_caps); 2096 2097 if (params.ext_speed.autoneg) 2098 phylink_set(if_link->advertised_caps, Autoneg); 2099 else 2100 phylink_clear(if_link->advertised_caps, Autoneg); 2101 2102 qed_fill_link_capability(hwfn, ptt, 2103 params.ext_speed.advertised_speeds, 2104 if_link->advertised_caps); 2105 } else { 2106 if (link_caps.default_speed_autoneg) 2107 phylink_set(if_link->supported_caps, Autoneg); 2108 2109 linkmode_copy(if_link->advertised_caps, if_link->supported_caps); 2110 2111 if (params.speed.autoneg) 2112 phylink_set(if_link->advertised_caps, Autoneg); 2113 else 2114 phylink_clear(if_link->advertised_caps, Autoneg); 2115 } 2116 2117 if (params.pause.autoneg || 2118 (params.pause.forced_rx && params.pause.forced_tx)) 2119 phylink_set(if_link->supported_caps, Asym_Pause); 2120 if (params.pause.autoneg || params.pause.forced_rx || 2121
params.pause.forced_tx) 2122 phylink_set(if_link->supported_caps, Pause); 2123 2124 if_link->sup_fec = link_caps.fec_default; 2125 if_link->active_fec = params.fec; 2126 2127 /* Fill link advertised capability */ 2128 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, 2129 if_link->advertised_caps); 2130 2131 /* Fill link supported capability */ 2132 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, 2133 if_link->supported_caps); 2134 2135 /* Fill partner advertised capability */ 2136 qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask); 2137 qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps); 2138 2139 if (link.link_up) 2140 if_link->speed = link.speed; 2141 2142 /* TODO - fill duplex properly */ 2143 if_link->duplex = DUPLEX_FULL; 2144 qed_mcp_get_media_type(hwfn, ptt, &media_type); 2145 if_link->port = qed_get_port_type(media_type); 2146 2147 if_link->autoneg = params.speed.autoneg; 2148 2149 if (params.pause.autoneg) 2150 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 2151 if (params.pause.forced_rx) 2152 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 2153 if (params.pause.forced_tx) 2154 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 2155 2156 if (link.an_complete) 2157 phylink_set(if_link->lp_caps, Autoneg); 2158 if (link.partner_adv_pause) 2159 phylink_set(if_link->lp_caps, Pause); 2160 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 2161 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 2162 phylink_set(if_link->lp_caps, Asym_Pause); 2163 2164 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 2165 if_link->eee_supported = false; 2166 } else { 2167 if_link->eee_supported = true; 2168 if_link->eee_active = link.eee_active; 2169 if_link->sup_caps = link_caps.eee_speed_caps; 2170 /* MFW clears adv_caps on eee disable; use configured value */ 2171 if_link->eee.adv_caps = link.eee_adv_caps ? 
link.eee_adv_caps : 2172 params.eee.adv_caps; 2173 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 2174 if_link->eee.enable = params.eee.enable; 2175 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 2176 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 2177 } 2178 } 2179 2180 static void qed_get_current_link(struct qed_dev *cdev, 2181 struct qed_link_output *if_link) 2182 { 2183 struct qed_hwfn *hwfn; 2184 struct qed_ptt *ptt; 2185 int i; 2186 2187 hwfn = &cdev->hwfns[0]; 2188 if (IS_PF(cdev)) { 2189 ptt = qed_ptt_acquire(hwfn); 2190 if (ptt) { 2191 qed_fill_link(hwfn, ptt, if_link); 2192 qed_ptt_release(hwfn, ptt); 2193 } else { 2194 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); 2195 } 2196 } else { 2197 qed_fill_link(hwfn, NULL, if_link); 2198 } 2199 2200 for_each_hwfn(cdev, i) 2201 qed_inform_vf_link_state(&cdev->hwfns[i]); 2202 } 2203 2204 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2205 { 2206 void *cookie = hwfn->cdev->ops_cookie; 2207 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2208 struct qed_link_output if_link; 2209 2210 qed_fill_link(hwfn, ptt, &if_link); 2211 qed_inform_vf_link_state(hwfn); 2212 2213 if (IS_LEAD_HWFN(hwfn) && cookie) 2214 op->link_update(cookie, &if_link); 2215 } 2216 2217 void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2218 { 2219 void *cookie = hwfn->cdev->ops_cookie; 2220 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2221 2222 if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update) 2223 op->bw_update(cookie); 2224 } 2225 2226 static int qed_drain(struct qed_dev *cdev) 2227 { 2228 struct qed_hwfn *hwfn; 2229 struct qed_ptt *ptt; 2230 int i, rc; 2231 2232 if (IS_VF(cdev)) 2233 return 0; 2234 2235 for_each_hwfn(cdev, i) { 2236 hwfn = &cdev->hwfns[i]; 2237 ptt = qed_ptt_acquire(hwfn); 2238 if (!ptt) { 2239 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 2240 return -EBUSY; 2241 } 2242 rc = qed_mcp_drain(hwfn, ptt); 2243 qed_ptt_release(hwfn, ptt); 2244 if (rc) 2245 return rc; 2246 } 2247 2248 return 0; 2249 } 2250 2251 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 2252 struct qed_nvm_image_att *nvm_image, 2253 u32 *crc) 2254 { 2255 u8 *buf = NULL; 2256 int rc; 2257 2258 /* Allocate a buffer for holding the nvram image */ 2259 buf = kzalloc(nvm_image->length, GFP_KERNEL); 2260 if (!buf) 2261 return -ENOMEM; 2262 2263 /* Read image into buffer */ 2264 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 2265 buf, nvm_image->length); 2266 if (rc) { 2267 DP_ERR(cdev, "Failed reading image from nvm\n"); 2268 goto out; 2269 } 2270 2271 /* Convert the buffer into big-endian format (excluding the 2272 * closing 4 bytes of CRC). 2273 */ 2274 cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, 2275 DIV_ROUND_UP(nvm_image->length - 4, 4)); 2276 2277 /* Calc CRC for the "actual" image buffer, i.e. not including 2278 * the last 4 CRC bytes. 2279 */ 2280 *crc = ~crc32(~0U, buf, nvm_image->length - 4); 2281 *crc = (__force u32)cpu_to_be32p(crc); 2282 2283 out: 2284 kfree(buf); 2285 2286 return rc; 2287 } 2288 2289 /* Binary file format - 2290 * /----------------------------------------------------------------------\ 2291 * 0B | 0x4 [command index] | 2292 * 4B | image_type | Options | Number of register settings | 2293 * 8B | Value | 2294 * 12B | Mask | 2295 * 16B | Offset | 2296 * \----------------------------------------------------------------------/ 2297 * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 
2298 * Options - 0'b - Calculate & Update CRC for image 2299 */ 2300 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, 2301 bool *check_resp) 2302 { 2303 struct qed_nvm_image_att nvm_image; 2304 struct qed_hwfn *p_hwfn; 2305 bool is_crc = false; 2306 u32 image_type; 2307 int rc = 0, i; 2308 u16 len; 2309 2310 *data += 4; 2311 image_type = **data; 2312 p_hwfn = QED_LEADING_HWFN(cdev); 2313 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 2314 if (image_type == p_hwfn->nvm_info.image_att[i].image_type) 2315 break; 2316 if (i == p_hwfn->nvm_info.num_images) { 2317 DP_ERR(cdev, "Failed to find nvram image of type %08x\n", 2318 image_type); 2319 return -ENOENT; 2320 } 2321 2322 nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; 2323 nvm_image.length = p_hwfn->nvm_info.image_att[i].len; 2324 2325 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2326 "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", 2327 **data, image_type, nvm_image.start_addr, 2328 nvm_image.start_addr + nvm_image.length - 1); 2329 (*data)++; 2330 is_crc = !!(**data & BIT(0)); 2331 (*data)++; 2332 len = *((u16 *)*data); 2333 *data += 2; 2334 if (is_crc) { 2335 u32 crc = 0; 2336 2337 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); 2338 if (rc) { 2339 DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); 2340 goto exit; 2341 } 2342 2343 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2344 (nvm_image.start_addr + 2345 nvm_image.length - 4), (u8 *)&crc, 4); 2346 if (rc) 2347 DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", 2348 nvm_image.start_addr + nvm_image.length - 4, rc); 2349 goto exit; 2350 } 2351 2352 /* Iterate over the values for setting */ 2353 while (len) { 2354 u32 offset, mask, value, cur_value; 2355 u8 buf[4]; 2356 2357 value = *((u32 *)*data); 2358 *data += 4; 2359 mask = *((u32 *)*data); 2360 *data += 4; 2361 offset = *((u32 *)*data); 2362 *data += 4; 2363 2364 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, 2365 4); 2366 if (rc) { 2367 DP_ERR(cdev, "Failed reading from %08x\n", 2368 nvm_image.start_addr + offset); 2369 goto exit; 2370 } 2371 2372 cur_value = le32_to_cpu(*((__le32 *)buf)); 2373 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2374 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", 2375 nvm_image.start_addr + offset, cur_value, 2376 (cur_value & ~mask) | (value & mask), value, mask); 2377 value = (value & mask) | (cur_value & ~mask); 2378 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2379 nvm_image.start_addr + offset, 2380 (u8 *)&value, 4); 2381 if (rc) { 2382 DP_ERR(cdev, "Failed writing to %08x\n", 2383 nvm_image.start_addr + offset); 2384 goto exit; 2385 } 2386 2387 len--; 2388 } 2389 exit: 2390 return rc; 2391 } 2392 2393 /* Binary file format - 2394 * /----------------------------------------------------------------------\ 2395 * 0B | 0x3 [command index] | 2396 * 4B | b'0: check_response? 
| b'1-31 reserved | 2397 * 8B | File-type | reserved | 2398 * 12B | Image length in bytes | 2399 * \----------------------------------------------------------------------/ 2400 * Start a new file of the provided type 2401 */ 2402 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, 2403 const u8 **data, bool *check_resp) 2404 { 2405 u32 file_type, file_size = 0; 2406 int rc; 2407 2408 *data += 4; 2409 *check_resp = !!(**data & BIT(0)); 2410 *data += 4; 2411 file_type = **data; 2412 2413 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2414 "About to start a new file of type %02x\n", file_type); 2415 if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { 2416 *data += 4; 2417 file_size = *((u32 *)(*data)); 2418 } 2419 2420 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, 2421 (u8 *)(&file_size), 4); 2422 *data += 4; 2423 2424 return rc; 2425 } 2426 2427 /* Binary file format - 2428 * /----------------------------------------------------------------------\ 2429 * 0B | 0x2 [command index] | 2430 * 4B | Length in bytes | 2431 * 8B | b'0: check_response? | b'1-31 reserved | 2432 * 12B | Offset in bytes | 2433 * 16B | Data ... | 2434 * \----------------------------------------------------------------------/ 2435 * Write data as part of a file that was previously started. Data should be 2436 * of length equal to that provided in the message 2437 */ 2438 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, 2439 const u8 **data, bool *check_resp) 2440 { 2441 u32 offset, len; 2442 int rc; 2443 2444 *data += 4; 2445 len = *((u32 *)(*data)); 2446 *data += 4; 2447 *check_resp = !!(**data & BIT(0)); 2448 *data += 4; 2449 offset = *((u32 *)(*data)); 2450 *data += 4; 2451 2452 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2453 "About to write File-data: %08x bytes to offset %08x\n", 2454 len, offset); 2455 2456 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, 2457 (char *)(*data), len); 2458 *data += len; 2459 2460 return rc; 2461 } 2462 2463 /* Binary file format [General header] - 2464 * /----------------------------------------------------------------------\ 2465 * 0B | QED_NVM_SIGNATURE | 2466 * 4B | Length in bytes | 2467 * 8B | Highest command in this batchfile | Reserved | 2468 * \----------------------------------------------------------------------/ 2469 */ 2470 static int qed_nvm_flash_image_validate(struct qed_dev *cdev, 2471 const struct firmware *image, 2472 const u8 **data) 2473 { 2474 u32 signature, len; 2475 2476 /* Check minimum size */ 2477 if (image->size < 12) { 2478 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); 2479 return -EINVAL; 2480 } 2481 2482 /* Check signature */ 2483 signature = *((u32 *)(*data)); 2484 if (signature != QED_NVM_SIGNATURE) { 2485 DP_ERR(cdev, "Wrong signature '%08x'\n", signature); 2486 return -EINVAL; 2487 } 2488 2489 *data += 4; 2490 /* Validate internal size equals the image-size */ 2491 len = *((u32 *)(*data)); 2492 if (len != image->size) { 2493 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", 2494 len, (u32)image->size); 2495 return -EINVAL; 2496 } 2497 2498 *data += 4; 2499 /* Make sure driver familiar with all commands necessary for this */ 2500 if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { 2501 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n", 2502 *((u16 *)(*data))); 2503 return -EINVAL; 2504 } 2505 2506 *data += 4; 2507 2508 return 0; 2509 } 2510 2511 /* Binary file format - 2512 * /----------------------------------------------------------------------\ 2513 * 0B | 0x5 [command index] | 
2514 * 4B | Number of config attributes | Reserved | 2515 * 4B | Config ID | Entity ID | Length | 2516 * 4B | Value | 2517 * | | 2518 * \----------------------------------------------------------------------/ 2519 * There can be several cfg_id-entity_id-Length-Value sets as specified by 2520 * 'Number of config attributes'. 2521 * 2522 * The API parses config attributes from the user provided buffer and flashes 2523 * them to the respective NVM path using the Management FW interface. 2524 */ 2525 static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) 2526 { 2527 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2528 u8 entity_id, len, buf[32]; 2529 bool need_nvm_init = true; 2530 struct qed_ptt *ptt; 2531 u16 cfg_id, count; 2532 int rc = 0, i; 2533 u32 flags; 2534 2535 ptt = qed_ptt_acquire(hwfn); 2536 if (!ptt) 2537 return -EAGAIN; 2538 2539 /* NVM CFG ID attribute header */ 2540 *data += 4; 2541 count = *((u16 *)*data); 2542 *data += 4; 2543 2544 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2545 "Read config ids: num_attrs = %0d\n", count); 2546 /* NVM CFG ID attributes. Start loop index from 1 to avoid additional 2547 * arithmetic operations in the implementation. 2548 */ 2549 for (i = 1; i <= count; i++) { 2550 cfg_id = *((u16 *)*data); 2551 *data += 2; 2552 entity_id = **data; 2553 (*data)++; 2554 len = **data; 2555 (*data)++; 2556 memcpy(buf, *data, len); 2557 *data += len; 2558 2559 flags = 0; 2560 if (need_nvm_init) { 2561 flags |= QED_NVM_CFG_OPTION_INIT; 2562 need_nvm_init = false; 2563 } 2564 2565 /* Commit to flash and free the resources */ 2566 if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) { 2567 flags |= QED_NVM_CFG_OPTION_COMMIT | 2568 QED_NVM_CFG_OPTION_FREE; 2569 need_nvm_init = true; 2570 } 2571 2572 if (entity_id) 2573 flags |= QED_NVM_CFG_OPTION_ENTITY_SEL; 2574 2575 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2576 "cfg_id = %d entity = %d len = %d\n", cfg_id, 2577 entity_id, len); 2578 rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags, 2579 buf, len); 2580 if (rc) { 2581 DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id); 2582 break; 2583 } 2584 } 2585 2586 qed_ptt_release(hwfn, ptt); 2587 2588 return rc; 2589 } 2590 2591 #define QED_MAX_NVM_BUF_LEN 32 2592 static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd) 2593 { 2594 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2595 u8 buf[QED_MAX_NVM_BUF_LEN]; 2596 struct qed_ptt *ptt; 2597 u32 len; 2598 int rc; 2599 2600 ptt = qed_ptt_acquire(hwfn); 2601 if (!ptt) 2602 return QED_MAX_NVM_BUF_LEN; 2603 2604 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf, 2605 &len); 2606 if (rc || !len) { 2607 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2608 len = QED_MAX_NVM_BUF_LEN; 2609 } 2610 2611 qed_ptt_release(hwfn, ptt); 2612 2613 return len; 2614 } 2615 2616 static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, 2617 u32 cmd, u32 entity_id) 2618 { 2619 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2620 struct qed_ptt *ptt; 2621 u32 flags, len; 2622 int rc = 0; 2623 2624 ptt = qed_ptt_acquire(hwfn); 2625 if (!ptt) 2626 return -EAGAIN; 2627 2628 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2629 "Read config cmd = %d entity id %d\n", cmd, entity_id); 2630 flags = entity_id ?
QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; 2631 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); 2632 if (rc) 2633 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2634 2635 qed_ptt_release(hwfn, ptt); 2636 2637 return rc; 2638 } 2639 2640 static int qed_nvm_flash(struct qed_dev *cdev, const char *name) 2641 { 2642 const struct firmware *image; 2643 const u8 *data, *data_end; 2644 u32 cmd_type; 2645 int rc; 2646 2647 rc = request_firmware(&image, name, &cdev->pdev->dev); 2648 if (rc) { 2649 DP_ERR(cdev, "Failed to find '%s'\n", name); 2650 return rc; 2651 } 2652 2653 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2654 "Flashing '%s' - firmware's data at %p, size is %08x\n", 2655 name, image->data, (u32)image->size); 2656 data = image->data; 2657 data_end = data + image->size; 2658 2659 rc = qed_nvm_flash_image_validate(cdev, image, &data); 2660 if (rc) 2661 goto exit; 2662 2663 while (data < data_end) { 2664 bool check_resp = false; 2665 2666 /* Parse the actual command */ 2667 cmd_type = *((u32 *)data); 2668 switch (cmd_type) { 2669 case QED_NVM_FLASH_CMD_FILE_DATA: 2670 rc = qed_nvm_flash_image_file_data(cdev, &data, 2671 &check_resp); 2672 break; 2673 case QED_NVM_FLASH_CMD_FILE_START: 2674 rc = qed_nvm_flash_image_file_start(cdev, &data, 2675 &check_resp); 2676 break; 2677 case QED_NVM_FLASH_CMD_NVM_CHANGE: 2678 rc = qed_nvm_flash_image_access(cdev, &data, 2679 &check_resp); 2680 break; 2681 case QED_NVM_FLASH_CMD_NVM_CFG_ID: 2682 rc = qed_nvm_flash_cfg_write(cdev, &data); 2683 break; 2684 default: 2685 DP_ERR(cdev, "Unknown command %08x\n", cmd_type); 2686 rc = -EINVAL; 2687 goto exit; 2688 } 2689 2690 if (rc) { 2691 DP_ERR(cdev, "Command %08x failed\n", cmd_type); 2692 goto exit; 2693 } 2694 2695 /* Check response if needed */ 2696 if (check_resp) { 2697 u32 mcp_response = 0; 2698 2699 if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { 2700 DP_ERR(cdev, "Failed getting MCP response\n"); 2701 rc = -EINVAL; 2702 goto exit; 2703 } 2704 2705 switch (mcp_response & FW_MSG_CODE_MASK) { 2706 case FW_MSG_CODE_OK: 2707 case FW_MSG_CODE_NVM_OK: 2708 case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: 2709 case FW_MSG_CODE_PHY_OK: 2710 break; 2711 default: 2712 DP_ERR(cdev, "MFW returns error: %08x\n", 2713 mcp_response); 2714 rc = -EINVAL; 2715 goto exit; 2716 } 2717 } 2718 } 2719 2720 exit: 2721 release_firmware(image); 2722 2723 return rc; 2724 } 2725 2726 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, 2727 u8 *buf, u16 len) 2728 { 2729 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2730 2731 return qed_mcp_get_nvm_image(hwfn, type, buf, len); 2732 } 2733 2734 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) 2735 { 2736 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2737 void *cookie = p_hwfn->cdev->ops_cookie; 2738 2739 if (ops && ops->schedule_recovery_handler) 2740 ops->schedule_recovery_handler(cookie); 2741 } 2742 2743 static const char * const qed_hw_err_type_descr[] = { 2744 [QED_HW_ERR_FAN_FAIL] = "Fan Failure", 2745 [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", 2746 [QED_HW_ERR_HW_ATTN] = "HW Attention", 2747 [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure", 2748 [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure", 2749 [QED_HW_ERR_FW_ASSERT] = "FW Assertion", 2750 [QED_HW_ERR_LAST] = "Unknown", 2751 }; 2752 2753 void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, 2754 enum qed_hw_err_type err_type) 2755 { 2756 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2757 void *cookie = p_hwfn->cdev->ops_cookie; 2758 
const char *err_str; 2759 2760 if (err_type > QED_HW_ERR_LAST) 2761 err_type = QED_HW_ERR_LAST; 2762 err_str = qed_hw_err_type_descr[err_type]; 2763 2764 DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str); 2765 2766 /* Call the HW error handler of the protocol driver. 2767 * If it is not available - perform a minimal handling of preventing 2768 * HW attentions from being reasserted. 2769 */ 2770 if (ops && ops->schedule_hw_err_handler) 2771 ops->schedule_hw_err_handler(cookie, err_type); 2772 else 2773 qed_int_attn_clr_enable(p_hwfn->cdev, true); 2774 } 2775 2776 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, 2777 void *handle) 2778 { 2779 return qed_set_queue_coalesce(rx_coal, tx_coal, handle); 2780 } 2781 2782 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 2783 { 2784 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2785 struct qed_ptt *ptt; 2786 int status = 0; 2787 2788 ptt = qed_ptt_acquire(hwfn); 2789 if (!ptt) 2790 return -EAGAIN; 2791 2792 status = qed_mcp_set_led(hwfn, ptt, mode); 2793 2794 qed_ptt_release(hwfn, ptt); 2795 2796 return status; 2797 } 2798 2799 int qed_recovery_process(struct qed_dev *cdev) 2800 { 2801 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2802 struct qed_ptt *p_ptt; 2803 int rc = 0; 2804 2805 p_ptt = qed_ptt_acquire(p_hwfn); 2806 if (!p_ptt) 2807 return -EAGAIN; 2808 2809 rc = qed_start_recovery_process(p_hwfn, p_ptt); 2810 2811 qed_ptt_release(p_hwfn, p_ptt); 2812 2813 return rc; 2814 } 2815 2816 static int qed_update_wol(struct qed_dev *cdev, bool enabled) 2817 { 2818 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2819 struct qed_ptt *ptt; 2820 int rc = 0; 2821 2822 if (IS_VF(cdev)) 2823 return 0; 2824 2825 ptt = qed_ptt_acquire(hwfn); 2826 if (!ptt) 2827 return -EAGAIN; 2828 2829 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED 2830 : QED_OV_WOL_DISABLED); 2831 if (rc) 2832 goto out; 2833 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2834 2835 out: 2836 qed_ptt_release(hwfn, ptt); 2837 return rc; 2838 } 2839 2840 static int qed_update_drv_state(struct qed_dev *cdev, bool active) 2841 { 2842 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2843 struct qed_ptt *ptt; 2844 int status = 0; 2845 2846 if (IS_VF(cdev)) 2847 return 0; 2848 2849 ptt = qed_ptt_acquire(hwfn); 2850 if (!ptt) 2851 return -EAGAIN; 2852 2853 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 
2854 QED_OV_DRIVER_STATE_ACTIVE : 2855 QED_OV_DRIVER_STATE_DISABLED); 2856 2857 qed_ptt_release(hwfn, ptt); 2858 2859 return status; 2860 } 2861 2862 static int qed_update_mac(struct qed_dev *cdev, const u8 *mac) 2863 { 2864 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2865 struct qed_ptt *ptt; 2866 int status = 0; 2867 2868 if (IS_VF(cdev)) 2869 return 0; 2870 2871 ptt = qed_ptt_acquire(hwfn); 2872 if (!ptt) 2873 return -EAGAIN; 2874 2875 status = qed_mcp_ov_update_mac(hwfn, ptt, mac); 2876 if (status) 2877 goto out; 2878 2879 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2880 2881 out: 2882 qed_ptt_release(hwfn, ptt); 2883 return status; 2884 } 2885 2886 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) 2887 { 2888 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2889 struct qed_ptt *ptt; 2890 int status = 0; 2891 2892 if (IS_VF(cdev)) 2893 return 0; 2894 2895 ptt = qed_ptt_acquire(hwfn); 2896 if (!ptt) 2897 return -EAGAIN; 2898 2899 status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); 2900 if (status) 2901 goto out; 2902 2903 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2904 2905 out: 2906 qed_ptt_release(hwfn, ptt); 2907 return status; 2908 } 2909 2910 static int 2911 qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb, 2912 u16 qid, struct qed_sb_info_dbg *sb_dbg) 2913 { 2914 struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns]; 2915 struct qed_ptt *ptt; 2916 int rc; 2917 2918 if (IS_VF(cdev)) 2919 return -EINVAL; 2920 2921 ptt = qed_ptt_acquire(hwfn); 2922 if (!ptt) { 2923 DP_NOTICE(hwfn, "Can't acquire PTT\n"); 2924 return -EAGAIN; 2925 } 2926 2927 memset(sb_dbg, 0, sizeof(*sb_dbg)); 2928 rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg); 2929 2930 qed_ptt_release(hwfn, ptt); 2931 return rc; 2932 } 2933 2934 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, 2935 u8 dev_addr, u32 offset, u32 len) 2936 { 2937 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2938 struct qed_ptt *ptt; 2939 int rc = 0; 2940 2941 if (IS_VF(cdev)) 2942 return 0; 2943 2944 ptt = qed_ptt_acquire(hwfn); 2945 if (!ptt) 2946 return -EAGAIN; 2947 2948 rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, 2949 offset, len, buf); 2950 2951 qed_ptt_release(hwfn, ptt); 2952 2953 return rc; 2954 } 2955 2956 static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) 2957 { 2958 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2959 struct qed_ptt *ptt; 2960 int rc = 0; 2961 2962 if (IS_VF(cdev)) 2963 return 0; 2964 2965 ptt = qed_ptt_acquire(hwfn); 2966 if (!ptt) 2967 return -EAGAIN; 2968 2969 rc = qed_dbg_grc_config(hwfn, cfg_id, val); 2970 2971 qed_ptt_release(hwfn, ptt); 2972 2973 return rc; 2974 } 2975 2976 static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...) 
2977 { 2978 char buf[QED_MFW_REPORT_STR_SIZE]; 2979 struct qed_hwfn *p_hwfn; 2980 struct qed_ptt *p_ptt; 2981 va_list vl; 2982 2983 va_start(vl, fmt); 2984 vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl); 2985 va_end(vl); 2986 2987 if (IS_PF(cdev)) { 2988 p_hwfn = QED_LEADING_HWFN(cdev); 2989 p_ptt = qed_ptt_acquire(p_hwfn); 2990 if (p_ptt) { 2991 qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf)); 2992 qed_ptt_release(p_hwfn, p_ptt); 2993 } 2994 } 2995 } 2996 2997 static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) 2998 { 2999 return QED_AFFIN_HWFN_IDX(cdev); 3000 } 3001 3002 static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active) 3003 { 3004 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 3005 struct qed_ptt *ptt; 3006 int rc = 0; 3007 3008 *esl_active = false; 3009 3010 if (IS_VF(cdev)) 3011 return 0; 3012 3013 ptt = qed_ptt_acquire(hwfn); 3014 if (!ptt) 3015 return -EAGAIN; 3016 3017 rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active); 3018 3019 qed_ptt_release(hwfn, ptt); 3020 3021 return rc; 3022 } 3023 3024 static struct qed_selftest_ops qed_selftest_ops_pass = { 3025 .selftest_memory = &qed_selftest_memory, 3026 .selftest_interrupt = &qed_selftest_interrupt, 3027 .selftest_register = &qed_selftest_register, 3028 .selftest_clock = &qed_selftest_clock, 3029 .selftest_nvram = &qed_selftest_nvram, 3030 }; 3031 3032 const struct qed_common_ops qed_common_ops_pass = { 3033 .selftest = &qed_selftest_ops_pass, 3034 .probe = &qed_probe, 3035 .remove = &qed_remove, 3036 .set_power_state = &qed_set_power_state, 3037 .set_name = &qed_set_name, 3038 .update_pf_params = &qed_update_pf_params, 3039 .slowpath_start = &qed_slowpath_start, 3040 .slowpath_stop = &qed_slowpath_stop, 3041 .set_fp_int = &qed_set_int_fp, 3042 .get_fp_int = &qed_get_int_fp, 3043 .sb_init = &qed_sb_init, 3044 .sb_release = &qed_sb_release, 3045 .simd_handler_config = &qed_simd_handler_config, 3046 .simd_handler_clean = &qed_simd_handler_clean, 3047 .dbg_grc = &qed_dbg_grc, 3048 .dbg_grc_size = &qed_dbg_grc_size, 3049 .can_link_change = &qed_can_link_change, 3050 .set_link = &qed_set_link, 3051 .get_link = &qed_get_current_link, 3052 .drain = &qed_drain, 3053 .update_msglvl = &qed_init_dp, 3054 .devlink_register = qed_devlink_register, 3055 .devlink_unregister = qed_devlink_unregister, 3056 .report_fatal_error = qed_report_fatal_error, 3057 .dbg_all_data = &qed_dbg_all_data, 3058 .dbg_all_data_size = &qed_dbg_all_data_size, 3059 .chain_alloc = &qed_chain_alloc, 3060 .chain_free = &qed_chain_free, 3061 .nvm_flash = &qed_nvm_flash, 3062 .nvm_get_image = &qed_nvm_get_image, 3063 .set_coalesce = &qed_set_coalesce, 3064 .set_led = &qed_set_led, 3065 .recovery_process = &qed_recovery_process, 3066 .recovery_prolog = &qed_recovery_prolog, 3067 .attn_clr_enable = &qed_int_attn_clr_enable, 3068 .update_drv_state = &qed_update_drv_state, 3069 .update_mac = &qed_update_mac, 3070 .update_mtu = &qed_update_mtu, 3071 .update_wol = &qed_update_wol, 3072 .db_recovery_add = &qed_db_recovery_add, 3073 .db_recovery_del = &qed_db_recovery_del, 3074 .read_module_eeprom = &qed_read_module_eeprom, 3075 .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, 3076 .read_nvm_cfg = &qed_nvm_flash_cfg_read, 3077 .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, 3078 .set_grc_config = &qed_set_grc_config, 3079 .mfw_report = &qed_mfw_report, 3080 .get_sb_info = &qed_get_sb_info, 3081 .get_esl_status = &qed_get_esl_status, 3082 }; 3083 3084 void qed_get_protocol_stats(struct qed_dev *cdev, 3085 enum qed_mcp_protocol_type type, 3086 union 
qed_mcp_protocol_stats *stats) 3087 { 3088 struct qed_eth_stats eth_stats; 3089 3090 memset(stats, 0, sizeof(*stats)); 3091 3092 switch (type) { 3093 case QED_MCP_LAN_STATS: 3094 qed_get_vport_stats_context(cdev, &eth_stats, true); 3095 stats->lan_stats.ucast_rx_pkts = 3096 eth_stats.common.rx_ucast_pkts; 3097 stats->lan_stats.ucast_tx_pkts = 3098 eth_stats.common.tx_ucast_pkts; 3099 stats->lan_stats.fcs_err = -1; 3100 break; 3101 case QED_MCP_FCOE_STATS: 3102 qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true); 3103 break; 3104 case QED_MCP_ISCSI_STATS: 3105 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true); 3106 break; 3107 default: 3108 DP_VERBOSE(cdev, QED_MSG_SP, 3109 "Invalid protocol type = %d\n", type); 3110 return; 3111 } 3112 } 3113 3114 int qed_mfw_tlv_req(struct qed_hwfn *hwfn) 3115 { 3116 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 3117 "Scheduling slowpath task [Flag: %d]\n", 3118 QED_SLOWPATH_MFW_TLV_REQ); 3119 /* Memory barrier for setting atomic bit */ 3120 smp_mb__before_atomic(); 3121 set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); 3122 /* Memory barrier after setting atomic bit */ 3123 smp_mb__after_atomic(); 3124 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); 3125 3126 return 0; 3127 } 3128 3129 static void 3130 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) 3131 { 3132 struct qed_common_cb_ops *op = cdev->protocol_ops.common; 3133 struct qed_eth_stats_common *p_common; 3134 struct qed_generic_tlvs gen_tlvs; 3135 struct qed_eth_stats stats; 3136 int i; 3137 3138 memset(&gen_tlvs, 0, sizeof(gen_tlvs)); 3139 op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); 3140 3141 if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) 3142 tlv->flags.ipv4_csum_offload = true; 3143 if (gen_tlvs.feat_flags & QED_TLV_LSO) 3144 tlv->flags.lso_supported = true; 3145 tlv->flags.b_set = true; 3146 3147 for (i = 0; i < QED_TLV_MAC_COUNT; i++) { 3148 if (is_valid_ether_addr(gen_tlvs.mac[i])) { 3149 ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); 3150 tlv->mac_set[i] = true; 3151 } 3152 } 3153 3154 qed_get_vport_stats(cdev, &stats); 3155 p_common = &stats.common; 3156 tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + 3157 p_common->rx_bcast_pkts; 3158 tlv->rx_frames_set = true; 3159 tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + 3160 p_common->rx_bcast_bytes; 3161 tlv->rx_bytes_set = true; 3162 tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + 3163 p_common->tx_bcast_pkts; 3164 tlv->tx_frames_set = true; 3165 tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + 3166 p_common->tx_bcast_bytes; 3167 tlv->tx_bytes_set = true; 3168 } 3169 3170 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, 3171 union qed_mfw_tlv_data *tlv_buf) 3172 { 3173 struct qed_dev *cdev = hwfn->cdev; 3174 struct qed_common_cb_ops *ops; 3175 3176 ops = cdev->protocol_ops.common; 3177 if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { 3178 DP_NOTICE(hwfn, "Can't collect TLV management info\n"); 3179 return -EINVAL; 3180 } 3181 3182 switch (type) { 3183 case QED_MFW_TLV_GENERIC: 3184 qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); 3185 break; 3186 case QED_MFW_TLV_ETH: 3187 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); 3188 break; 3189 case QED_MFW_TLV_FCOE: 3190 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); 3191 break; 3192 case QED_MFW_TLV_ISCSI: 3193
ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); 3194 break; 3195 default: 3196 break; 3197 } 3198 3199 return 0; 3200 } 3201 3202 unsigned long qed_get_epoch_time(void) 3203 { 3204 return ktime_get_real_seconds(); 3205 } 3206
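
/* Illustrative sketch (not part of the driver): the "Binary file format"
 * comment blocks above describe the batch-file layout that qed_nvm_flash()
 * parses after request_firmware(). The structures below give a hypothetical,
 * host-side view of that layout as a tool preparing such a batch file might
 * declare it. The struct and field names are invented here for clarity; only
 * QED_NVM_SIGNATURE, the command indices (0x3 file start, 0x2 file data) and
 * the field order/widths come from the comments and parsing code above. A
 * tool would emit one qed_nvm_batch_hdr followed by one record per command,
 * in the order qed_nvm_flash() walks them.
 */
struct qed_nvm_batch_hdr {		/* General header */
	u32 signature;			/* QED_NVM_SIGNATURE */
	u32 length;			/* total batch-file length in bytes */
	u32 max_cmd;			/* highest command index used; low 16 bits are checked */
};

struct qed_nvm_batch_file_start {	/* command index 0x3 */
	u32 cmd;			/* QED_NVM_FLASH_CMD_FILE_START */
	u32 flags;			/* bit 0: check MCP response */
	u32 file_type;			/* e.g. DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI */
	u32 file_size;			/* image length; consumed only for MBI files */
};

struct qed_nvm_batch_file_data {	/* command index 0x2 */
	u32 cmd;			/* QED_NVM_FLASH_CMD_FILE_DATA */
	u32 len;			/* number of payload bytes that follow */
	u32 flags;			/* bit 0: check MCP response */
	u32 offset;			/* destination offset within the file */
	u8 data[];			/* 'len' bytes of payload */
};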