// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#include "qed_devlink.h"

/* Default RDMA resource sizing and NVM-config attribute identifiers */
#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");

/* Firmware file name is derived from the FW version the driver was built
 * against, e.g. qed/qed_init_values_zipped-8.x.y.z.bin.
 */
#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

/* MFW speed capabilities maps */

/* Maps one management-FW speed capability value to the set of ethtool link
 * modes it implies.  cap_arr/arr_size describe the source bit list at build
 * time; qed_mfw_speed_map_populate() folds them into @caps at module init
 * and then clears them (the arrays are __initconst and freed afterwards).
 */
struct qed_mfw_speed_map {
	u32		mfw_val;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);

	const u32	*cap_arr;
	u32		arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)		\
{						\
	.mfw_val	= (type),		\
	.cap_arr	= (arr),		\
	.arr_size	= ARRAY_SIZE(arr),	\
}

/* Extended-transceiver-mode advertised speeds -> ethtool link modes */
static const u32 qed_mfw_ext_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
			  qed_mfw_ext_50g_base_r),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
			  qed_mfw_ext_50g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
			  qed_mfw_ext_100g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
			  qed_mfw_ext_100g_base_r4),
};

/* Legacy NVM-config advertised speeds -> ethtool link modes */
static const u32 qed_mfw_legacy_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
			  qed_mfw_legacy_1g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
			  qed_mfw_legacy_10g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
			  qed_mfw_legacy_20g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
			  qed_mfw_legacy_25g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
			  qed_mfw_legacy_40g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
			  qed_mfw_legacy_50g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
			  qed_mfw_legacy_bb_100g),
};

/* Fold one map's __initconst bit array into its linkmode mask, then drop the
 * array reference so nothing points at init memory after it is freed.
 */
static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);

	map->cap_arr = NULL;
	map->arr_size = 0;
}

static void __init qed_mfw_speed_maps_init(void)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}

static int __init qed_init(void)
{
	pr_info("%s", version);

	qed_mfw_speed_maps_init();

	return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
	/* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	/* Prefer 64-bit streaming+coherent masks; fall back to 32-bit
	 * streaming only if the platform cannot do 64-bit at all.
	 */
	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

/* Undo everything qed_init_pci() did: unmap BARs, release regions (only if
 * we are the last enabler and thus the one that requested them), disable AER
 * and the device itself.
 */
static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	pci_disable_pcie_error_reporting(pdev);

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

/* An all-ones revision ID indicates the device failed to respond to the
 * config read, typically after a prior fatal error.
 */
#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
299 */ 300 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) 301 { 302 u8 rev_id; 303 int rc; 304 305 cdev->pdev = pdev; 306 307 rc = pci_enable_device(pdev); 308 if (rc) { 309 DP_NOTICE(cdev, "Cannot enable PCI device\n"); 310 goto err0; 311 } 312 313 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 314 DP_NOTICE(cdev, "No memory region found in bar #0\n"); 315 rc = -EIO; 316 goto err1; 317 } 318 319 if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 320 DP_NOTICE(cdev, "No memory region found in bar #2\n"); 321 rc = -EIO; 322 goto err1; 323 } 324 325 if (atomic_read(&pdev->enable_cnt) == 1) { 326 rc = pci_request_regions(pdev, "qed"); 327 if (rc) { 328 DP_NOTICE(cdev, 329 "Failed to request PCI memory resources\n"); 330 goto err1; 331 } 332 pci_set_master(pdev); 333 pci_save_state(pdev); 334 } 335 336 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); 337 if (rev_id == PCI_REVISION_ID_ERROR_VAL) { 338 DP_NOTICE(cdev, 339 "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. 
Aborting.\n", 340 rev_id); 341 rc = -ENODEV; 342 goto err2; 343 } 344 if (!pci_is_pcie(pdev)) { 345 DP_NOTICE(cdev, "The bus is not PCI Express\n"); 346 rc = -EIO; 347 goto err2; 348 } 349 350 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 351 if (IS_PF(cdev) && !cdev->pci_params.pm_cap) 352 DP_NOTICE(cdev, "Cannot find power management capability\n"); 353 354 rc = qed_set_coherency_mask(cdev); 355 if (rc) 356 goto err2; 357 358 cdev->pci_params.mem_start = pci_resource_start(pdev, 0); 359 cdev->pci_params.mem_end = pci_resource_end(pdev, 0); 360 cdev->pci_params.irq = pdev->irq; 361 362 cdev->regview = pci_ioremap_bar(pdev, 0); 363 if (!cdev->regview) { 364 DP_NOTICE(cdev, "Cannot map register space, aborting\n"); 365 rc = -ENOMEM; 366 goto err2; 367 } 368 369 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); 370 cdev->db_size = pci_resource_len(cdev->pdev, 2); 371 if (!cdev->db_size) { 372 if (IS_PF(cdev)) { 373 DP_NOTICE(cdev, "No Doorbell bar available\n"); 374 return -EINVAL; 375 } else { 376 return 0; 377 } 378 } 379 380 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); 381 382 if (!cdev->doorbells) { 383 DP_NOTICE(cdev, "Cannot map doorbell space\n"); 384 return -ENOMEM; 385 } 386 387 /* AER (Advanced Error reporting) configuration */ 388 rc = pci_enable_pcie_error_reporting(pdev); 389 if (rc) 390 DP_VERBOSE(cdev, NETIF_MSG_DRV, 391 "Failed to configure PCIe AER [%d]\n", rc); 392 393 return 0; 394 395 err2: 396 pci_release_regions(pdev); 397 err1: 398 pci_disable_device(pdev); 399 err0: 400 return rc; 401 } 402 403 int qed_fill_dev_info(struct qed_dev *cdev, 404 struct qed_dev_info *dev_info) 405 { 406 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 407 struct qed_hw_info *hw_info = &p_hwfn->hw_info; 408 struct qed_tunnel_info *tun = &cdev->tunnel; 409 struct qed_ptt *ptt; 410 411 memset(dev_info, 0, sizeof(struct qed_dev_info)); 412 413 if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 414 
tun->vxlan.b_mode_enabled) 415 dev_info->vxlan_enable = true; 416 417 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 418 tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 419 tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 420 dev_info->gre_enable = true; 421 422 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 423 tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 424 tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 425 dev_info->geneve_enable = true; 426 427 dev_info->num_hwfns = cdev->num_hwfns; 428 dev_info->pci_mem_start = cdev->pci_params.mem_start; 429 dev_info->pci_mem_end = cdev->pci_params.mem_end; 430 dev_info->pci_irq = cdev->pci_params.irq; 431 dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); 432 dev_info->dev_type = cdev->type; 433 ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); 434 435 if (IS_PF(cdev)) { 436 dev_info->fw_major = FW_MAJOR_VERSION; 437 dev_info->fw_minor = FW_MINOR_VERSION; 438 dev_info->fw_rev = FW_REVISION_VERSION; 439 dev_info->fw_eng = FW_ENGINEERING_VERSION; 440 dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, 441 &cdev->mf_bits); 442 if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits)) 443 dev_info->b_arfs_capable = true; 444 dev_info->tx_switching = true; 445 446 if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) 447 dev_info->wol_support = true; 448 449 dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); 450 451 dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; 452 } else { 453 qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, 454 &dev_info->fw_minor, &dev_info->fw_rev, 455 &dev_info->fw_eng); 456 } 457 458 if (IS_PF(cdev)) { 459 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 460 if (ptt) { 461 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, 462 &dev_info->mfw_rev, NULL); 463 464 qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, 465 &dev_info->mbi_version); 466 467 qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, 468 
&dev_info->flash_size); 469 470 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); 471 } 472 } else { 473 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, 474 &dev_info->mfw_rev, NULL); 475 } 476 477 dev_info->mtu = hw_info->mtu; 478 cdev->common_dev_info = *dev_info; 479 480 return 0; 481 } 482 483 static void qed_free_cdev(struct qed_dev *cdev) 484 { 485 kfree((void *)cdev); 486 } 487 488 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) 489 { 490 struct qed_dev *cdev; 491 492 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 493 if (!cdev) 494 return cdev; 495 496 qed_init_struct(cdev); 497 498 return cdev; 499 } 500 501 /* Sets the requested power state */ 502 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) 503 { 504 if (!cdev) 505 return -ENODEV; 506 507 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); 508 return 0; 509 } 510 511 /* probing */ 512 static struct qed_dev *qed_probe(struct pci_dev *pdev, 513 struct qed_probe_params *params) 514 { 515 struct qed_dev *cdev; 516 int rc; 517 518 cdev = qed_alloc_cdev(pdev); 519 if (!cdev) 520 goto err0; 521 522 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; 523 cdev->protocol = params->protocol; 524 525 if (params->is_vf) 526 cdev->b_is_vf = true; 527 528 qed_init_dp(cdev, params->dp_module, params->dp_level); 529 530 cdev->recov_in_prog = params->recov_in_prog; 531 532 rc = qed_init_pci(cdev, pdev); 533 if (rc) { 534 DP_ERR(cdev, "init pci failed\n"); 535 goto err1; 536 } 537 DP_INFO(cdev, "PCI init completed successfully\n"); 538 539 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); 540 if (rc) { 541 DP_ERR(cdev, "hw prepare failed\n"); 542 goto err2; 543 } 544 545 DP_INFO(cdev, "%s completed successfully\n", __func__); 546 547 return cdev; 548 549 err2: 550 qed_free_pci(cdev); 551 err1: 552 qed_free_cdev(cdev); 553 err0: 554 return NULL; 555 } 556 557 static void qed_remove(struct qed_dev *cdev) 558 { 559 if (!cdev) 560 return; 561 562 qed_hw_remove(cdev); 563 564 qed_free_pci(cdev); 565 
566 qed_set_power_state(cdev, PCI_D3hot); 567 568 qed_free_cdev(cdev); 569 } 570 571 static void qed_disable_msix(struct qed_dev *cdev) 572 { 573 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 574 pci_disable_msix(cdev->pdev); 575 kfree(cdev->int_params.msix_table); 576 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { 577 pci_disable_msi(cdev->pdev); 578 } 579 580 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); 581 } 582 583 static int qed_enable_msix(struct qed_dev *cdev, 584 struct qed_int_params *int_params) 585 { 586 int i, rc, cnt; 587 588 cnt = int_params->in.num_vectors; 589 590 for (i = 0; i < cnt; i++) 591 int_params->msix_table[i].entry = i; 592 593 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 594 int_params->in.min_msix_cnt, cnt); 595 if (rc < cnt && rc >= int_params->in.min_msix_cnt && 596 (rc % cdev->num_hwfns)) { 597 pci_disable_msix(cdev->pdev); 598 599 /* If fastpath is initialized, we need at least one interrupt 600 * per hwfn [and the slow path interrupts]. New requested number 601 * should be a multiple of the number of hwfns. 602 */ 603 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; 604 DP_NOTICE(cdev, 605 "Trying to enable MSI-X with less vectors (%d out of %d)\n", 606 cnt, int_params->in.num_vectors); 607 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, 608 cnt); 609 if (!rc) 610 rc = cnt; 611 } 612 613 /* For VFs, we should return with an error in case we didn't get the 614 * exact number of msix vectors as we requested. 615 * Not doing that will lead to a crash when starting queues for 616 * this VF. 
	 */
	if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vector */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	/* Fallback chain: MSI-X -> MSI -> INTa, unless @force_mode pins the
	 * requested mode (then a failure is returned instead of degrading).
	 */
	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		fallthrough;

	case QED_INT_MODE_MSI:
		/* MSI (single vector) is only viable on single-hwfn devices */
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		fallthrough;

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

/* Register a protocol fastpath handler for a global SB @index; indices are
 * interleaved across hwfns (index % num_hwfns selects the hwfn).
 */
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void(*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

/* MSI-X slowpath ISR - just kick the per-hwfn slowpath tasklet */
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

/* Shared (INTa/MSI) ISR - demultiplexes SISR status bits per hwfn:
 * bit 0 is the slowpath, bits 1..64 map to fastpath simd handlers.
 */
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(&hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

/* Request the slowpath interrupt for @hwfn: a dedicated MSI-X vector when
 * available, otherwise the single shared device IRQ (requested once).
 */
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		/* INTa may be shared with other devices on the same line */
		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(&p_hwfn->sp_dpc);
		tasklet_enable(&p_hwfn->sp_dpc);
	}
}

/* Wait for any in-flight slowpath interrupt and tasklet of @p_hwfn to
 * complete (does not prevent new ones from being raised).
 */
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		/* Slowpath vectors occupy the first num_hwfns table entries;
		 * hwfns request their IRQs in order, so stop at the first one
		 * that never requested.
		 */
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 &cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	/* Quiesce the slowpath tasklets after the HW has been stopped */
	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(&p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, &p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

/* Report how many fastpath vectors the protocol driver may use, capped by
 * what interrupt setup actually obtained. Returns the granted count, or
 * -ENOMEM if none are available.
 */
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

/* Work out the interrupt vector budget for the PF: one slowpath vector per
 * hwfn plus one per status block, then split the remainder between L2 and
 * RDMA fastpaths.
 */
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++;	/* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath
vector per hwfn */ 966 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 967 968 if (is_kdump_kernel()) { 969 DP_INFO(cdev, 970 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", 971 cdev->int_params.in.min_msix_cnt); 972 cdev->int_params.in.num_vectors = 973 cdev->int_params.in.min_msix_cnt; 974 } 975 976 rc = qed_set_int_mode(cdev, false); 977 if (rc) { 978 DP_ERR(cdev, "%s ERR\n", __func__); 979 return rc; 980 } 981 982 cdev->int_params.fp_msix_base = cdev->num_hwfns; 983 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 984 cdev->num_hwfns; 985 986 if (!IS_ENABLED(CONFIG_QED_RDMA) || 987 !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) 988 return 0; 989 990 for_each_hwfn(cdev, i) 991 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 992 993 DP_VERBOSE(cdev, QED_MSG_RDMA, 994 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", 995 cdev->int_params.fp_msix_cnt, num_l2_queues); 996 997 if (cdev->int_params.fp_msix_cnt > num_l2_queues) { 998 cdev->int_params.rdma_msix_cnt = 999 (cdev->int_params.fp_msix_cnt - num_l2_queues) 1000 / cdev->num_hwfns; 1001 cdev->int_params.rdma_msix_base = 1002 cdev->int_params.fp_msix_base + num_l2_queues; 1003 cdev->int_params.fp_msix_cnt = num_l2_queues; 1004 } else { 1005 cdev->int_params.rdma_msix_cnt = 0; 1006 } 1007 1008 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 1009 cdev->int_params.rdma_msix_cnt, 1010 cdev->int_params.rdma_msix_base); 1011 1012 return 0; 1013 } 1014 1015 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) 1016 { 1017 int rc; 1018 1019 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 1020 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; 1021 1022 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), 1023 &cdev->int_params.in.num_vectors); 1024 if (cdev->num_hwfns > 1) { 1025 u8 vectors = 0; 1026 1027 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); 1028 cdev->int_params.in.num_vectors += vectors; 1029 } 1030 1031 /* 
We want a minimum of one fastpath vector per vf hwfn */ 1032 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; 1033 1034 rc = qed_set_int_mode(cdev, true); 1035 if (rc) 1036 return rc; 1037 1038 cdev->int_params.fp_msix_base = 0; 1039 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; 1040 1041 return 0; 1042 } 1043 1044 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, 1045 u8 *input_buf, u32 max_size, u8 *unzip_buf) 1046 { 1047 int rc; 1048 1049 p_hwfn->stream->next_in = input_buf; 1050 p_hwfn->stream->avail_in = input_len; 1051 p_hwfn->stream->next_out = unzip_buf; 1052 p_hwfn->stream->avail_out = max_size; 1053 1054 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); 1055 1056 if (rc != Z_OK) { 1057 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", 1058 rc); 1059 return 0; 1060 } 1061 1062 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); 1063 zlib_inflateEnd(p_hwfn->stream); 1064 1065 if (rc != Z_OK && rc != Z_STREAM_END) { 1066 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", 1067 p_hwfn->stream->msg, rc); 1068 return 0; 1069 } 1070 1071 return p_hwfn->stream->total_out / 4; 1072 } 1073 1074 static int qed_alloc_stream_mem(struct qed_dev *cdev) 1075 { 1076 int i; 1077 void *workspace; 1078 1079 for_each_hwfn(cdev, i) { 1080 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1081 1082 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); 1083 if (!p_hwfn->stream) 1084 return -ENOMEM; 1085 1086 workspace = vzalloc(zlib_inflate_workspacesize()); 1087 if (!workspace) 1088 return -ENOMEM; 1089 p_hwfn->stream->workspace = workspace; 1090 } 1091 1092 return 0; 1093 } 1094 1095 static void qed_free_stream_mem(struct qed_dev *cdev) 1096 { 1097 int i; 1098 1099 for_each_hwfn(cdev, i) { 1100 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1101 1102 if (!p_hwfn->stream) 1103 return; 1104 1105 vfree(p_hwfn->stream->workspace); 1106 kfree(p_hwfn->stream); 1107 } 1108 } 1109 1110 static void qed_update_pf_params(struct qed_dev 
*cdev, 1111 struct qed_pf_params *params) 1112 { 1113 int i; 1114 1115 if (IS_ENABLED(CONFIG_QED_RDMA)) { 1116 params->rdma_pf_params.num_qps = QED_ROCE_QPS; 1117 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 1118 params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; 1119 /* divide by 3 the MRs to avoid MF ILT overflow */ 1120 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 1121 } 1122 1123 if (cdev->num_hwfns > 1 || IS_VF(cdev)) 1124 params->eth_pf_params.num_arfs_filters = 0; 1125 1126 /* In case we might support RDMA, don't allow qede to be greedy 1127 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] 1128 * per hwfn. 1129 */ 1130 if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { 1131 u16 *num_cons; 1132 1133 num_cons = ¶ms->eth_pf_params.num_cons; 1134 *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); 1135 } 1136 1137 for (i = 0; i < cdev->num_hwfns; i++) { 1138 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1139 1140 p_hwfn->pf_params = *params; 1141 } 1142 } 1143 1144 #define QED_PERIODIC_DB_REC_COUNT 10 1145 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 1146 #define QED_PERIODIC_DB_REC_INTERVAL \ 1147 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) 1148 1149 static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, 1150 enum qed_slowpath_wq_flag wq_flag, 1151 unsigned long delay) 1152 { 1153 if (!hwfn->slowpath_wq_active) 1154 return -EINVAL; 1155 1156 /* Memory barrier for setting atomic bit */ 1157 smp_mb__before_atomic(); 1158 set_bit(wq_flag, &hwfn->slowpath_task_flags); 1159 /* Memory barrier after setting atomic bit */ 1160 smp_mb__after_atomic(); 1161 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); 1162 1163 return 0; 1164 } 1165 1166 void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) 1167 { 1168 /* Reset periodic Doorbell Recovery counter */ 1169 p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; 1170 1171 /* Don't schedule periodic Doorbell Recovery if already scheduled */ 1172 if 
(test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1173 &p_hwfn->slowpath_task_flags)) 1174 return; 1175 1176 qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, 1177 QED_PERIODIC_DB_REC_INTERVAL); 1178 } 1179 1180 static void qed_slowpath_wq_stop(struct qed_dev *cdev) 1181 { 1182 int i; 1183 1184 if (IS_VF(cdev)) 1185 return; 1186 1187 for_each_hwfn(cdev, i) { 1188 if (!cdev->hwfns[i].slowpath_wq) 1189 continue; 1190 1191 /* Stop queuing new delayed works */ 1192 cdev->hwfns[i].slowpath_wq_active = false; 1193 1194 cancel_delayed_work(&cdev->hwfns[i].slowpath_task); 1195 destroy_workqueue(cdev->hwfns[i].slowpath_wq); 1196 } 1197 } 1198 1199 static void qed_slowpath_task(struct work_struct *work) 1200 { 1201 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 1202 slowpath_task.work); 1203 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 1204 1205 if (!ptt) { 1206 if (hwfn->slowpath_wq_active) 1207 queue_delayed_work(hwfn->slowpath_wq, 1208 &hwfn->slowpath_task, 0); 1209 1210 return; 1211 } 1212 1213 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, 1214 &hwfn->slowpath_task_flags)) 1215 qed_mfw_process_tlv_req(hwfn, ptt); 1216 1217 if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1218 &hwfn->slowpath_task_flags)) { 1219 /* skip qed_db_rec_handler during recovery/unload */ 1220 if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active) 1221 goto out; 1222 1223 qed_db_rec_handler(hwfn, ptt); 1224 if (hwfn->periodic_db_rec_count--) 1225 qed_slowpath_delayed_work(hwfn, 1226 QED_SLOWPATH_PERIODIC_DB_REC, 1227 QED_PERIODIC_DB_REC_INTERVAL); 1228 } 1229 1230 out: 1231 qed_ptt_release(hwfn, ptt); 1232 } 1233 1234 static int qed_slowpath_wq_start(struct qed_dev *cdev) 1235 { 1236 struct qed_hwfn *hwfn; 1237 char name[NAME_SIZE]; 1238 int i; 1239 1240 if (IS_VF(cdev)) 1241 return 0; 1242 1243 for_each_hwfn(cdev, i) { 1244 hwfn = &cdev->hwfns[i]; 1245 1246 snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", 1247 cdev->pdev->bus->number, 1248 
PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 1249 1250 hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); 1251 if (!hwfn->slowpath_wq) { 1252 DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); 1253 return -ENOMEM; 1254 } 1255 1256 INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); 1257 hwfn->slowpath_wq_active = true; 1258 } 1259 1260 return 0; 1261 } 1262 1263 static int qed_slowpath_start(struct qed_dev *cdev, 1264 struct qed_slowpath_params *params) 1265 { 1266 struct qed_drv_load_params drv_load_params; 1267 struct qed_hw_init_params hw_init_params; 1268 struct qed_mcp_drv_version drv_version; 1269 struct qed_tunnel_info tunn_info; 1270 const u8 *data = NULL; 1271 struct qed_hwfn *hwfn; 1272 struct qed_ptt *p_ptt; 1273 int rc = -EINVAL; 1274 1275 if (qed_iov_wq_start(cdev)) 1276 goto err; 1277 1278 if (qed_slowpath_wq_start(cdev)) 1279 goto err; 1280 1281 if (IS_PF(cdev)) { 1282 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, 1283 &cdev->pdev->dev); 1284 if (rc) { 1285 DP_NOTICE(cdev, 1286 "Failed to find fw file - /lib/firmware/%s\n", 1287 QED_FW_FILE_NAME); 1288 goto err; 1289 } 1290 1291 if (cdev->num_hwfns == 1) { 1292 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 1293 if (p_ptt) { 1294 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; 1295 } else { 1296 DP_NOTICE(cdev, 1297 "Failed to acquire PTT for aRFS\n"); 1298 goto err; 1299 } 1300 } 1301 } 1302 1303 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; 1304 rc = qed_nic_setup(cdev); 1305 if (rc) 1306 goto err; 1307 1308 if (IS_PF(cdev)) 1309 rc = qed_slowpath_setup_int(cdev, params->int_mode); 1310 else 1311 rc = qed_slowpath_vf_setup_int(cdev); 1312 if (rc) 1313 goto err1; 1314 1315 if (IS_PF(cdev)) { 1316 /* Allocate stream for unzipping */ 1317 rc = qed_alloc_stream_mem(cdev); 1318 if (rc) 1319 goto err2; 1320 1321 /* First Dword used to differentiate between various sources */ 1322 data = cdev->firmware->data + sizeof(u32); 1323 1324 qed_dbg_pf_init(cdev); 1325 } 1326 1327 /* Start 
the slowpath */ 1328 memset(&hw_init_params, 0, sizeof(hw_init_params)); 1329 memset(&tunn_info, 0, sizeof(tunn_info)); 1330 tunn_info.vxlan.b_mode_enabled = true; 1331 tunn_info.l2_gre.b_mode_enabled = true; 1332 tunn_info.ip_gre.b_mode_enabled = true; 1333 tunn_info.l2_geneve.b_mode_enabled = true; 1334 tunn_info.ip_geneve.b_mode_enabled = true; 1335 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1336 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1337 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1338 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1339 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1340 hw_init_params.p_tunn = &tunn_info; 1341 hw_init_params.b_hw_start = true; 1342 hw_init_params.int_mode = cdev->int_params.out.int_mode; 1343 hw_init_params.allow_npar_tx_switch = true; 1344 hw_init_params.bin_fw_data = data; 1345 1346 memset(&drv_load_params, 0, sizeof(drv_load_params)); 1347 drv_load_params.is_crash_kernel = is_kdump_kernel(); 1348 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; 1349 drv_load_params.avoid_eng_reset = false; 1350 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; 1351 hw_init_params.p_drv_load_params = &drv_load_params; 1352 1353 rc = qed_hw_init(cdev, &hw_init_params); 1354 if (rc) 1355 goto err2; 1356 1357 DP_INFO(cdev, 1358 "HW initialization and function start completed successfully\n"); 1359 1360 if (IS_PF(cdev)) { 1361 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1362 BIT(QED_MODE_L2GENEVE_TUNN) | 1363 BIT(QED_MODE_IPGENEVE_TUNN) | 1364 BIT(QED_MODE_L2GRE_TUNN) | 1365 BIT(QED_MODE_IPGRE_TUNN)); 1366 } 1367 1368 /* Allocate LL2 interface if needed */ 1369 if (QED_LEADING_HWFN(cdev)->using_ll2) { 1370 rc = qed_ll2_alloc_if(cdev); 1371 if (rc) 1372 goto err3; 1373 } 1374 if (IS_PF(cdev)) { 1375 hwfn = QED_LEADING_HWFN(cdev); 1376 drv_version.version = (params->drv_major << 24) | 1377 (params->drv_minor << 16) | 1378 (params->drv_rev << 8) | 1379 
(params->drv_eng); 1380 strscpy(drv_version.name, params->name, 1381 MCP_DRV_VER_STR_SIZE - 4); 1382 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 1383 &drv_version); 1384 if (rc) { 1385 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1386 goto err4; 1387 } 1388 } 1389 1390 qed_reset_vport_stats(cdev); 1391 1392 return 0; 1393 1394 err4: 1395 qed_ll2_dealloc_if(cdev); 1396 err3: 1397 qed_hw_stop(cdev); 1398 err2: 1399 qed_hw_timers_stop_all(cdev); 1400 if (IS_PF(cdev)) 1401 qed_slowpath_irq_free(cdev); 1402 qed_free_stream_mem(cdev); 1403 qed_disable_msix(cdev); 1404 err1: 1405 qed_resc_free(cdev); 1406 err: 1407 if (IS_PF(cdev)) 1408 release_firmware(cdev->firmware); 1409 1410 if (IS_PF(cdev) && (cdev->num_hwfns == 1) && 1411 QED_LEADING_HWFN(cdev)->p_arfs_ptt) 1412 qed_ptt_release(QED_LEADING_HWFN(cdev), 1413 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1414 1415 qed_iov_wq_stop(cdev, false); 1416 1417 qed_slowpath_wq_stop(cdev); 1418 1419 return rc; 1420 } 1421 1422 static int qed_slowpath_stop(struct qed_dev *cdev) 1423 { 1424 if (!cdev) 1425 return -ENODEV; 1426 1427 qed_slowpath_wq_stop(cdev); 1428 1429 qed_ll2_dealloc_if(cdev); 1430 1431 if (IS_PF(cdev)) { 1432 if (cdev->num_hwfns == 1) 1433 qed_ptt_release(QED_LEADING_HWFN(cdev), 1434 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1435 qed_free_stream_mem(cdev); 1436 if (IS_QED_ETH_IF(cdev)) 1437 qed_sriov_disable(cdev, true); 1438 } 1439 1440 qed_nic_stop(cdev); 1441 1442 if (IS_PF(cdev)) 1443 qed_slowpath_irq_free(cdev); 1444 1445 qed_disable_msix(cdev); 1446 1447 qed_resc_free(cdev); 1448 1449 qed_iov_wq_stop(cdev, true); 1450 1451 if (IS_PF(cdev)) 1452 release_firmware(cdev->firmware); 1453 1454 return 0; 1455 } 1456 1457 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) 1458 { 1459 int i; 1460 1461 memcpy(cdev->name, name, NAME_SIZE); 1462 for_each_hwfn(cdev, i) 1463 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 1464 } 1465 1466 static u32 qed_sb_init(struct qed_dev 
*cdev, 1467 struct qed_sb_info *sb_info, 1468 void *sb_virt_addr, 1469 dma_addr_t sb_phy_addr, u16 sb_id, 1470 enum qed_sb_type type) 1471 { 1472 struct qed_hwfn *p_hwfn; 1473 struct qed_ptt *p_ptt; 1474 u16 rel_sb_id; 1475 u32 rc; 1476 1477 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1478 if (type == QED_SB_TYPE_L2_QUEUE) { 1479 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1480 rel_sb_id = sb_id / cdev->num_hwfns; 1481 } else { 1482 p_hwfn = QED_AFFIN_HWFN(cdev); 1483 rel_sb_id = sb_id; 1484 } 1485 1486 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1487 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1488 IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); 1489 1490 if (IS_PF(p_hwfn->cdev)) { 1491 p_ptt = qed_ptt_acquire(p_hwfn); 1492 if (!p_ptt) 1493 return -EBUSY; 1494 1495 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, 1496 sb_phy_addr, rel_sb_id); 1497 qed_ptt_release(p_hwfn, p_ptt); 1498 } else { 1499 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, 1500 sb_phy_addr, rel_sb_id); 1501 } 1502 1503 return rc; 1504 } 1505 1506 static u32 qed_sb_release(struct qed_dev *cdev, 1507 struct qed_sb_info *sb_info, 1508 u16 sb_id, 1509 enum qed_sb_type type) 1510 { 1511 struct qed_hwfn *p_hwfn; 1512 u16 rel_sb_id; 1513 u32 rc; 1514 1515 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1516 if (type == QED_SB_TYPE_L2_QUEUE) { 1517 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1518 rel_sb_id = sb_id / cdev->num_hwfns; 1519 } else { 1520 p_hwfn = QED_AFFIN_HWFN(cdev); 1521 rel_sb_id = sb_id; 1522 } 1523 1524 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1525 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1526 IS_LEAD_HWFN(p_hwfn) ? 
0 : 1, rel_sb_id, sb_id); 1527 1528 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); 1529 1530 return rc; 1531 } 1532 1533 static bool qed_can_link_change(struct qed_dev *cdev) 1534 { 1535 return true; 1536 } 1537 1538 static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params, 1539 const struct qed_link_params *params) 1540 { 1541 struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed; 1542 const struct qed_mfw_speed_map *map; 1543 u32 i; 1544 1545 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1546 ext_speed->autoneg = !!params->autoneg; 1547 1548 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1549 ext_speed->advertised_speeds = 0; 1550 1551 for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) { 1552 map = qed_mfw_ext_maps + i; 1553 1554 if (linkmode_intersects(params->adv_speeds, map->caps)) 1555 ext_speed->advertised_speeds |= map->mfw_val; 1556 } 1557 } 1558 1559 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) { 1560 switch (params->forced_speed) { 1561 case SPEED_1000: 1562 ext_speed->forced_speed = QED_EXT_SPEED_1G; 1563 break; 1564 case SPEED_10000: 1565 ext_speed->forced_speed = QED_EXT_SPEED_10G; 1566 break; 1567 case SPEED_20000: 1568 ext_speed->forced_speed = QED_EXT_SPEED_20G; 1569 break; 1570 case SPEED_25000: 1571 ext_speed->forced_speed = QED_EXT_SPEED_25G; 1572 break; 1573 case SPEED_40000: 1574 ext_speed->forced_speed = QED_EXT_SPEED_40G; 1575 break; 1576 case SPEED_50000: 1577 ext_speed->forced_speed = QED_EXT_SPEED_50G_R | 1578 QED_EXT_SPEED_50G_R2; 1579 break; 1580 case SPEED_100000: 1581 ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 | 1582 QED_EXT_SPEED_100G_R4 | 1583 QED_EXT_SPEED_100G_P4; 1584 break; 1585 default: 1586 break; 1587 } 1588 } 1589 1590 if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)) 1591 return; 1592 1593 switch (params->forced_speed) { 1594 case SPEED_25000: 1595 switch (params->fec) { 1596 case FEC_FORCE_MODE_NONE: 1597 
link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE; 1598 break; 1599 case FEC_FORCE_MODE_FIRECODE: 1600 link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R; 1601 break; 1602 case FEC_FORCE_MODE_RS: 1603 link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528; 1604 break; 1605 case FEC_FORCE_MODE_AUTO: 1606 link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 | 1607 ETH_EXT_FEC_25G_BASE_R | 1608 ETH_EXT_FEC_25G_NONE; 1609 break; 1610 default: 1611 break; 1612 } 1613 1614 break; 1615 case SPEED_40000: 1616 switch (params->fec) { 1617 case FEC_FORCE_MODE_NONE: 1618 link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE; 1619 break; 1620 case FEC_FORCE_MODE_FIRECODE: 1621 link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R; 1622 break; 1623 case FEC_FORCE_MODE_AUTO: 1624 link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R | 1625 ETH_EXT_FEC_40G_NONE; 1626 break; 1627 default: 1628 break; 1629 } 1630 1631 break; 1632 case SPEED_50000: 1633 switch (params->fec) { 1634 case FEC_FORCE_MODE_NONE: 1635 link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE; 1636 break; 1637 case FEC_FORCE_MODE_FIRECODE: 1638 link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R; 1639 break; 1640 case FEC_FORCE_MODE_RS: 1641 link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528; 1642 break; 1643 case FEC_FORCE_MODE_AUTO: 1644 link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 | 1645 ETH_EXT_FEC_50G_BASE_R | 1646 ETH_EXT_FEC_50G_NONE; 1647 break; 1648 default: 1649 break; 1650 } 1651 1652 break; 1653 case SPEED_100000: 1654 switch (params->fec) { 1655 case FEC_FORCE_MODE_NONE: 1656 link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE; 1657 break; 1658 case FEC_FORCE_MODE_FIRECODE: 1659 link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R; 1660 break; 1661 case FEC_FORCE_MODE_RS: 1662 link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528; 1663 break; 1664 case FEC_FORCE_MODE_AUTO: 1665 link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 | 1666 ETH_EXT_FEC_100G_BASE_R | 1667 ETH_EXT_FEC_100G_NONE; 1668 break; 1669 default: 1670 break; 
1671 } 1672 1673 break; 1674 default: 1675 break; 1676 } 1677 } 1678 1679 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) 1680 { 1681 struct qed_mcp_link_params *link_params; 1682 struct qed_mcp_link_speed_params *speed; 1683 const struct qed_mfw_speed_map *map; 1684 struct qed_hwfn *hwfn; 1685 struct qed_ptt *ptt; 1686 int rc; 1687 u32 i; 1688 1689 if (!cdev) 1690 return -ENODEV; 1691 1692 /* The link should be set only once per PF */ 1693 hwfn = &cdev->hwfns[0]; 1694 1695 /* When VF wants to set link, force it to read the bulletin instead. 1696 * This mimics the PF behavior, where a noitification [both immediate 1697 * and possible later] would be generated when changing properties. 1698 */ 1699 if (IS_VF(cdev)) { 1700 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); 1701 return 0; 1702 } 1703 1704 ptt = qed_ptt_acquire(hwfn); 1705 if (!ptt) 1706 return -EBUSY; 1707 1708 link_params = qed_mcp_get_link_params(hwfn); 1709 if (!link_params) 1710 return -ENODATA; 1711 1712 speed = &link_params->speed; 1713 1714 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1715 speed->autoneg = !!params->autoneg; 1716 1717 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1718 speed->advertised_speeds = 0; 1719 1720 for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) { 1721 map = qed_mfw_legacy_maps + i; 1722 1723 if (linkmode_intersects(params->adv_speeds, map->caps)) 1724 speed->advertised_speeds |= map->mfw_val; 1725 } 1726 } 1727 1728 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1729 speed->forced_speed = params->forced_speed; 1730 1731 if (qed_mcp_is_ext_speed_supported(hwfn)) 1732 qed_set_ext_speed_params(link_params, params); 1733 1734 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 1735 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1736 link_params->pause.autoneg = true; 1737 else 1738 link_params->pause.autoneg = false; 1739 if (params->pause_config & 
QED_LINK_PAUSE_RX_ENABLE) 1740 link_params->pause.forced_rx = true; 1741 else 1742 link_params->pause.forced_rx = false; 1743 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 1744 link_params->pause.forced_tx = true; 1745 else 1746 link_params->pause.forced_tx = false; 1747 } 1748 1749 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1750 switch (params->loopback_mode) { 1751 case QED_LINK_LOOPBACK_INT_PHY: 1752 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; 1753 break; 1754 case QED_LINK_LOOPBACK_EXT_PHY: 1755 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; 1756 break; 1757 case QED_LINK_LOOPBACK_EXT: 1758 link_params->loopback_mode = ETH_LOOPBACK_EXT; 1759 break; 1760 case QED_LINK_LOOPBACK_MAC: 1761 link_params->loopback_mode = ETH_LOOPBACK_MAC; 1762 break; 1763 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123: 1764 link_params->loopback_mode = 1765 ETH_LOOPBACK_CNIG_AH_ONLY_0123; 1766 break; 1767 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301: 1768 link_params->loopback_mode = 1769 ETH_LOOPBACK_CNIG_AH_ONLY_2301; 1770 break; 1771 case QED_LINK_LOOPBACK_PCS_AH_ONLY: 1772 link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY; 1773 break; 1774 case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY: 1775 link_params->loopback_mode = 1776 ETH_LOOPBACK_REVERSE_MAC_AH_ONLY; 1777 break; 1778 case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY: 1779 link_params->loopback_mode = 1780 ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY; 1781 break; 1782 default: 1783 link_params->loopback_mode = ETH_LOOPBACK_NONE; 1784 break; 1785 } 1786 } 1787 1788 if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) 1789 memcpy(&link_params->eee, ¶ms->eee, 1790 sizeof(link_params->eee)); 1791 1792 if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG) 1793 link_params->fec = params->fec; 1794 1795 rc = qed_mcp_set_link(hwfn, ptt, params->link_up); 1796 1797 qed_ptt_release(hwfn, ptt); 1798 1799 return rc; 1800 } 1801 1802 static int qed_get_port_type(u32 media_type) 1803 { 1804 int port_type; 1805 1806 
switch (media_type) { 1807 case MEDIA_SFPP_10G_FIBER: 1808 case MEDIA_SFP_1G_FIBER: 1809 case MEDIA_XFP_FIBER: 1810 case MEDIA_MODULE_FIBER: 1811 port_type = PORT_FIBRE; 1812 break; 1813 case MEDIA_DA_TWINAX: 1814 port_type = PORT_DA; 1815 break; 1816 case MEDIA_BASE_T: 1817 port_type = PORT_TP; 1818 break; 1819 case MEDIA_KR: 1820 case MEDIA_NOT_PRESENT: 1821 port_type = PORT_NONE; 1822 break; 1823 case MEDIA_UNSPECIFIED: 1824 default: 1825 port_type = PORT_OTHER; 1826 break; 1827 } 1828 return port_type; 1829 } 1830 1831 static int qed_get_link_data(struct qed_hwfn *hwfn, 1832 struct qed_mcp_link_params *params, 1833 struct qed_mcp_link_state *link, 1834 struct qed_mcp_link_capabilities *link_caps) 1835 { 1836 void *p; 1837 1838 if (!IS_PF(hwfn->cdev)) { 1839 qed_vf_get_link_params(hwfn, params); 1840 qed_vf_get_link_state(hwfn, link); 1841 qed_vf_get_link_caps(hwfn, link_caps); 1842 1843 return 0; 1844 } 1845 1846 p = qed_mcp_get_link_params(hwfn); 1847 if (!p) 1848 return -ENXIO; 1849 memcpy(params, p, sizeof(*params)); 1850 1851 p = qed_mcp_get_link_state(hwfn); 1852 if (!p) 1853 return -ENXIO; 1854 memcpy(link, p, sizeof(*link)); 1855 1856 p = qed_mcp_get_link_capabilities(hwfn); 1857 if (!p) 1858 return -ENXIO; 1859 memcpy(link_caps, p, sizeof(*link_caps)); 1860 1861 return 0; 1862 } 1863 1864 static void qed_fill_link_capability(struct qed_hwfn *hwfn, 1865 struct qed_ptt *ptt, u32 capability, 1866 unsigned long *if_caps) 1867 { 1868 u32 media_type, tcvr_state, tcvr_type; 1869 u32 speed_mask, board_cfg; 1870 1871 if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) 1872 media_type = MEDIA_UNSPECIFIED; 1873 1874 if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) 1875 tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; 1876 1877 if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) 1878 speed_mask = 0xFFFFFFFF; 1879 1880 if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) 1881 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; 1882 1883 
DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 1884 "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", 1885 media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); 1886 1887 switch (media_type) { 1888 case MEDIA_DA_TWINAX: 1889 phylink_set(if_caps, FIBRE); 1890 1891 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1892 phylink_set(if_caps, 20000baseKR2_Full); 1893 1894 /* For DAC media multiple speed capabilities are supported */ 1895 capability |= speed_mask; 1896 1897 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1898 phylink_set(if_caps, 1000baseKX_Full); 1899 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1900 phylink_set(if_caps, 10000baseCR_Full); 1901 1902 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1903 switch (tcvr_type) { 1904 case ETH_TRANSCEIVER_TYPE_40G_CR4: 1905 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: 1906 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: 1907 phylink_set(if_caps, 40000baseCR4_Full); 1908 break; 1909 default: 1910 break; 1911 } 1912 1913 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1914 phylink_set(if_caps, 25000baseCR_Full); 1915 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1916 phylink_set(if_caps, 50000baseCR2_Full); 1917 1918 if (capability & 1919 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1920 switch (tcvr_type) { 1921 case ETH_TRANSCEIVER_TYPE_100G_CR4: 1922 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: 1923 phylink_set(if_caps, 100000baseCR4_Full); 1924 break; 1925 default: 1926 break; 1927 } 1928 1929 break; 1930 case MEDIA_BASE_T: 1931 phylink_set(if_caps, TP); 1932 1933 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { 1934 if (capability & 1935 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1936 phylink_set(if_caps, 1000baseT_Full); 1937 if (capability & 1938 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1939 phylink_set(if_caps, 10000baseT_Full); 1940 } 1941 1942 if (board_cfg & 
NVM_CFG1_PORT_PORT_TYPE_MODULE) { 1943 phylink_set(if_caps, FIBRE); 1944 1945 switch (tcvr_type) { 1946 case ETH_TRANSCEIVER_TYPE_1000BASET: 1947 phylink_set(if_caps, 1000baseT_Full); 1948 break; 1949 case ETH_TRANSCEIVER_TYPE_10G_BASET: 1950 phylink_set(if_caps, 10000baseT_Full); 1951 break; 1952 default: 1953 break; 1954 } 1955 } 1956 1957 break; 1958 case MEDIA_SFP_1G_FIBER: 1959 case MEDIA_SFPP_10G_FIBER: 1960 case MEDIA_XFP_FIBER: 1961 case MEDIA_MODULE_FIBER: 1962 phylink_set(if_caps, FIBRE); 1963 capability |= speed_mask; 1964 1965 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1966 switch (tcvr_type) { 1967 case ETH_TRANSCEIVER_TYPE_1G_LX: 1968 case ETH_TRANSCEIVER_TYPE_1G_SX: 1969 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: 1970 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: 1971 phylink_set(if_caps, 1000baseKX_Full); 1972 break; 1973 default: 1974 break; 1975 } 1976 1977 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1978 switch (tcvr_type) { 1979 case ETH_TRANSCEIVER_TYPE_10G_SR: 1980 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: 1981 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: 1982 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: 1983 phylink_set(if_caps, 10000baseSR_Full); 1984 break; 1985 case ETH_TRANSCEIVER_TYPE_10G_LR: 1986 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: 1987 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: 1988 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: 1989 phylink_set(if_caps, 10000baseLR_Full); 1990 break; 1991 case ETH_TRANSCEIVER_TYPE_10G_LRM: 1992 phylink_set(if_caps, 10000baseLRM_Full); 1993 break; 1994 case ETH_TRANSCEIVER_TYPE_10G_ER: 1995 phylink_set(if_caps, 10000baseR_FEC); 1996 break; 1997 default: 1998 break; 1999 } 2000 2001 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 2002 phylink_set(if_caps, 20000baseKR2_Full); 2003 2004 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 2005 switch (tcvr_type) { 2006 case ETH_TRANSCEIVER_TYPE_25G_SR: 2007 
case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: 2008 phylink_set(if_caps, 25000baseSR_Full); 2009 break; 2010 default: 2011 break; 2012 } 2013 2014 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 2015 switch (tcvr_type) { 2016 case ETH_TRANSCEIVER_TYPE_40G_LR4: 2017 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: 2018 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: 2019 phylink_set(if_caps, 40000baseLR4_Full); 2020 break; 2021 case ETH_TRANSCEIVER_TYPE_40G_SR4: 2022 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: 2023 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: 2024 phylink_set(if_caps, 40000baseSR4_Full); 2025 break; 2026 default: 2027 break; 2028 } 2029 2030 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 2031 phylink_set(if_caps, 50000baseKR2_Full); 2032 2033 if (capability & 2034 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 2035 switch (tcvr_type) { 2036 case ETH_TRANSCEIVER_TYPE_100G_SR4: 2037 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: 2038 phylink_set(if_caps, 100000baseSR4_Full); 2039 break; 2040 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: 2041 phylink_set(if_caps, 100000baseLR4_ER4_Full); 2042 break; 2043 default: 2044 break; 2045 } 2046 2047 break; 2048 case MEDIA_KR: 2049 phylink_set(if_caps, Backplane); 2050 2051 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 2052 phylink_set(if_caps, 20000baseKR2_Full); 2053 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 2054 phylink_set(if_caps, 1000baseKX_Full); 2055 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 2056 phylink_set(if_caps, 10000baseKR_Full); 2057 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 2058 phylink_set(if_caps, 25000baseKR_Full); 2059 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 2060 phylink_set(if_caps, 40000baseKR4_Full); 2061 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 2062 phylink_set(if_caps, 50000baseKR2_Full); 2063 if (capability & 2064 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 2065 phylink_set(if_caps, 100000baseKR4_Full); 2066 2067 break; 2068 case MEDIA_UNSPECIFIED: 2069 case MEDIA_NOT_PRESENT: 2070 default: 2071 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, 2072 "Unknown media and transceiver type;\n"); 2073 break; 2074 } 2075 } 2076 2077 static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask) 2078 { 2079 *speed_mask = 0; 2080 2081 if (caps & 2082 (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD)) 2083 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 2084 if (caps & QED_LINK_PARTNER_SPEED_10G) 2085 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 2086 if (caps & QED_LINK_PARTNER_SPEED_20G) 2087 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; 2088 if (caps & QED_LINK_PARTNER_SPEED_25G) 2089 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 2090 if (caps & QED_LINK_PARTNER_SPEED_40G) 2091 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 2092 if (caps & QED_LINK_PARTNER_SPEED_50G) 2093 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 2094 if (caps & QED_LINK_PARTNER_SPEED_100G) 2095 *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; 2096 } 2097 2098 static void qed_fill_link(struct qed_hwfn *hwfn, 2099 struct qed_ptt *ptt, 2100 struct qed_link_output *if_link) 2101 { 2102 struct qed_mcp_link_capabilities link_caps; 2103 struct qed_mcp_link_params params; 2104 struct qed_mcp_link_state link; 2105 u32 media_type, speed_mask; 2106 2107 memset(if_link, 0, sizeof(*if_link)); 2108 2109 /* Prepare source inputs */ 2110 if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { 2111 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 2112 return; 2113 } 2114 2115 /* Set the link parameters to pass to protocol driver */ 2116 if (link.link_up) 2117 if_link->link_up = true; 2118 2119 if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) { 2120 if (link_caps.default_ext_autoneg) 2121 
phylink_set(if_link->supported_caps, Autoneg); 2122 2123 linkmode_copy(if_link->advertised_caps, if_link->supported_caps); 2124 2125 if (params.ext_speed.autoneg) 2126 phylink_set(if_link->advertised_caps, Autoneg); 2127 else 2128 phylink_clear(if_link->advertised_caps, Autoneg); 2129 2130 qed_fill_link_capability(hwfn, ptt, 2131 params.ext_speed.advertised_speeds, 2132 if_link->advertised_caps); 2133 } else { 2134 if (link_caps.default_speed_autoneg) 2135 phylink_set(if_link->supported_caps, Autoneg); 2136 2137 linkmode_copy(if_link->advertised_caps, if_link->supported_caps); 2138 2139 if (params.speed.autoneg) 2140 phylink_set(if_link->advertised_caps, Autoneg); 2141 else 2142 phylink_clear(if_link->advertised_caps, Autoneg); 2143 } 2144 2145 if (params.pause.autoneg || 2146 (params.pause.forced_rx && params.pause.forced_tx)) 2147 phylink_set(if_link->supported_caps, Asym_Pause); 2148 if (params.pause.autoneg || params.pause.forced_rx || 2149 params.pause.forced_tx) 2150 phylink_set(if_link->supported_caps, Pause); 2151 2152 if_link->sup_fec = link_caps.fec_default; 2153 if_link->active_fec = params.fec; 2154 2155 /* Fill link advertised capability */ 2156 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, 2157 if_link->advertised_caps); 2158 2159 /* Fill link supported capability */ 2160 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, 2161 if_link->supported_caps); 2162 2163 /* Fill partner advertised capability */ 2164 qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask); 2165 qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps); 2166 2167 if (link.link_up) 2168 if_link->speed = link.speed; 2169 2170 /* TODO - fill duplex properly */ 2171 if_link->duplex = DUPLEX_FULL; 2172 qed_mcp_get_media_type(hwfn, ptt, &media_type); 2173 if_link->port = qed_get_port_type(media_type); 2174 2175 if_link->autoneg = params.speed.autoneg; 2176 2177 if (params.pause.autoneg) 2178 if_link->pause_config |= 
QED_LINK_PAUSE_AUTONEG_ENABLE; 2179 if (params.pause.forced_rx) 2180 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 2181 if (params.pause.forced_tx) 2182 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 2183 2184 if (link.an_complete) 2185 phylink_set(if_link->lp_caps, Autoneg); 2186 if (link.partner_adv_pause) 2187 phylink_set(if_link->lp_caps, Pause); 2188 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 2189 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 2190 phylink_set(if_link->lp_caps, Asym_Pause); 2191 2192 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 2193 if_link->eee_supported = false; 2194 } else { 2195 if_link->eee_supported = true; 2196 if_link->eee_active = link.eee_active; 2197 if_link->sup_caps = link_caps.eee_speed_caps; 2198 /* MFW clears adv_caps on eee disable; use configured value */ 2199 if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps : 2200 params.eee.adv_caps; 2201 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 2202 if_link->eee.enable = params.eee.enable; 2203 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 2204 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 2205 } 2206 } 2207 2208 static void qed_get_current_link(struct qed_dev *cdev, 2209 struct qed_link_output *if_link) 2210 { 2211 struct qed_hwfn *hwfn; 2212 struct qed_ptt *ptt; 2213 int i; 2214 2215 hwfn = &cdev->hwfns[0]; 2216 if (IS_PF(cdev)) { 2217 ptt = qed_ptt_acquire(hwfn); 2218 if (ptt) { 2219 qed_fill_link(hwfn, ptt, if_link); 2220 qed_ptt_release(hwfn, ptt); 2221 } else { 2222 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); 2223 } 2224 } else { 2225 qed_fill_link(hwfn, NULL, if_link); 2226 } 2227 2228 for_each_hwfn(cdev, i) 2229 qed_inform_vf_link_state(&cdev->hwfns[i]); 2230 } 2231 2232 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2233 { 2234 void *cookie = hwfn->cdev->ops_cookie; 2235 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2236 struct qed_link_output if_link; 
2237 2238 qed_fill_link(hwfn, ptt, &if_link); 2239 qed_inform_vf_link_state(hwfn); 2240 2241 if (IS_LEAD_HWFN(hwfn) && cookie) 2242 op->link_update(cookie, &if_link); 2243 } 2244 2245 void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2246 { 2247 void *cookie = hwfn->cdev->ops_cookie; 2248 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2249 2250 if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update) 2251 op->bw_update(cookie); 2252 } 2253 2254 static int qed_drain(struct qed_dev *cdev) 2255 { 2256 struct qed_hwfn *hwfn; 2257 struct qed_ptt *ptt; 2258 int i, rc; 2259 2260 if (IS_VF(cdev)) 2261 return 0; 2262 2263 for_each_hwfn(cdev, i) { 2264 hwfn = &cdev->hwfns[i]; 2265 ptt = qed_ptt_acquire(hwfn); 2266 if (!ptt) { 2267 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 2268 return -EBUSY; 2269 } 2270 rc = qed_mcp_drain(hwfn, ptt); 2271 qed_ptt_release(hwfn, ptt); 2272 if (rc) 2273 return rc; 2274 } 2275 2276 return 0; 2277 } 2278 2279 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 2280 struct qed_nvm_image_att *nvm_image, 2281 u32 *crc) 2282 { 2283 u8 *buf = NULL; 2284 int rc; 2285 2286 /* Allocate a buffer for holding the nvram image */ 2287 buf = kzalloc(nvm_image->length, GFP_KERNEL); 2288 if (!buf) 2289 return -ENOMEM; 2290 2291 /* Read image into buffer */ 2292 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 2293 buf, nvm_image->length); 2294 if (rc) { 2295 DP_ERR(cdev, "Failed reading image from nvm\n"); 2296 goto out; 2297 } 2298 2299 /* Convert the buffer into big-endian format (excluding the 2300 * closing 4 bytes of CRC). 2301 */ 2302 cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, 2303 DIV_ROUND_UP(nvm_image->length - 4, 4)); 2304 2305 /* Calc CRC for the "actual" image buffer, i.e. not including 2306 * the last 4 CRC bytes. 
2307 */ 2308 *crc = ~crc32(~0U, buf, nvm_image->length - 4); 2309 *crc = (__force u32)cpu_to_be32p(crc); 2310 2311 out: 2312 kfree(buf); 2313 2314 return rc; 2315 } 2316 2317 /* Binary file format - 2318 * /----------------------------------------------------------------------\ 2319 * 0B | 0x4 [command index] | 2320 * 4B | image_type | Options | Number of register settings | 2321 * 8B | Value | 2322 * 12B | Mask | 2323 * 16B | Offset | 2324 * \----------------------------------------------------------------------/ 2325 * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 2326 * Options - 0'b - Calculate & Update CRC for image 2327 */ 2328 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, 2329 bool *check_resp) 2330 { 2331 struct qed_nvm_image_att nvm_image; 2332 struct qed_hwfn *p_hwfn; 2333 bool is_crc = false; 2334 u32 image_type; 2335 int rc = 0, i; 2336 u16 len; 2337 2338 *data += 4; 2339 image_type = **data; 2340 p_hwfn = QED_LEADING_HWFN(cdev); 2341 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 2342 if (image_type == p_hwfn->nvm_info.image_att[i].image_type) 2343 break; 2344 if (i == p_hwfn->nvm_info.num_images) { 2345 DP_ERR(cdev, "Failed to find nvram image of type %08x\n", 2346 image_type); 2347 return -ENOENT; 2348 } 2349 2350 nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; 2351 nvm_image.length = p_hwfn->nvm_info.image_att[i].len; 2352 2353 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2354 "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", 2355 **data, image_type, nvm_image.start_addr, 2356 nvm_image.start_addr + nvm_image.length - 1); 2357 (*data)++; 2358 is_crc = !!(**data & BIT(0)); 2359 (*data)++; 2360 len = *((u16 *)*data); 2361 *data += 2; 2362 if (is_crc) { 2363 u32 crc = 0; 2364 2365 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); 2366 if (rc) { 2367 DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); 2368 goto exit; 2369 } 2370 2371 rc = qed_mcp_nvm_write(cdev, 
QED_NVM_WRITE_NVRAM, 2372 (nvm_image.start_addr + 2373 nvm_image.length - 4), (u8 *)&crc, 4); 2374 if (rc) 2375 DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", 2376 nvm_image.start_addr + nvm_image.length - 4, rc); 2377 goto exit; 2378 } 2379 2380 /* Iterate over the values for setting */ 2381 while (len) { 2382 u32 offset, mask, value, cur_value; 2383 u8 buf[4]; 2384 2385 value = *((u32 *)*data); 2386 *data += 4; 2387 mask = *((u32 *)*data); 2388 *data += 4; 2389 offset = *((u32 *)*data); 2390 *data += 4; 2391 2392 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, 2393 4); 2394 if (rc) { 2395 DP_ERR(cdev, "Failed reading from %08x\n", 2396 nvm_image.start_addr + offset); 2397 goto exit; 2398 } 2399 2400 cur_value = le32_to_cpu(*((__le32 *)buf)); 2401 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2402 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", 2403 nvm_image.start_addr + offset, cur_value, 2404 (cur_value & ~mask) | (value & mask), value, mask); 2405 value = (value & mask) | (cur_value & ~mask); 2406 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2407 nvm_image.start_addr + offset, 2408 (u8 *)&value, 4); 2409 if (rc) { 2410 DP_ERR(cdev, "Failed writing to %08x\n", 2411 nvm_image.start_addr + offset); 2412 goto exit; 2413 } 2414 2415 len--; 2416 } 2417 exit: 2418 return rc; 2419 } 2420 2421 /* Binary file format - 2422 * /----------------------------------------------------------------------\ 2423 * 0B | 0x3 [command index] | 2424 * 4B | b'0: check_response? 
| b'1-31 reserved | 2425 * 8B | File-type | reserved | 2426 * 12B | Image length in bytes | 2427 * \----------------------------------------------------------------------/ 2428 * Start a new file of the provided type 2429 */ 2430 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, 2431 const u8 **data, bool *check_resp) 2432 { 2433 u32 file_type, file_size = 0; 2434 int rc; 2435 2436 *data += 4; 2437 *check_resp = !!(**data & BIT(0)); 2438 *data += 4; 2439 file_type = **data; 2440 2441 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2442 "About to start a new file of type %02x\n", file_type); 2443 if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { 2444 *data += 4; 2445 file_size = *((u32 *)(*data)); 2446 } 2447 2448 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, 2449 (u8 *)(&file_size), 4); 2450 *data += 4; 2451 2452 return rc; 2453 } 2454 2455 /* Binary file format - 2456 * /----------------------------------------------------------------------\ 2457 * 0B | 0x2 [command index] | 2458 * 4B | Length in bytes | 2459 * 8B | b'0: check_response? | b'1-31 reserved | 2460 * 12B | Offset in bytes | 2461 * 16B | Data ... | 2462 * \----------------------------------------------------------------------/ 2463 * Write data as part of a file that was previously started. 
Data should be 2464 * of length equal to that provided in the message 2465 */ 2466 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, 2467 const u8 **data, bool *check_resp) 2468 { 2469 u32 offset, len; 2470 int rc; 2471 2472 *data += 4; 2473 len = *((u32 *)(*data)); 2474 *data += 4; 2475 *check_resp = !!(**data & BIT(0)); 2476 *data += 4; 2477 offset = *((u32 *)(*data)); 2478 *data += 4; 2479 2480 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2481 "About to write File-data: %08x bytes to offset %08x\n", 2482 len, offset); 2483 2484 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, 2485 (char *)(*data), len); 2486 *data += len; 2487 2488 return rc; 2489 } 2490 2491 /* Binary file format [General header] - 2492 * /----------------------------------------------------------------------\ 2493 * 0B | QED_NVM_SIGNATURE | 2494 * 4B | Length in bytes | 2495 * 8B | Highest command in this batchfile | Reserved | 2496 * \----------------------------------------------------------------------/ 2497 */ 2498 static int qed_nvm_flash_image_validate(struct qed_dev *cdev, 2499 const struct firmware *image, 2500 const u8 **data) 2501 { 2502 u32 signature, len; 2503 2504 /* Check minimum size */ 2505 if (image->size < 12) { 2506 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); 2507 return -EINVAL; 2508 } 2509 2510 /* Check signature */ 2511 signature = *((u32 *)(*data)); 2512 if (signature != QED_NVM_SIGNATURE) { 2513 DP_ERR(cdev, "Wrong signature '%08x'\n", signature); 2514 return -EINVAL; 2515 } 2516 2517 *data += 4; 2518 /* Validate internal size equals the image-size */ 2519 len = *((u32 *)(*data)); 2520 if (len != image->size) { 2521 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", 2522 len, (u32)image->size); 2523 return -EINVAL; 2524 } 2525 2526 *data += 4; 2527 /* Make sure driver familiar with all commands necessary for this */ 2528 if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { 2529 DP_ERR(cdev, "File contains unsupported commands [Need 
%04x]\n", 2530 *((u16 *)(*data))); 2531 return -EINVAL; 2532 } 2533 2534 *data += 4; 2535 2536 return 0; 2537 } 2538 2539 /* Binary file format - 2540 * /----------------------------------------------------------------------\ 2541 * 0B | 0x5 [command index] | 2542 * 4B | Number of config attributes | Reserved | 2543 * 4B | Config ID | Entity ID | Length | 2544 * 4B | Value | 2545 * | | 2546 * \----------------------------------------------------------------------/ 2547 * There can be several cfg_id-entity_id-Length-Value sets as specified by 2548 * 'Number of config attributes'. 2549 * 2550 * The API parses config attributes from the user provided buffer and flashes 2551 * them to the respective NVM path using Management FW inerface. 2552 */ 2553 static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) 2554 { 2555 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2556 u8 entity_id, len, buf[32]; 2557 bool need_nvm_init = true; 2558 struct qed_ptt *ptt; 2559 u16 cfg_id, count; 2560 int rc = 0, i; 2561 u32 flags; 2562 2563 ptt = qed_ptt_acquire(hwfn); 2564 if (!ptt) 2565 return -EAGAIN; 2566 2567 /* NVM CFG ID attribute header */ 2568 *data += 4; 2569 count = *((u16 *)*data); 2570 *data += 4; 2571 2572 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2573 "Read config ids: num_attrs = %0d\n", count); 2574 /* NVM CFG ID attributes. Start loop index from 1 to avoid additional 2575 * arithmetic operations in the implementation. 
2576 */ 2577 for (i = 1; i <= count; i++) { 2578 cfg_id = *((u16 *)*data); 2579 *data += 2; 2580 entity_id = **data; 2581 (*data)++; 2582 len = **data; 2583 (*data)++; 2584 memcpy(buf, *data, len); 2585 *data += len; 2586 2587 flags = 0; 2588 if (need_nvm_init) { 2589 flags |= QED_NVM_CFG_OPTION_INIT; 2590 need_nvm_init = false; 2591 } 2592 2593 /* Commit to flash and free the resources */ 2594 if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) { 2595 flags |= QED_NVM_CFG_OPTION_COMMIT | 2596 QED_NVM_CFG_OPTION_FREE; 2597 need_nvm_init = true; 2598 } 2599 2600 if (entity_id) 2601 flags |= QED_NVM_CFG_OPTION_ENTITY_SEL; 2602 2603 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2604 "cfg_id = %d entity = %d len = %d\n", cfg_id, 2605 entity_id, len); 2606 rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags, 2607 buf, len); 2608 if (rc) { 2609 DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id); 2610 break; 2611 } 2612 } 2613 2614 qed_ptt_release(hwfn, ptt); 2615 2616 return rc; 2617 } 2618 2619 #define QED_MAX_NVM_BUF_LEN 32 2620 static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd) 2621 { 2622 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2623 u8 buf[QED_MAX_NVM_BUF_LEN]; 2624 struct qed_ptt *ptt; 2625 u32 len; 2626 int rc; 2627 2628 ptt = qed_ptt_acquire(hwfn); 2629 if (!ptt) 2630 return QED_MAX_NVM_BUF_LEN; 2631 2632 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf, 2633 &len); 2634 if (rc || !len) { 2635 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2636 len = QED_MAX_NVM_BUF_LEN; 2637 } 2638 2639 qed_ptt_release(hwfn, ptt); 2640 2641 return len; 2642 } 2643 2644 static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, 2645 u32 cmd, u32 entity_id) 2646 { 2647 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2648 struct qed_ptt *ptt; 2649 u32 flags, len; 2650 int rc = 0; 2651 2652 ptt = qed_ptt_acquire(hwfn); 2653 if (!ptt) 2654 return -EAGAIN; 2655 2656 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2657 "Read config cmd = %d entity id %d\n", 
cmd, entity_id); 2658 flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; 2659 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); 2660 if (rc) 2661 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2662 2663 qed_ptt_release(hwfn, ptt); 2664 2665 return rc; 2666 } 2667 2668 static int qed_nvm_flash(struct qed_dev *cdev, const char *name) 2669 { 2670 const struct firmware *image; 2671 const u8 *data, *data_end; 2672 u32 cmd_type; 2673 int rc; 2674 2675 rc = request_firmware(&image, name, &cdev->pdev->dev); 2676 if (rc) { 2677 DP_ERR(cdev, "Failed to find '%s'\n", name); 2678 return rc; 2679 } 2680 2681 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2682 "Flashing '%s' - firmware's data at %p, size is %08x\n", 2683 name, image->data, (u32)image->size); 2684 data = image->data; 2685 data_end = data + image->size; 2686 2687 rc = qed_nvm_flash_image_validate(cdev, image, &data); 2688 if (rc) 2689 goto exit; 2690 2691 while (data < data_end) { 2692 bool check_resp = false; 2693 2694 /* Parse the actual command */ 2695 cmd_type = *((u32 *)data); 2696 switch (cmd_type) { 2697 case QED_NVM_FLASH_CMD_FILE_DATA: 2698 rc = qed_nvm_flash_image_file_data(cdev, &data, 2699 &check_resp); 2700 break; 2701 case QED_NVM_FLASH_CMD_FILE_START: 2702 rc = qed_nvm_flash_image_file_start(cdev, &data, 2703 &check_resp); 2704 break; 2705 case QED_NVM_FLASH_CMD_NVM_CHANGE: 2706 rc = qed_nvm_flash_image_access(cdev, &data, 2707 &check_resp); 2708 break; 2709 case QED_NVM_FLASH_CMD_NVM_CFG_ID: 2710 rc = qed_nvm_flash_cfg_write(cdev, &data); 2711 break; 2712 default: 2713 DP_ERR(cdev, "Unknown command %08x\n", cmd_type); 2714 rc = -EINVAL; 2715 goto exit; 2716 } 2717 2718 if (rc) { 2719 DP_ERR(cdev, "Command %08x failed\n", cmd_type); 2720 goto exit; 2721 } 2722 2723 /* Check response if needed */ 2724 if (check_resp) { 2725 u32 mcp_response = 0; 2726 2727 if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { 2728 DP_ERR(cdev, "Failed getting MCP response\n"); 2729 rc = 
-EINVAL; 2730 goto exit; 2731 } 2732 2733 switch (mcp_response & FW_MSG_CODE_MASK) { 2734 case FW_MSG_CODE_OK: 2735 case FW_MSG_CODE_NVM_OK: 2736 case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: 2737 case FW_MSG_CODE_PHY_OK: 2738 break; 2739 default: 2740 DP_ERR(cdev, "MFW returns error: %08x\n", 2741 mcp_response); 2742 rc = -EINVAL; 2743 goto exit; 2744 } 2745 } 2746 } 2747 2748 exit: 2749 release_firmware(image); 2750 2751 return rc; 2752 } 2753 2754 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, 2755 u8 *buf, u16 len) 2756 { 2757 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2758 2759 return qed_mcp_get_nvm_image(hwfn, type, buf, len); 2760 } 2761 2762 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) 2763 { 2764 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2765 void *cookie = p_hwfn->cdev->ops_cookie; 2766 2767 if (ops && ops->schedule_recovery_handler) 2768 ops->schedule_recovery_handler(cookie); 2769 } 2770 2771 static const char * const qed_hw_err_type_descr[] = { 2772 [QED_HW_ERR_FAN_FAIL] = "Fan Failure", 2773 [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", 2774 [QED_HW_ERR_HW_ATTN] = "HW Attention", 2775 [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure", 2776 [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure", 2777 [QED_HW_ERR_FW_ASSERT] = "FW Assertion", 2778 [QED_HW_ERR_LAST] = "Unknown", 2779 }; 2780 2781 void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, 2782 enum qed_hw_err_type err_type) 2783 { 2784 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2785 void *cookie = p_hwfn->cdev->ops_cookie; 2786 const char *err_str; 2787 2788 if (err_type > QED_HW_ERR_LAST) 2789 err_type = QED_HW_ERR_LAST; 2790 err_str = qed_hw_err_type_descr[err_type]; 2791 2792 DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str); 2793 2794 /* Call the HW error handler of the protocol driver. 2795 * If it is not available - perform a minimal handling of preventing 2796 * HW attentions from being reasserted. 
2797 */ 2798 if (ops && ops->schedule_hw_err_handler) 2799 ops->schedule_hw_err_handler(cookie, err_type); 2800 else 2801 qed_int_attn_clr_enable(p_hwfn->cdev, true); 2802 } 2803 2804 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, 2805 void *handle) 2806 { 2807 return qed_set_queue_coalesce(rx_coal, tx_coal, handle); 2808 } 2809 2810 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 2811 { 2812 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2813 struct qed_ptt *ptt; 2814 int status = 0; 2815 2816 ptt = qed_ptt_acquire(hwfn); 2817 if (!ptt) 2818 return -EAGAIN; 2819 2820 status = qed_mcp_set_led(hwfn, ptt, mode); 2821 2822 qed_ptt_release(hwfn, ptt); 2823 2824 return status; 2825 } 2826 2827 int qed_recovery_process(struct qed_dev *cdev) 2828 { 2829 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2830 struct qed_ptt *p_ptt; 2831 int rc = 0; 2832 2833 p_ptt = qed_ptt_acquire(p_hwfn); 2834 if (!p_ptt) 2835 return -EAGAIN; 2836 2837 rc = qed_start_recovery_process(p_hwfn, p_ptt); 2838 2839 qed_ptt_release(p_hwfn, p_ptt); 2840 2841 return rc; 2842 } 2843 2844 static int qed_update_wol(struct qed_dev *cdev, bool enabled) 2845 { 2846 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2847 struct qed_ptt *ptt; 2848 int rc = 0; 2849 2850 if (IS_VF(cdev)) 2851 return 0; 2852 2853 ptt = qed_ptt_acquire(hwfn); 2854 if (!ptt) 2855 return -EAGAIN; 2856 2857 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? 
QED_OV_WOL_ENABLED 2858 : QED_OV_WOL_DISABLED); 2859 if (rc) 2860 goto out; 2861 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2862 2863 out: 2864 qed_ptt_release(hwfn, ptt); 2865 return rc; 2866 } 2867 2868 static int qed_update_drv_state(struct qed_dev *cdev, bool active) 2869 { 2870 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2871 struct qed_ptt *ptt; 2872 int status = 0; 2873 2874 if (IS_VF(cdev)) 2875 return 0; 2876 2877 ptt = qed_ptt_acquire(hwfn); 2878 if (!ptt) 2879 return -EAGAIN; 2880 2881 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 2882 QED_OV_DRIVER_STATE_ACTIVE : 2883 QED_OV_DRIVER_STATE_DISABLED); 2884 2885 qed_ptt_release(hwfn, ptt); 2886 2887 return status; 2888 } 2889 2890 static int qed_update_mac(struct qed_dev *cdev, u8 *mac) 2891 { 2892 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2893 struct qed_ptt *ptt; 2894 int status = 0; 2895 2896 if (IS_VF(cdev)) 2897 return 0; 2898 2899 ptt = qed_ptt_acquire(hwfn); 2900 if (!ptt) 2901 return -EAGAIN; 2902 2903 status = qed_mcp_ov_update_mac(hwfn, ptt, mac); 2904 if (status) 2905 goto out; 2906 2907 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2908 2909 out: 2910 qed_ptt_release(hwfn, ptt); 2911 return status; 2912 } 2913 2914 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) 2915 { 2916 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2917 struct qed_ptt *ptt; 2918 int status = 0; 2919 2920 if (IS_VF(cdev)) 2921 return 0; 2922 2923 ptt = qed_ptt_acquire(hwfn); 2924 if (!ptt) 2925 return -EAGAIN; 2926 2927 status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); 2928 if (status) 2929 goto out; 2930 2931 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2932 2933 out: 2934 qed_ptt_release(hwfn, ptt); 2935 return status; 2936 } 2937 2938 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, 2939 u8 dev_addr, u32 offset, u32 len) 2940 { 2941 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2942 struct 
qed_ptt *ptt; 2943 int rc = 0; 2944 2945 if (IS_VF(cdev)) 2946 return 0; 2947 2948 ptt = qed_ptt_acquire(hwfn); 2949 if (!ptt) 2950 return -EAGAIN; 2951 2952 rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, 2953 offset, len, buf); 2954 2955 qed_ptt_release(hwfn, ptt); 2956 2957 return rc; 2958 } 2959 2960 static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) 2961 { 2962 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2963 struct qed_ptt *ptt; 2964 int rc = 0; 2965 2966 if (IS_VF(cdev)) 2967 return 0; 2968 2969 ptt = qed_ptt_acquire(hwfn); 2970 if (!ptt) 2971 return -EAGAIN; 2972 2973 rc = qed_dbg_grc_config(hwfn, cfg_id, val); 2974 2975 qed_ptt_release(hwfn, ptt); 2976 2977 return rc; 2978 } 2979 2980 static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) 2981 { 2982 return QED_AFFIN_HWFN_IDX(cdev); 2983 } 2984 2985 static struct qed_selftest_ops qed_selftest_ops_pass = { 2986 .selftest_memory = &qed_selftest_memory, 2987 .selftest_interrupt = &qed_selftest_interrupt, 2988 .selftest_register = &qed_selftest_register, 2989 .selftest_clock = &qed_selftest_clock, 2990 .selftest_nvram = &qed_selftest_nvram, 2991 }; 2992 2993 const struct qed_common_ops qed_common_ops_pass = { 2994 .selftest = &qed_selftest_ops_pass, 2995 .probe = &qed_probe, 2996 .remove = &qed_remove, 2997 .set_power_state = &qed_set_power_state, 2998 .set_name = &qed_set_name, 2999 .update_pf_params = &qed_update_pf_params, 3000 .slowpath_start = &qed_slowpath_start, 3001 .slowpath_stop = &qed_slowpath_stop, 3002 .set_fp_int = &qed_set_int_fp, 3003 .get_fp_int = &qed_get_int_fp, 3004 .sb_init = &qed_sb_init, 3005 .sb_release = &qed_sb_release, 3006 .simd_handler_config = &qed_simd_handler_config, 3007 .simd_handler_clean = &qed_simd_handler_clean, 3008 .dbg_grc = &qed_dbg_grc, 3009 .dbg_grc_size = &qed_dbg_grc_size, 3010 .can_link_change = &qed_can_link_change, 3011 .set_link = &qed_set_link, 3012 .get_link = &qed_get_current_link, 3013 .drain = &qed_drain, 3014 
.update_msglvl = &qed_init_dp, 3015 .devlink_register = qed_devlink_register, 3016 .devlink_unregister = qed_devlink_unregister, 3017 .report_fatal_error = qed_report_fatal_error, 3018 .dbg_all_data = &qed_dbg_all_data, 3019 .dbg_all_data_size = &qed_dbg_all_data_size, 3020 .chain_alloc = &qed_chain_alloc, 3021 .chain_free = &qed_chain_free, 3022 .nvm_flash = &qed_nvm_flash, 3023 .nvm_get_image = &qed_nvm_get_image, 3024 .set_coalesce = &qed_set_coalesce, 3025 .set_led = &qed_set_led, 3026 .recovery_process = &qed_recovery_process, 3027 .recovery_prolog = &qed_recovery_prolog, 3028 .attn_clr_enable = &qed_int_attn_clr_enable, 3029 .update_drv_state = &qed_update_drv_state, 3030 .update_mac = &qed_update_mac, 3031 .update_mtu = &qed_update_mtu, 3032 .update_wol = &qed_update_wol, 3033 .db_recovery_add = &qed_db_recovery_add, 3034 .db_recovery_del = &qed_db_recovery_del, 3035 .read_module_eeprom = &qed_read_module_eeprom, 3036 .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, 3037 .read_nvm_cfg = &qed_nvm_flash_cfg_read, 3038 .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, 3039 .set_grc_config = &qed_set_grc_config, 3040 }; 3041 3042 void qed_get_protocol_stats(struct qed_dev *cdev, 3043 enum qed_mcp_protocol_type type, 3044 union qed_mcp_protocol_stats *stats) 3045 { 3046 struct qed_eth_stats eth_stats; 3047 3048 memset(stats, 0, sizeof(*stats)); 3049 3050 switch (type) { 3051 case QED_MCP_LAN_STATS: 3052 qed_get_vport_stats(cdev, ð_stats); 3053 stats->lan_stats.ucast_rx_pkts = 3054 eth_stats.common.rx_ucast_pkts; 3055 stats->lan_stats.ucast_tx_pkts = 3056 eth_stats.common.tx_ucast_pkts; 3057 stats->lan_stats.fcs_err = -1; 3058 break; 3059 case QED_MCP_FCOE_STATS: 3060 qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); 3061 break; 3062 case QED_MCP_ISCSI_STATS: 3063 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); 3064 break; 3065 default: 3066 DP_VERBOSE(cdev, QED_MSG_SP, 3067 "Invalid protocol type = %d\n", type); 3068 return; 3069 } 3070 } 3071 3072 int 
qed_mfw_tlv_req(struct qed_hwfn *hwfn) 3073 { 3074 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 3075 "Scheduling slowpath task [Flag: %d]\n", 3076 QED_SLOWPATH_MFW_TLV_REQ); 3077 /* Memory barrier for setting atomic bit */ 3078 smp_mb__before_atomic(); 3079 set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); 3080 /* Memory barrier after setting atomic bit */ 3081 smp_mb__after_atomic(); 3082 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); 3083 3084 return 0; 3085 } 3086 3087 static void 3088 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) 3089 { 3090 struct qed_common_cb_ops *op = cdev->protocol_ops.common; 3091 struct qed_eth_stats_common *p_common; 3092 struct qed_generic_tlvs gen_tlvs; 3093 struct qed_eth_stats stats; 3094 int i; 3095 3096 memset(&gen_tlvs, 0, sizeof(gen_tlvs)); 3097 op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); 3098 3099 if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) 3100 tlv->flags.ipv4_csum_offload = true; 3101 if (gen_tlvs.feat_flags & QED_TLV_LSO) 3102 tlv->flags.lso_supported = true; 3103 tlv->flags.b_set = true; 3104 3105 for (i = 0; i < QED_TLV_MAC_COUNT; i++) { 3106 if (is_valid_ether_addr(gen_tlvs.mac[i])) { 3107 ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); 3108 tlv->mac_set[i] = true; 3109 } 3110 } 3111 3112 qed_get_vport_stats(cdev, &stats); 3113 p_common = &stats.common; 3114 tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + 3115 p_common->rx_bcast_pkts; 3116 tlv->rx_frames_set = true; 3117 tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + 3118 p_common->rx_bcast_bytes; 3119 tlv->rx_bytes_set = true; 3120 tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + 3121 p_common->tx_bcast_pkts; 3122 tlv->tx_frames_set = true; 3123 tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + 3124 p_common->tx_bcast_bytes; 3125 tlv->rx_bytes_set = true; 3126 } 3127 3128 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, 
enum qed_mfw_tlv_type type, 3129 union qed_mfw_tlv_data *tlv_buf) 3130 { 3131 struct qed_dev *cdev = hwfn->cdev; 3132 struct qed_common_cb_ops *ops; 3133 3134 ops = cdev->protocol_ops.common; 3135 if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { 3136 DP_NOTICE(hwfn, "Can't collect TLV management info\n"); 3137 return -EINVAL; 3138 } 3139 3140 switch (type) { 3141 case QED_MFW_TLV_GENERIC: 3142 qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); 3143 break; 3144 case QED_MFW_TLV_ETH: 3145 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); 3146 break; 3147 case QED_MFW_TLV_FCOE: 3148 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); 3149 break; 3150 case QED_MFW_TLV_ISCSI: 3151 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); 3152 break; 3153 default: 3154 break; 3155 } 3156 3157 return 0; 3158 } 3159 3160 unsigned long qed_get_epoch_time(void) 3161 { 3162 return ktime_get_real_seconds(); 3163 } 3164